From 5914a8d0721ec804661fd13a03551c9fe8f614a8 Mon Sep 17 00:00:00 2001
From: DefectDojo release bot
Date: Mon, 3 Jul 2023 16:10:02 +0000
Subject: [PATCH 01/85] Update versions in application files
---
components/package.json | 2 +-
dojo/__init__.py | 2 +-
helm/defectdojo/Chart.yaml | 4 ++--
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/components/package.json b/components/package.json
index 13cc2a40a3..4e8aea8365 100644
--- a/components/package.json
+++ b/components/package.json
@@ -1,6 +1,6 @@
{
"name": "defectdojo",
- "version": "2.24.0",
+ "version": "2.25.0-dev",
"license" : "BSD-3-Clause",
"private": true,
"dependencies": {
diff --git a/dojo/__init__.py b/dojo/__init__.py
index 425c77274e..4c1f6f5856 100644
--- a/dojo/__init__.py
+++ b/dojo/__init__.py
@@ -4,6 +4,6 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app # noqa
-__version__ = '2.24.0'
+__version__ = '2.25.0-dev'
__url__ = 'https://github.com/DefectDojo/django-DefectDojo'
__docs__ = 'https://documentation.defectdojo.com'
diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml
index 4781a3fd42..40ab2609ac 100644
--- a/helm/defectdojo/Chart.yaml
+++ b/helm/defectdojo/Chart.yaml
@@ -1,8 +1,8 @@
apiVersion: v2
-appVersion: "2.24.0"
+appVersion: "2.25.0-dev"
description: A Helm chart for Kubernetes to install DefectDojo
name: defectdojo
-version: 1.6.74
+version: 1.6.75-dev
icon: https://www.defectdojo.org/img/favicon.ico
maintainers:
- name: madchap
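Patch 01 keeps three version strings in lockstep: the npm package version, the Python module's __version__, and the Helm chart's appVersion (the chart's own version field is bumped separately, to 1.6.75-dev). A minimal consistency check over those files is sketched below; the file paths come from the diffstat above, but the parsing logic and function name are illustrative, not part of the repository:

# Hypothetical sketch: verify the three files touched by the release bot
# agree on the application version. Paths are taken from the diffstat;
# everything else is an assumption for illustration.
import json
import re

def app_versions(root="."):
    pkg = json.load(open(f"{root}/components/package.json"))["version"]
    init_src = open(f"{root}/dojo/__init__.py").read()
    mod = re.search(r"__version__ = '([^']+)'", init_src).group(1)
    chart_src = open(f"{root}/helm/defectdojo/Chart.yaml").read()
    app = re.search(r'appVersion: "([^"]+)"', chart_src).group(1)
    return {"package.json": pkg, "dojo/__init__.py": mod, "Chart.yaml": app}

# After this patch, every value should read "2.25.0-dev".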
From 7d714d4fc0f4a791070ef1ad28db86c2a1a2e7af Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 3 Jul 2023 13:32:54 -0500
Subject: [PATCH 02/85] Update rabbitmq Docker tag from 3.12.0 to 3.12.1
(docker-compose.yml) (#8331)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
docker-compose.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index 24edcdf973..53ef096186 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -154,7 +154,7 @@ services:
volumes:
- defectdojo_postgres:/var/lib/postgresql/data
rabbitmq:
- image: rabbitmq:3.12.0-alpine@sha256:670efbfec7e9501ff877eca67f0653f421803269e113b4e8cf17cb5965ea415d
+ image: rabbitmq:3.12.1-alpine@sha256:8ec30b33b1bb517145a3c43cd0d377367db0fa4903650e1a026c541f15bfc9a8
profiles:
- mysql-rabbitmq
- postgres-rabbitmq
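The new image reference pins both a tag (3.12.1-alpine) and a content digest, and the digest is what the container runtime actually resolves; the tag is kept for readability. A small sketch of pulling such a reference apart, assuming only the name:tag@sha256:digest shape visible in the diff (the helper name is illustrative):

# Hypothetical sketch: split "name:tag@sha256:digest" so tag and digest
# can be checked independently when reviewing an upgrade like this one.
def parse_image_ref(ref: str) -> dict:
    name_tag, _, digest = ref.partition("@")
    name, _, tag = name_tag.partition(":")
    return {"name": name, "tag": tag or None, "digest": digest or None}

parts = parse_image_ref(
    "rabbitmq:3.12.1-alpine@sha256:"
    "8ec30b33b1bb517145a3c43cd0d377367db0fa4903650e1a026c541f15bfc9a8"
)
assert parts["tag"] == "3.12.1-alpine"
assert parts["digest"].startswith("sha256:")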
From b58043674f07b91e05fdc6bbe3bd1faa5288b58e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Jul 2023 13:36:35 -0500
Subject: [PATCH 03/85] Bump jira from 3.5.1 to 3.5.2 (#8329)
Bumps [jira](https://github.com/pycontribs/jira) from 3.5.1 to 3.5.2.
- [Release notes](https://github.com/pycontribs/jira/releases)
- [Commits](https://github.com/pycontribs/jira/compare/3.5.1...3.5.2)
---
updated-dependencies:
- dependency-name: jira
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index f569e00ea2..deeb48ccb7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -29,7 +29,7 @@ djangorestframework==3.14.0
gunicorn==20.1.0
html2text==2020.1.16
humanize==4.6.0
-jira==3.5.1
+jira==3.5.2
PyGithub==1.58.2
lxml==4.9.2
Markdown==3.4.3
From 81507a48f020573d9a6051d3b1426e3c009b9ce2 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 3 Jul 2023 13:37:40 -0500
Subject: [PATCH 04/85] Update release-drafter/release-drafter action from
v5.23.0 to v5.24.0 (.github/workflows/release-drafter.yml) (#8322)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
.github/workflows/release-drafter.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml
index 060891fac2..786d895516 100644
--- a/.github/workflows/release-drafter.yml
+++ b/.github/workflows/release-drafter.yml
@@ -19,7 +19,7 @@ jobs:
update_release_draft:
runs-on: ubuntu-latest
steps:
- - uses: release-drafter/release-drafter@v5.23.0
+ - uses: release-drafter/release-drafter@v5.24.0
with:
version: ${{github.event.inputs.version}}
env:
From c382877e9f39811c5a8efd459b4792b28c87c6df Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Jul 2023 13:38:49 -0500
Subject: [PATCH 05/85] Bump vcrpy from 4.3.1 to 5.0.0 (#8316)
Bumps [vcrpy](https://github.com/kevin1024/vcrpy) from 4.3.1 to 5.0.0.
- [Release notes](https://github.com/kevin1024/vcrpy/releases)
- [Changelog](https://github.com/kevin1024/vcrpy/blob/master/docs/changelog.rst)
- [Commits](https://github.com/kevin1024/vcrpy/compare/v4.3.1...v5.0.0)
---
updated-dependencies:
- dependency-name: vcrpy
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index deeb48ccb7..a9a0e98fe0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -64,7 +64,7 @@ JSON-log-formatter==0.5.2
django-split-settings==1.2.0
django-debug-toolbar==4.1.0
django-debug-toolbar-request-history==0.1.4
-vcrpy==4.3.1
+vcrpy==5.0.0
vcrpy-unittest==0.1.7
django-tagulous==1.3.3
PyJWT==2.7.0
From 40581b38d34cf95650f234536f5181a6e6868e5b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Jul 2023 14:04:44 -0500
Subject: [PATCH 06/85] Bump humanize from 4.6.0 to 4.7.0 (#8324)
Bumps [humanize](https://github.com/python-humanize/humanize) from 4.6.0 to 4.7.0.
- [Release notes](https://github.com/python-humanize/humanize/releases)
- [Commits](https://github.com/python-humanize/humanize/compare/4.6.0...4.7.0)
---
updated-dependencies:
- dependency-name: humanize
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index a9a0e98fe0..b4893a8e66 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -28,7 +28,7 @@ Django==4.1.9
djangorestframework==3.14.0
gunicorn==20.1.0
html2text==2020.1.16
-humanize==4.6.0
+humanize==4.7.0
jira==3.5.2
PyGithub==1.58.2
lxml==4.9.2
From 3abcb71f289dab8cefdab8e4272d2dffd60b786d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Jul 2023 14:51:27 -0500
Subject: [PATCH 07/85] Bump boto3 from 1.26.159 to 1.26.165 (#8336)
Bumps [boto3](https://github.com/boto/boto3) from 1.26.159 to 1.26.165.
- [Release notes](https://github.com/boto/boto3/releases)
- [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst)
- [Commits](https://github.com/boto/boto3/compare/1.26.159...1.26.165)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index b4893a8e66..48ef7948f2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -78,7 +78,7 @@ django-ratelimit==4.0.0
argon2-cffi==21.3.0
blackduck==1.1.0
pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support
-boto3==1.26.159 # Required for Celery Broker AWS (SQS) support
+boto3==1.26.165 # Required for Celery Broker AWS (SQS) support
netaddr==0.8.0
vulners==2.0.10
fontawesomefree==6.4.0
From 94097d6519b7d33ed0e3800b499cf622d6a102df Mon Sep 17 00:00:00 2001
From: kiblik
Date: Mon, 3 Jul 2023 20:12:43 +0000
Subject: [PATCH 08/85] Fix: STATICFILES_DIRS warning (#8252)
---
Dockerfile.django-alpine | 5 ++++-
Dockerfile.django-debian | 5 ++++-
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/Dockerfile.django-alpine b/Dockerfile.django-alpine
index 88c90df9c9..f777e41722 100644
--- a/Dockerfile.django-alpine
+++ b/Dockerfile.django-alpine
@@ -106,7 +106,10 @@ RUN \
chown ${appuser} /var/run/${appuser} && \
chmod g=u /var/run/${appuser} && \
chmod 775 /*.sh && \
- mkdir -p media/threat && chown -R ${uid} media
+ mkdir -p media/threat && chown -R ${uid} media && \
+ # To avoid warning: (staticfiles.W004) The directory '/app/components/node_modules' in the STATICFILES_DIRS setting does not exist.
+ mkdir -p components/node_modules && \
+ chown ${appuser} components/node_modules
USER ${uid}
ENV \
# Only variables that are not defined in settings.dist.py
diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian
index e03ea67e86..cac385b199 100644
--- a/Dockerfile.django-debian
+++ b/Dockerfile.django-debian
@@ -111,7 +111,10 @@ RUN \
chown ${appuser} /var/run/${appuser} && \
chmod g=u /var/run/${appuser} && \
chmod 775 /*.sh && \
- mkdir -p media/threat && chown -R ${uid} media
+ mkdir -p media/threat && chown -R ${uid} media && \
+ # To avoid warning: (staticfiles.W004) The directory '/app/components/node_modules' in the STATICFILES_DIRS setting does not exist.
+ mkdir -p components/node_modules && \
+ chown ${appuser} components/node_modules
USER ${uid}
ENV \
# Only variables that are not defined in settings.dist.py
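The Dockerfile comments above quote the warning this patch silences: Django's staticfiles system check emits staticfiles.W004 for every STATICFILES_DIRS entry that is missing on disk, so pre-creating components/node_modules at build time makes the check pass even before node dependencies are installed. A minimal reproduction of that check is sketched below; the path is taken from the warning text in the diff, and the loop illustrates the check's condition rather than Django's actual implementation:

# Hypothetical sketch of what staticfiles.W004 checks, using the path
# quoted in the Dockerfile comment. Django's real check lives in
# django.contrib.staticfiles.checks; this only mirrors its condition.
import os

STATICFILES_DIRS = ["/app/components/node_modules"]

for d in STATICFILES_DIRS:
    if not os.path.isdir(d):
        print(f"(staticfiles.W004) The directory '{d}' in the "
              f"STATICFILES_DIRS setting does not exist.")
# The added "mkdir -p components/node_modules" guarantees the directory
# exists in the image, so no warning is raised at startup.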
From 140e006c95927f8a41611064a034d0e383b0c32f Mon Sep 17 00:00:00 2001
From: Alejandro Tortolero
Date: Mon, 3 Jul 2023 15:14:00 -0500
Subject: [PATCH 09/85] Update files with PEP8 standards in folder dojo/tools
#004 (#8304)
* Update files in folder dojo/tools/hydra with PEP8 standards.
* Update files in folder dojo/tools/huskyci with PEP8 standards.
* Update files in folder dojo/tools/ibm_app with PEP8 standards.
* Update files in folder dojo/tools/immuniweb with PEP8 standards.
* Update files in folder dojo/tools/intsights with PEP8 standards.
* Update files in folder dojo/tools/jfrog_xray_api_summary_artifact with PEP8 standards.
* Update files in folder dojo/tools/jfrog_xray_unified with PEP8 standards.
* Update files in folder dojo/tools/jfrogxray with PEP8 standards.
* Update files in folder dojo/tools/kics with PEP8 standards.
* Update files in folder dojo/tools/kiuwan with PEP8 standards.
* Update files in folder dojo/tools/meterian with PEP8 standards.
* Update files in folder dojo/tools/microfocus_webinspect with PEP8 standards.
* Update files in folder dojo/tools/mobsfscan with PEP8 standards.
* Change BaseException to Exception
---
dojo/tools/huskyci/parser.py | 37 ++--
dojo/tools/hydra/parser.py | 67 ++++--
dojo/tools/ibm_app/parser.py | 123 ++++++-----
dojo/tools/immuniweb/parser.py | 49 +++--
dojo/tools/intsights/parser.py | 194 +++++++++++-------
.../jfrog_xray_api_summary_artifact/parser.py | 105 ++++++----
dojo/tools/jfrog_xray_unified/parser.py | 94 +++++----
dojo/tools/jfrogxray/parser.py | 119 +++++++----
dojo/tools/kics/parser.py | 44 ++--
dojo/tools/kiuwan/parser.py | 106 ++++++----
dojo/tools/kubebench/parser.py | 111 +++++-----
dojo/tools/meterian/parser.py | 70 +++++--
dojo/tools/microfocus_webinspect/parser.py | 69 ++++---
dojo/tools/mobsfscan/parser.py | 44 ++--
14 files changed, 768 insertions(+), 464 deletions(-)
diff --git a/dojo/tools/huskyci/parser.py b/dojo/tools/huskyci/parser.py
index 24d5639640..455204bd52 100644
--- a/dojo/tools/huskyci/parser.py
+++ b/dojo/tools/huskyci/parser.py
@@ -19,7 +19,6 @@ def get_description_for_scan_types(self, scan_type):
return "Import HuskyCI Report vulnerabilities in JSON format."
def get_findings(self, json_output, test):
-
if json_output is None:
return
@@ -31,10 +30,10 @@ def parse_json(self, json_output):
try:
data = json_output.read()
try:
- tree = json.loads(str(data, 'utf-8'))
- except:
+ tree = json.loads(str(data, "utf-8"))
+ except Exception:
tree = json.loads(data)
- except:
+ except Exception:
raise ValueError("Invalid format")
return tree
@@ -42,18 +41,19 @@ def parse_json(self, json_output):
def get_items(self, tree, test):
items = {}
- for language in tree.get('huskyciresults', {}):
- tools_results = tree['huskyciresults'][language]
+ for language in tree.get("huskyciresults", {}):
+ tools_results = tree["huskyciresults"][language]
for tool in tools_results:
severity_results = tools_results[tool]
for severity in severity_results:
vulns = severity_results[severity]
for vuln in vulns:
- vuln['severity'] = severity[0:-5].lower().capitalize()
- if vuln['severity'] not in ('High', 'Medium', 'Low'):
+ vuln["severity"] = severity[0:-5].lower().capitalize()
+ if vuln["severity"] not in ("High", "Medium", "Low"):
continue
unique_key = hashlib.md5(
- str(vuln).encode('utf-8')).hexdigest()
+ str(vuln).encode("utf-8")
+ ).hexdigest()
item = get_item(vuln, test)
items[unique_key] = item
@@ -62,21 +62,21 @@ def get_items(self, tree, test):
def get_item(item_node, test):
# description
- description = item_node.get('details', '')
- if 'code' in item_node:
+ description = item_node.get("details", "")
+ if "code" in item_node:
description += "\nCode: " + item_node.get("code")
- if 'confidence' in item_node:
+ if "confidence" in item_node:
description += "\nConfidence: " + item_node.get("confidence")
- if 'securitytool' in item_node:
+ if "securitytool" in item_node:
description += "\nSecurity Tool: " + item_node.get("securitytool")
finding = Finding(
- title=item_node.get('title'),
+ title=item_node.get("title"),
test=test,
- severity=item_node.get('severity'),
+ severity=item_node.get("severity"),
description=description,
- mitigation='N/A',
- references='',
+ mitigation="N/A",
+ references="",
false_p=False,
duplicate=False,
out_of_scope=False,
@@ -85,6 +85,7 @@ def get_item(item_node, test):
line=item_node.get("line"),
static_finding=True,
dynamic_finding=False,
- impact="No impact provided")
+ impact="No impact provided"
+ )
return finding
diff --git a/dojo/tools/hydra/parser.py b/dojo/tools/hydra/parser.py
index cd4767f96f..f24160ac7f 100644
--- a/dojo/tools/hydra/parser.py
+++ b/dojo/tools/hydra/parser.py
@@ -10,12 +10,14 @@
class HydraScanMetadata:
def __init__(self, generator):
- self.date = generator.get('built', )
- self.command = generator.get('commandline')
- self.schema_version = generator.get('jsonoutputversion')
- self.service_type = generator.get('service')
- self.tool_version = generator.get('version')
- self.server = generator.get('server')
+ self.date = generator.get(
+ "built",
+ )
+ self.command = generator.get("commandline")
+ self.schema_version = generator.get("jsonoutputversion")
+ self.service_type = generator.get("service")
+ self.tool_version = generator.get("version")
+ self.server = generator.get("server")
class HydraParser(object):
@@ -40,7 +42,9 @@ def get_findings(self, json_output, test):
return findings
- def __extract_findings(self, raw_findings, metadata: HydraScanMetadata, test):
+ def __extract_findings(
+ self, raw_findings, metadata: HydraScanMetadata, test
+ ):
findings = []
for raw_finding in raw_findings:
@@ -48,28 +52,47 @@ def __extract_findings(self, raw_findings, metadata: HydraScanMetadata, test):
finding = self.__extract_finding(raw_finding, metadata, test)
findings.append(finding)
except ValueError:
- logger.warning('Error when digesting a finding from hydra! Please revise supplied report, vital information was missing (e.g. host)!')
+ logger.warning(
+ "Error when digesting a finding from hydra! Please revise supplied report, vital information was missing (e.g. host)!"
+ )
return findings
- def __extract_finding(self, raw_finding, metadata: HydraScanMetadata, test) -> Finding:
- host = raw_finding.get('host')
- port = raw_finding.get('port')
- username = raw_finding.get('login')
- password = raw_finding.get('password')
-
- if (host is None) or (port is None) or (username is None) or (password is None):
- raise ValueError("Vital information is missing for this finding! Skipping this finding!")
+ def __extract_finding(
+ self, raw_finding, metadata: HydraScanMetadata, test
+ ) -> Finding:
+ host = raw_finding.get("host")
+ port = raw_finding.get("port")
+ username = raw_finding.get("login")
+ password = raw_finding.get("password")
+
+ if (
+ (host is None)
+ or (port is None)
+ or (username is None)
+ or (password is None)
+ ):
+ raise ValueError(
+ "Vital information is missing for this finding! Skipping this finding!"
+ )
finding = Finding(
test=test,
title="Weak username / password combination found for " + host,
- date=parse_datetime(metadata.date) if metadata.date else date.today(),
+ date=parse_datetime(metadata.date)
+ if metadata.date
+ else date.today(),
severity="High",
- description=host + " on port " + str(port) + " is allowing logins with easy to guess username " + username + " and password " + password,
+ description=host
+ + " on port "
+ + str(port)
+ + " is allowing logins with easy to guess username "
+ + username
+ + " and password "
+ + password,
static_finding=False,
dynamic_finding=True,
- service=metadata.service_type,
+ service=metadata.service_type
)
finding.unsaved_endpoints = [Endpoint(host=host, port=port)]
@@ -79,7 +102,9 @@ def __extract_finding(self, raw_finding, metadata: HydraScanMetadata, test) -> F
def __parse_json(json_output):
report = json.load(json_output)
- if 'generator' not in report or 'results' not in report:
- raise ValueError("Unexpected JSON format provided. That doesn't look like a Hydra scan!")
+ if "generator" not in report or "results" not in report:
+ raise ValueError(
+ "Unexpected JSON format provided. That doesn't look like a Hydra scan!"
+ )
return report
diff --git a/dojo/tools/ibm_app/parser.py b/dojo/tools/ibm_app/parser.py
index 8a1c52fc16..8e4147a228 100644
--- a/dojo/tools/ibm_app/parser.py
+++ b/dojo/tools/ibm_app/parser.py
@@ -9,7 +9,6 @@
class IbmAppParser(object):
-
def get_scan_types(self):
return ["IBM AppScan DAST"]
@@ -20,19 +19,20 @@ def get_description_for_scan_types(self, scan_type):
return "XML file from IBM App Scanner."
def get_findings(self, file, test):
-
ibm_scan_tree = ElementTree.parse(file)
root = ibm_scan_tree.getroot()
# validate XML file
- if 'xml-report' not in root.tag:
- raise ValueError("This does not look like a valid expected Ibm AppScan DAST XML file.")
+ if "xml-report" not in root.tag:
+ raise ValueError(
+ "This does not look like a valid expected Ibm AppScan DAST XML file."
+ )
- issue_list = []
# self.hosts = self.fetch_host_details()
issue_types = self.fetch_issue_types(root)
dupes = dict()
- # Now time to loop through individual issues and perform necessary actions
+ # Now time to loop through individual issues and perform necessary
+ # actions
for issue in root.iter("issue-group"):
for item in issue.iter("item"):
ref_link = ""
@@ -40,34 +40,52 @@ def get_findings(self, file, test):
recommendation_data = ""
issue_data = issue_types[item.find("issue-type/ref").text]
- name = issue_data['name']
+ name = issue_data["name"]
# advisory = issue_data['advisory']
- vulnerability_id = issue_data.get('cve')
+ vulnerability_id = issue_data.get("cve")
- cwe = issue_data.get('cwe')
+ cwe = issue_data.get("cwe")
if cwe:
cwe = int(cwe)
- url = self.get_url(root, item.find('url/ref').text)
-
- severity = item.find('severity').text.capitalize()
- if severity == 'Informational':
- severity = 'Info'
- issue_description = self.fetch_advisory_group(root, issue_data['advisory'])
-
- for fix_recommendation_group in root.iter("fix-recommendation-group"):
- for recommendation in fix_recommendation_group.iter("item"):
- if recommendation.attrib['id'] == issue_data["fix-recommendation"]:
- data = recommendation.find("general/fixRecommendation")
+ url = self.get_url(root, item.find("url/ref").text)
+
+ severity = item.find("severity").text.capitalize()
+ if severity == "Informational":
+ severity = "Info"
+ issue_description = self.fetch_advisory_group(
+ root, issue_data["advisory"]
+ )
+
+ for fix_recommendation_group in root.iter(
+ "fix-recommendation-group"
+ ):
+ for recommendation in fix_recommendation_group.iter(
+ "item"
+ ):
+ if (
+ recommendation.attrib["id"]
+ == issue_data["fix-recommendation"]
+ ):
+ data = recommendation.find(
+ "general/fixRecommendation"
+ )
for data_text in data.iter("text"):
- recommendation_data += data_text.text + "\n" # some texts are being repeated
+ recommendation_data += (
+ data_text.text + "\n"
+ ) # some texts are being repeated
- for link in data.iter('link'):
+ for link in data.iter("link"):
if link is not None:
ref_link += link.text + "\n"
- # Now time to start assigning issues to findings and endpoints
- dupe_key = hashlib.md5(str(issue_description + name + severity).encode('utf-8')).hexdigest()
+ # Now time to start assigning issues to findings and
+ # endpoints
+ dupe_key = hashlib.md5(
+ str(issue_description + name + severity).encode(
+ "utf-8"
+ )
+ ).hexdigest()
# check if finding is a duplicate
if dupe_key in dupes:
finding = dupes[dupe_key] # fetch finding
@@ -75,24 +93,31 @@ def get_findings(self, file, test):
finding.description += issue_description
else: # finding is not a duplicate
# create finding
- finding = Finding(title=name,
- test=test,
- cwe=cwe,
- description=issue_description,
- severity=severity,
- mitigation=recommendation_data,
- references=ref_link,
- dynamic_finding=True)
+ finding = Finding(
+ title=name,
+ test=test,
+ cwe=cwe,
+ description=issue_description,
+ severity=severity,
+ mitigation=recommendation_data,
+ references=ref_link,
+ dynamic_finding=True
+ )
if vulnerability_id:
- finding.unsaved_vulnerability_ids = [vulnerability_id]
+ finding.unsaved_vulnerability_ids = [
+ vulnerability_id
+ ]
finding.unsaved_endpoints = list()
dupes[dupe_key] = finding
# in case empty string is returned as url
# this condition is very rare to occur
- # As most of the actions of any vuln scanner depends on urls
+ # As most of the actions of any vuln scanner depends on
+ # urls
if url:
- finding.unsaved_endpoints.append(Endpoint.from_uri(url))
+ finding.unsaved_endpoints.append(
+ Endpoint.from_uri(url)
+ )
return list(dupes.values())
@@ -101,22 +126,24 @@ def fetch_issue_types(self, root):
issues = {}
for issue_type in root.iter("issue-type-group"):
for item in issue_type.iter("item"):
- issues[item.attrib['id']] = {
- 'name': item.find("name").text,
- 'advisory': item.find("advisory/ref").text,
- 'fix-recommendation': item.find("fix-recommendation/ref").text
+ issues[item.attrib["id"]] = {
+ "name": item.find("name").text,
+ "advisory": item.find("advisory/ref").text,
+ "fix-recommendation": item.find(
+ "fix-recommendation/ref"
+ ).text,
}
cve = item.find("cve").text
if cve is not None:
- issues[item.attrib['id']]['cve'] = cve
+ issues[item.attrib["id"]]["cve"] = cve
# cwe can be a link
cwe = item.find("cwe/link")
if cwe is None:
cwe = item.find("cwe")
if cwe.text is not None:
- issues[item.attrib['id']]['cwe'] = int(cwe.text)
+ issues[item.attrib["id"]]["cwe"] = int(cwe.text)
return issues
@@ -127,14 +154,16 @@ def fetch_advisory_group(self, root, advisory):
"""
for advisory_group in root.iter("advisory-group"):
for item in advisory_group.iter("item"):
- if item.attrib['id'] == advisory:
- return item.find('advisory/testTechnicalDescription/text').text
+ if item.attrib["id"] == advisory:
+ return item.find(
+ "advisory/testTechnicalDescription/text"
+ ).text
return "N/A"
def get_url(self, root, ref):
- for url_group in root.iter('url-group'):
- for item in url_group.iter('item'):
- if item.attrib['id'] == ref:
- return item.find('name').text
+ for url_group in root.iter("url-group"):
+ for item in url_group.iter("item"):
+ if item.attrib["id"] == ref:
+ return item.find("name").text
return None # This case is very rare to occur
diff --git a/dojo/tools/immuniweb/parser.py b/dojo/tools/immuniweb/parser.py
index 2c03bace4a..6265d1f620 100644
--- a/dojo/tools/immuniweb/parser.py
+++ b/dojo/tools/immuniweb/parser.py
@@ -4,11 +4,10 @@
from dojo.models import Endpoint, Finding
-__author__ = 'properam'
+__author__ = "properam"
class ImmuniwebParser(object):
-
def get_scan_types(self):
return ["Immuniweb Scan"]
@@ -19,43 +18,49 @@ def get_description_for_scan_types(self, scan_type):
return "XML Scan Result File from Imuniweb Scan."
def get_findings(self, file, test):
-
ImmuniScanTree = ElementTree.parse(file)
root = ImmuniScanTree.getroot()
# validate XML file
- if 'Vulnerabilities' not in root.tag:
- raise ValueError("This does not look like a valid expected Immuniweb XML file.")
+ if "Vulnerabilities" not in root.tag:
+ raise ValueError(
+ "This does not look like a valid expected Immuniweb XML file."
+ )
dupes = dict()
for vulnerability in root.iter("Vulnerability"):
"""
- The Tags available in XML File are:
- ID, Name, Date, Status,
- Type, CWE_ID, CVE_ID, CVSSv3,
- Risk, URL, Description, PoC
+ The Tags available in XML File are:
+ ID, Name, Date, Status,
+ Type, CWE_ID, CVE_ID, CVSSv3,
+ Risk, URL, Description, PoC
"""
mitigation = "N/A"
impact = "N/A"
- title = vulnerability.find('Name').text
- reference = vulnerability.find('ID').text
- cwe = ''.join(i for i in vulnerability.find('CWE-ID').text if i.isdigit())
+ title = vulnerability.find("Name").text
+ reference = vulnerability.find("ID").text
+ cwe = "".join(
+ i for i in vulnerability.find("CWE-ID").text if i.isdigit()
+ )
if cwe:
cwe = cwe
else:
cwe = None
- vulnerability_id = vulnerability.find('CVE-ID').text
- steps_to_reproduce = vulnerability.find('PoC').text
- # just to make sure severity is in the recognised sentence casing form
- severity = vulnerability.find('Risk').text.capitalize()
+ vulnerability_id = vulnerability.find("CVE-ID").text
+ steps_to_reproduce = vulnerability.find("PoC").text
+ # just to make sure severity is in the recognised sentence casing
+ # form
+ severity = vulnerability.find("Risk").text.capitalize()
# Set 'Warning' severity === 'Informational'
- if severity == 'Warning':
+ if severity == "Warning":
severity = "Informational"
- description = (vulnerability.find('Description').text)
+ description = vulnerability.find("Description").text
url = vulnerability.find("URL").text
- dupe_key = hashlib.md5(str(description + title + severity).encode('utf-8')).hexdigest()
+ dupe_key = hashlib.md5(
+ str(description + title + severity).encode("utf-8")
+ ).hexdigest()
# check if finding is a duplicate
if dupe_key in dupes:
@@ -64,7 +69,8 @@ def get_findings(self, file, test):
finding.description += description
else: # finding is not a duplicate
# create finding
- finding = Finding(title=title,
+ finding = Finding(
+ title=title,
test=test,
description=description,
severity=severity,
@@ -73,7 +79,8 @@ def get_findings(self, file, test):
mitigation=mitigation,
impact=impact,
references=reference,
- dynamic_finding=True)
+ dynamic_finding=True
+ )
if vulnerability_id:
finding.unsaved_vulnerability_ids = [vulnerability_id]
finding.unsaved_endpoints = list()
diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py
index 323a9036df..2c97225fae 100644
--- a/dojo/tools/intsights/parser.py
+++ b/dojo/tools/intsights/parser.py
@@ -34,23 +34,38 @@ def _parse_json(self, json_file) -> [dict]:
alerts = []
original_alerts = json.load(json_file)
- for original_alert in original_alerts.get('Alerts', []):
+ for original_alert in original_alerts.get("Alerts", []):
alert = dict()
- alert['alert_id'] = original_alert['_id']
- alert['title'] = original_alert['Details']['Title']
- alert['description'] = original_alert['Details']['Description']
- alert['severity'] = original_alert['Details']['Severity']
- alert['type'] = original_alert['Details']['Type']
- alert['source_date'] = original_alert['Details']['Source'].get("Date", "None provided")
- alert['report_date'] = original_alert.get("FoundDate", "None provided")
- alert['network_type'] = original_alert['Details']['Source'].get('NetworkType')
- alert['source_url'] = original_alert['Details']['Source'].get('URL')
- alert['assets'] = ','.join([item.get('Value') for item in original_alert['Assets']])
- alert['tags'] = original_alert['Details'].get('Tags')
- alert['status'] = 'Closed' if original_alert['Closed'].get('IsClosed') else 'Open'
- alert[
- 'alert_link'] = f'https://dashboard.intsights.com/#/threat-command/alerts?search=' \
- f'{original_alert["_id"]}'
+ alert["alert_id"] = original_alert["_id"]
+ alert["title"] = original_alert["Details"]["Title"]
+ alert["description"] = original_alert["Details"]["Description"]
+ alert["severity"] = original_alert["Details"]["Severity"]
+ alert["type"] = original_alert["Details"]["Type"]
+ alert["source_date"] = original_alert["Details"]["Source"].get(
+ "Date", "None provided"
+ )
+ alert["report_date"] = original_alert.get(
+ "FoundDate", "None provided"
+ )
+ alert["network_type"] = original_alert["Details"]["Source"].get(
+ "NetworkType"
+ )
+ alert["source_url"] = original_alert["Details"]["Source"].get(
+ "URL"
+ )
+ alert["assets"] = ",".join(
+ [item.get("Value") for item in original_alert["Assets"]]
+ )
+ alert["tags"] = original_alert["Details"].get("Tags")
+ alert["status"] = (
+ "Closed"
+ if original_alert["Closed"].get("IsClosed")
+ else "Open"
+ )
+ alert["alert_link"] = (
+ f"https://dashboard.intsights.com/#/threat-command/alerts?search="
+ f'{original_alert["_id"]}'
+ )
alerts.append(alert)
@@ -66,44 +81,73 @@ def _parse_csv(self, csv_file) -> [dict]:
A list of alerts [dict()]
"""
- default_keys = ['Alert ID', 'Title', 'Description', 'Severity', 'Type', 'Source Date (UTC)',
- 'Report Date (UTC)', 'Network Type', 'Source URL', 'Source Name', 'Assets', 'Tags',
- 'Assignees', 'Remediation', 'Status', 'Closed Reason', 'Additional Info', 'Rating',
- 'Alert Link']
+ default_keys = [
+ "Alert ID",
+ "Title",
+ "Description",
+ "Severity",
+ "Type",
+ "Source Date (UTC)",
+ "Report Date (UTC)",
+ "Network Type",
+ "Source URL",
+ "Source Name",
+ "Assets",
+ "Tags",
+ "Assignees",
+ "Remediation",
+ "Status",
+ "Closed Reason",
+ "Additional Info",
+ "Rating",
+ "Alert Link"
+ ]
# These keys require a value. If one ore more of the values is null or empty, the entire Alert is ignored.
# This is to avoid attempting to import incomplete Findings.
- required_keys = ['alert_id', 'title', 'severity', 'status']
+ required_keys = ["alert_id", "title", "severity", "status"]
alerts = []
invalid_alerts = []
content = csv_file.read()
- if type(content) is bytes:
- content = content.decode('utf-8')
- csv_reader = csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"')
+ if isinstance(content, bytes):
+ content = content.decode("utf-8")
+ csv_reader = csv.DictReader(
+ io.StringIO(content), delimiter=",", quotechar='"'
+ )
# Don't bother parsing if the keys don't match exactly what's expected
- if collections.Counter(default_keys) == collections.Counter(csv_reader.fieldnames):
- default_valud = 'None provided'
+ if collections.Counter(default_keys) == collections.Counter(
+ csv_reader.fieldnames
+ ):
+ default_valud = "None provided"
for alert in csv_reader:
- alert['alert_id'] = alert.pop('Alert ID')
- alert['title'] = alert.pop('Title')
- alert['description'] = alert.pop('Description')
- alert['severity'] = alert.pop('Severity')
- alert['type'] = alert.pop('Type', )
- alert['source_date'] = alert.pop('Source Date (UTC)', default_valud)
- alert['report_date'] = alert.pop('Report Date (UTC)', default_valud)
- alert['network_type'] = alert.pop('Network Type', default_valud)
- alert['source_url'] = alert.pop('Source URL', default_valud)
- alert['assets'] = alert.pop('Assets', default_valud)
- alert['tags'] = alert.pop('Tags', default_valud)
- alert['status'] = alert.pop('Status', default_valud)
- alert['alert_link'] = alert.pop('Alert Link')
- alert.pop('Assignees')
- alert.pop('Remediation')
- alert.pop('Closed Reason')
- alert.pop('Rating')
+ alert["alert_id"] = alert.pop("Alert ID")
+ alert["title"] = alert.pop("Title")
+ alert["description"] = alert.pop("Description")
+ alert["severity"] = alert.pop("Severity")
+ alert["type"] = alert.pop(
+ "Type",
+ )
+ alert["source_date"] = alert.pop(
+ "Source Date (UTC)", default_valud
+ )
+ alert["report_date"] = alert.pop(
+ "Report Date (UTC)", default_valud
+ )
+ alert["network_type"] = alert.pop(
+ "Network Type", default_valud
+ )
+ alert["source_url"] = alert.pop("Source URL", default_valud)
+ alert["assets"] = alert.pop("Assets", default_valud)
+ alert["tags"] = alert.pop("Tags", default_valud)
+ alert["status"] = alert.pop("Status", default_valud)
+ alert["alert_link"] = alert.pop("Alert Link")
+ alert.pop("Assignees")
+ alert.pop("Remediation")
+ alert.pop("Closed Reason")
+ alert.pop("Rating")
for key in required_keys:
if not alert[key]:
invalid_alerts.append(alert)
@@ -111,7 +155,9 @@ def _parse_csv(self, csv_file) -> [dict]:
if alert not in invalid_alerts:
alerts.append(alert)
else:
- self._LOGGER.error('The CSV file has one or more missing or unexpected header values')
+ self._LOGGER.error(
+ "The CSV file has one or more missing or unexpected header values"
+ )
return alerts
@@ -123,41 +169,49 @@ def _build_finding_description(self, alert: dict) -> str:
Returns: A markdown formatted description
"""
- description = "\n".join([
- alert["description"],
- f'**Date Found**: `{alert.get("report_date", "None provided")} `',
- f'**Type**: `{alert.get("type", "None provided")} `',
- f'**Source**: `{alert.get("source_url", "None provided")} `',
- f'**Source Date**: ` {alert.get("source_date", "None provided")} `',
- f'**Source Network Type**: `{alert.get("network_type", "None provided")} `',
- f'**Assets Affected**: `{alert.get("assets", "None provided")} `',
- f'**Alert Link**: {alert.get("alert_link", "None provided")}'
- ])
+ description = "\n".join(
+ [
+ alert["description"],
+ f'**Date Found**: `{alert.get("report_date", "None provided")} `',
+ f'**Type**: `{alert.get("type", "None provided")} `',
+ f'**Source**: `{alert.get("source_url", "None provided")} `',
+ f'**Source Date**: ` {alert.get("source_date", "None provided")} `',
+ f'**Source Network Type**: `{alert.get("network_type", "None provided")} `',
+ f'**Assets Affected**: `{alert.get("assets", "None provided")} `',
+ f'**Alert Link**: {alert.get("alert_link", "None provided")}'
+ ]
+ )
return description
def get_findings(self, file, test):
duplicates = dict()
- if file.name.lower().endswith('.json'):
- alerts = self._parse_json(file, )
- elif file.name.lower().endswith('.csv'):
+ if file.name.lower().endswith(".json"):
+ alerts = self._parse_json(
+ file,
+ )
+ elif file.name.lower().endswith(".csv"):
alerts = self._parse_csv(file)
else:
- raise ValueError('Filename extension not recognized. Use .json or .csv')
+ raise ValueError(
+ "Filename extension not recognized. Use .json or .csv"
+ )
for alert in alerts:
- dupe_key = alert['alert_id']
-
- alert = Finding(title=alert['title'],
- test=test,
- active=False if alert['status'] == 'Closed' else True,
- verified=True,
- description=self._build_finding_description(alert),
- severity=alert['severity'],
- references=alert["alert_link"],
- static_finding=False,
- dynamic_finding=True,
- unique_id_from_tool=alert['alert_id'])
+ dupe_key = alert["alert_id"]
+
+ alert = Finding(
+ title=alert["title"],
+ test=test,
+ active=False if alert["status"] == "Closed" else True,
+ verified=True,
+ description=self._build_finding_description(alert),
+ severity=alert["severity"],
+ references=alert["alert_link"],
+ static_finding=False,
+ dynamic_finding=True,
+ unique_id_from_tool=alert["alert_id"]
+ )
duplicates[dupe_key] = alert
diff --git a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py
index a980f31fe0..d8bea2acd9 100644
--- a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py
+++ b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py
@@ -8,16 +8,17 @@
class JFrogXrayApiSummaryArtifactParser(object):
-
# This function return a list of all the scan_type supported by your parser
def get_scan_types(self):
return ["JFrog Xray API Summary Artifact Scan"]
- # This function return a string used to provide some text in the UI (short label)
+ # This function return a string used to provide some text in the UI (short
+ # label)
def get_label_for_scan_types(self, scan_type):
return scan_type
- # This function return a string used to provide some text in the UI (long description)
+ # This function return a string used to provide some text in the UI (long
+ # description)
def get_description_for_scan_types(self, scan_type):
return "Import Xray findings in JSON format from the JFrog Xray API Summary/Artifact JSON response"
@@ -28,56 +29,83 @@ def get_findings(self, json_output, test):
def get_items(self, tree, test):
items = []
- if 'artifacts' in tree:
- artifact_tree = tree['artifacts']
+ if "artifacts" in tree:
+ artifact_tree = tree["artifacts"]
for artifactNode in artifact_tree:
- artifact_general = artifactNode['general']
- artifact_issues = artifactNode['issues']
+ artifact_general = artifactNode["general"]
+ artifact_issues = artifactNode["issues"]
artifact = decode_artifact(artifact_general)
for node in artifact_issues:
- service = decode_service(artifact_general['name'])
- item = get_item(node, str(service), test, artifact.name, artifact.version, artifact.sha256)
+ service = decode_service(artifact_general["name"])
+ item = get_item(
+ node,
+ str(service),
+ test,
+ artifact.name,
+ artifact.version,
+ artifact.sha256,
+ )
items.append(item)
return items
# Retrieve the findings of the affected 1st level component (Artifact)
-def get_item(vulnerability, service, test, artifact_name, artifact_version, artifact_sha256):
+def get_item(
+ vulnerability,
+ service,
+ test,
+ artifact_name,
+ artifact_version,
+ artifact_sha256,
+):
cve = None
cwe = None
cvssv3 = None
impact_path = ImpactPath("", "", "")
- if 'severity' in vulnerability:
- if vulnerability['severity'] == 'Unknown':
+ if "severity" in vulnerability:
+ if vulnerability["severity"] == "Unknown":
severity = "Informational"
else:
- severity = vulnerability['severity'].title()
+ severity = vulnerability["severity"].title()
else:
severity = "Informational"
- # Some entries have no CVE entries, despite they exist. Example CVE-2017-1000502.
- cves = vulnerability.get('cves', [])
+ # Some entries have no CVE entries, despite they exist. Example
+ # CVE-2017-1000502.
+ cves = vulnerability.get("cves", [])
vulnerability_ids = list()
if cves:
- if len(cves[0].get('cwe', [])) > 0:
- cwe = decode_cwe_number(cves[0].get('cwe', [])[0])
- if 'cvss_v3' in cves[0]:
- cvss_v3 = cves[0]['cvss_v3']
+ if len(cves[0].get("cwe", [])) > 0:
+ cwe = decode_cwe_number(cves[0].get("cwe", [])[0])
+ if "cvss_v3" in cves[0]:
+ cvss_v3 = cves[0]["cvss_v3"]
cvssv3 = CVSS3.from_rh_vector(cvss_v3).clean_vector()
- impact_paths = vulnerability.get('impact_path', [])
+ impact_paths = vulnerability.get("impact_path", [])
if len(impact_paths) > 0:
impact_path = decode_impact_path(impact_paths[0])
result = hashlib.sha256()
- if 'issue_id' in vulnerability:
- unique_id = str(artifact_sha256 + impact_path.name + impact_path.version + vulnerability['issue_id'])
- vuln_id_from_tool = vulnerability['issue_id']
+ if "issue_id" in vulnerability:
+ unique_id = str(
+ artifact_sha256
+ + impact_path.name
+ + impact_path.version
+ + vulnerability["issue_id"]
+ )
+ vuln_id_from_tool = vulnerability["issue_id"]
elif cve:
- unique_id = str(artifact_sha256 + impact_path.name + impact_path.version + cve)
+ unique_id = str(
+ artifact_sha256 + impact_path.name + impact_path.version + cve
+ )
else:
- unique_id = str(artifact_sha256 + impact_path.name + impact_path.version + vulnerability['summary'])
+ unique_id = str(
+ artifact_sha256
+ + impact_path.name
+ + impact_path.version
+ + vulnerability["summary"]
+ )
vuln_id_from_tool = ""
result.update(unique_id.encode())
unique_id_from_tool = result.hexdigest()
@@ -85,28 +113,32 @@ def get_item(vulnerability, service, test, artifact_name, artifact_version, arti
finding = Finding(
vuln_id_from_tool=vuln_id_from_tool,
service=service,
- title=vulnerability['summary'],
+ title=vulnerability["summary"],
cwe=cwe,
cvssv3=cvssv3,
severity=severity,
- description=impact_path.name + ":" + impact_path.version + " -> " + vulnerability['description'],
+ description=impact_path.name
+ + ":"
+ + impact_path.version
+ + " -> "
+ + vulnerability["description"],
test=test,
file_path=impact_paths[0],
component_name=artifact_name,
component_version=artifact_version,
static_finding=True,
dynamic_finding=False,
- unique_id_from_tool=unique_id_from_tool
+ unique_id_from_tool=unique_id_from_tool,
)
if vulnerability_ids:
finding.unsaved_vulnerability_ids = vulnerability_ids
# Add vulnerability ids
vulnerability_ids = list()
- if 'cve' in cves[0]:
- vulnerability_ids.append(cves[0]['cve'])
- if 'issue_id' in vulnerability:
- vulnerability_ids.append(vulnerability['issue_id'])
+ if "cve" in cves[0]:
+ vulnerability_ids.append(cves[0]["cve"])
+ if "issue_id" in vulnerability:
+ vulnerability_ids.append(vulnerability["issue_id"])
if vulnerability_ids:
finding.unsaved_vulnerability_ids = vulnerability_ids
@@ -115,10 +147,11 @@ def get_item(vulnerability, service, test, artifact_name, artifact_version, arti
# Regex helpers
+
def decode_service(name):
match = re.match(r".*/(.*):", name, re.IGNORECASE)
if match is None:
- return ''
+ return ""
return match[1]
@@ -126,13 +159,13 @@ def decode_cwe_number(value):
match = re.match(r"CWE-\d+", value, re.IGNORECASE)
if match is None:
return 0
- return int(match[0].rsplit('-')[1])
+ return int(match[0].rsplit("-")[1])
def decode_artifact(artifact_general):
artifact = Artifact("", "", "")
- artifact.sha256 = artifact_general['sha256']
- match = re.match(r"(.*):(.*)", artifact_general['name'], re.IGNORECASE)
+ artifact.sha256 = artifact_general["sha256"]
+ match = re.match(r"(.*):(.*)", artifact_general["name"], re.IGNORECASE)
if match:
artifact.name = match[1]
artifact.version = match[2]
diff --git a/dojo/tools/jfrog_xray_unified/parser.py b/dojo/tools/jfrog_xray_unified/parser.py
index bb48fe815f..23e739101c 100644
--- a/dojo/tools/jfrog_xray_unified/parser.py
+++ b/dojo/tools/jfrog_xray_unified/parser.py
@@ -22,8 +22,8 @@ def get_findings(self, json_output, test):
def get_items(self, tree, test):
items = []
- if 'rows' in tree:
- vulnerabilityTree = tree['rows']
+ if "rows" in tree:
+ vulnerabilityTree = tree["rows"]
for node in vulnerabilityTree:
item = get_item(node, test)
@@ -35,24 +35,28 @@ def get_items(self, tree, test):
def get_item(vulnerability, test):
# Some items have multiple CVEs for some reason, so get the CVE with the highest CVSSv3 score.
- # Note: the xray v2 importer just took the first CVE in the list, that doesn't seem ideal though
+ # Note: the xray v2 importer just took the first CVE in the list, that
+ # doesn't seem ideal though
highestCvssV3Index = 0
highestCvssV3Score = 0
- for thisCveIndex in range(0, len(vulnerability['cves']) - 1):
- # not all cves have cvssv3 scores, so skip these. If no v3 scores, we'll default to index 0
- if 'cvss_v3_score' in vulnerability['cves'][thisCveIndex]:
- thisCvssV3Score = vulnerability['cves'][thisCveIndex]['cvss_v3_score']
+ for thisCveIndex in range(0, len(vulnerability["cves"]) - 1):
+ # not all cves have cvssv3 scores, so skip these. If no v3 scores,
+ # we'll default to index 0
+ if "cvss_v3_score" in vulnerability["cves"][thisCveIndex]:
+ thisCvssV3Score = vulnerability["cves"][thisCveIndex][
+ "cvss_v3_score"
+ ]
if thisCvssV3Score > highestCvssV3Score:
highestCvssV3Index = thisCveIndex
highestCvssV3Score = thisCvssV3Score
# Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss
- if 'severity' in vulnerability:
- if vulnerability['severity'] == 'Unknown':
+ if "severity" in vulnerability:
+ if vulnerability["severity"] == "Unknown":
severity = "Info"
else:
- severity = vulnerability['severity'].title()
+ severity = vulnerability["severity"].title()
# TODO: Needs UNKNOWN new status in the model.
else:
severity = "Info"
@@ -66,61 +70,79 @@ def get_item(vulnerability, test):
mitigation = None
extra_desc = ""
- cves = vulnerability.get('cves', [])
+ cves = vulnerability.get("cves", [])
if len(cves) > 0:
worstCve = cves[cveIndex]
- if 'cve' in cves[cveIndex]:
- vulnerability_id = worstCve['cve']
- if 'cvss_v3_vector' in worstCve:
- cvss_v3 = worstCve['cvss_v3_vector']
+ if "cve" in cves[cveIndex]:
+ vulnerability_id = worstCve["cve"]
+ if "cvss_v3_vector" in worstCve:
+ cvss_v3 = worstCve["cvss_v3_vector"]
cvssv3 = cvss_v3
- if 'cvss_v2_vector' in worstCve:
- cvss_v2 = worstCve['cvss_v2_vector']
+ if "cvss_v2_vector" in worstCve:
+ cvss_v2 = worstCve["cvss_v2_vector"]
- if 'fixed_versions' in vulnerability and len(vulnerability['fixed_versions']) > 0:
+ if (
+ "fixed_versions" in vulnerability
+ and len(vulnerability["fixed_versions"]) > 0
+ ):
mitigation = "Versions containing a fix:\n"
- mitigation = mitigation + "\n".join(vulnerability['fixed_versions'])
-
- if 'external_advisory_source' in vulnerability and 'external_advisory_severity' in vulnerability:
- extra_desc = vulnerability['external_advisory_source'] + ": " + vulnerability['external_advisory_severity']
-
- if vulnerability['issue_id']:
- title = vulnerability['issue_id'] + " - " + vulnerability['summary']
+ mitigation = mitigation + "\n".join(vulnerability["fixed_versions"])
+
+ if (
+ "external_advisory_source" in vulnerability
+ and "external_advisory_severity" in vulnerability
+ ):
+ extra_desc = (
+ vulnerability["external_advisory_source"]
+ + ": "
+ + vulnerability["external_advisory_severity"]
+ )
+
+ if vulnerability["issue_id"]:
+ title = vulnerability["issue_id"] + " - " + vulnerability["summary"]
else:
- title = vulnerability['summary']
+ title = vulnerability["summary"]
- references = "\n".join(vulnerability['references'])
+ references = "\n".join(vulnerability["references"])
- scan_time = datetime.strptime(vulnerability['artifact_scan_time'], "%Y-%m-%dT%H:%M:%S%z")
+ scan_time = datetime.strptime(
+ vulnerability["artifact_scan_time"], "%Y-%m-%dT%H:%M:%S%z"
+ )
- # component has several parts separated by colons. Last part is the version, everything else is the name
- splitComponent = vulnerability['vulnerable_component'].split(':')
+ # component has several parts separated by colons. Last part is the
+ # version, everything else is the name
+ splitComponent = vulnerability["vulnerable_component"].split(":")
component_name = ":".join(splitComponent[:-1])
component_version = splitComponent[-1:][0]
# remove package type from component name
component_name = component_name.split("://", 1)[1]
- tags = ["packagetype_" + vulnerability['package_type']]
+ tags = ["packagetype_" + vulnerability["package_type"]]
# create the finding object
finding = Finding(
title=title,
test=test,
severity=severity,
- description=(vulnerability['description'] + "\n\n" + extra_desc).strip(),
+ description=(
+ vulnerability["description"] + "\n\n" + extra_desc
+ ).strip(),
mitigation=mitigation,
component_name=component_name,
component_version=component_version,
- file_path=vulnerability['path'],
- severity_justification="CVSS v3 base score: {}\nCVSS v2 base score: {}".format(cvss_v3, cvss_v2),
+ file_path=vulnerability["path"],
+ severity_justification="CVSS v3 base score: {}\nCVSS v2 base score: {}".format(
+ cvss_v3, cvss_v2
+ ),
static_finding=True,
dynamic_finding=False,
references=references,
impact=severity,
cvssv3=cvssv3,
date=scan_time,
- unique_id_from_tool=vulnerability['issue_id'],
- tags=tags)
+ unique_id_from_tool=vulnerability["issue_id"],
+ tags=tags,
+ )
if vulnerability_id:
finding.unsaved_vulnerability_ids = [vulnerability_id]
diff --git a/dojo/tools/jfrogxray/parser.py b/dojo/tools/jfrogxray/parser.py
index d2a34a6514..9f45abd6be 100644
--- a/dojo/tools/jfrogxray/parser.py
+++ b/dojo/tools/jfrogxray/parser.py
@@ -24,21 +24,32 @@ def get_findings(self, json_output, test):
def get_items(self, tree, test):
items = {}
- if 'data' in tree:
- vulnerabilityTree = tree['data']
+ if "data" in tree:
+ vulnerabilityTree = tree["data"]
for node in vulnerabilityTree:
-
item = get_item(node, test)
title_cve = "No CVE"
- more_details = node.get('component_versions').get('more_details')
- if 'cves' in more_details:
- if 'cve' in more_details.get('cves')[0]:
- title_cve = node.get('component_versions').get('more_details').get('cves')[0].get('cve')
-
- unique_key = node.get('id') + node.get('summary') + node.get('provider') + node.get('source_comp_id') + \
- title_cve
+ more_details = node.get("component_versions").get(
+ "more_details"
+ )
+ if "cves" in more_details:
+ if "cve" in more_details.get("cves")[0]:
+ title_cve = (
+ node.get("component_versions")
+ .get("more_details")
+ .get("cves")[0]
+ .get("cve")
+ )
+
+ unique_key = (
+ node.get("id")
+ + node.get("summary")
+ + node.get("provider")
+ + node.get("source_comp_id")
+ + title_cve
+ )
items[unique_key] = item
return list(items.values())
@@ -48,16 +59,16 @@ def decode_cwe_number(value):
match = re.match(r"CWE-\d+", value, re.IGNORECASE)
if match is None:
return 0
- return int(match[0].rsplit('-')[1])
+ return int(match[0].rsplit("-")[1])
def get_item(vulnerability, test):
# Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss
- if 'severity' in vulnerability:
- if vulnerability['severity'] == 'Unknown':
+ if "severity" in vulnerability:
+ if vulnerability["severity"] == "Unknown":
severity = "Info"
else:
- severity = vulnerability['severity'].title()
+ severity = vulnerability["severity"].title()
# TODO: Needs UNKNOWN new status in the model.
else:
severity = "Info"
@@ -68,44 +79,75 @@ def get_item(vulnerability, test):
cvss_v3 = "No CVSS v3 score."
mitigation = None
extra_desc = ""
- # Some entries have no CVE entries, despite they exist. Example CVE-2017-1000502.
- cves = vulnerability['component_versions']['more_details'].get('cves', [])
+ # Some entries have no CVE entries, despite they exist. Example
+ # CVE-2017-1000502.
+ cves = vulnerability["component_versions"]["more_details"].get("cves", [])
if len(cves) > 0:
for item in cves:
- if item.get('cve'):
- vulnerability_ids.append(item.get('cve'))
+ if item.get("cve"):
+ vulnerability_ids.append(item.get("cve"))
# take only the first one for now, limitation of DD model.
- if len(cves[0].get('cwe', [])) > 0:
- cwe = decode_cwe_number(cves[0].get('cwe', [])[0])
- if 'cvss_v3' in cves[0]:
- cvss_v3 = cves[0]['cvss_v3']
+ if len(cves[0].get("cwe", [])) > 0:
+ cwe = decode_cwe_number(cves[0].get("cwe", [])[0])
+ if "cvss_v3" in cves[0]:
+ cvss_v3 = cves[0]["cvss_v3"]
# this dedicated package will clean the vector
cvssv3 = CVSS3.from_rh_vector(cvss_v3).clean_vector()
- if 'fixed_versions' in vulnerability['component_versions']:
+ if "fixed_versions" in vulnerability["component_versions"]:
mitigation = "**Versions containing a fix:**\n"
- mitigation = mitigation + "\n".join(vulnerability['component_versions']['fixed_versions'])
+ mitigation = mitigation + "\n".join(
+ vulnerability["component_versions"]["fixed_versions"]
+ )
- if 'vulnerable_versions' in vulnerability['component_versions']:
+ if "vulnerable_versions" in vulnerability["component_versions"]:
extra_desc = "\n**Versions that are vulnerable:**\n"
- extra_desc += "\n".join(vulnerability['component_versions']['vulnerable_versions'])
-
- provider = vulnerability.get('component_versions').get('more_details').get('provider')
+ extra_desc += "\n".join(
+ vulnerability["component_versions"]["vulnerable_versions"]
+ )
+
+ provider = (
+ vulnerability.get("component_versions")
+ .get("more_details")
+ .get("provider")
+ )
if provider:
extra_desc += f"\n**Provider:** {provider}"
- component_name = vulnerability.get('component')
- component_version = vulnerability.get('source_comp_id')[len(vulnerability.get('source_id', '')) + 1:]
+ component_name = vulnerability.get("component")
+ component_version = vulnerability.get("source_comp_id")[
+ len(vulnerability.get("source_id", "")) + 1:
+ ]
# The 'id' field is empty? (at least in my sample file)
if vulnerability_ids:
- if vulnerability['id']:
- title = vulnerability['id'] + " - " + str(vulnerability_ids[0]) + " - " + component_name + ":" + component_version
+ if vulnerability["id"]:
+ title = (
+ vulnerability["id"]
+ + " - "
+ + str(vulnerability_ids[0])
+ + " - "
+ + component_name
+ + ":"
+ + component_version
+ )
else:
- title = str(vulnerability_ids[0]) + " - " + component_name + ":" + component_version
+ title = (
+ str(vulnerability_ids[0])
+ + " - "
+ + component_name
+ + ":"
+ + component_version
+ )
else:
- if vulnerability['id']:
- title = vulnerability['id'] + " - " + component_name + ":" + component_version
+ if vulnerability["id"]:
+ title = (
+ vulnerability["id"]
+ + " - "
+ + component_name
+ + ":"
+ + component_version
+ )
else:
title = "No CVE - " + component_name + ":" + component_version
@@ -115,14 +157,15 @@ def get_item(vulnerability, test):
cwe=cwe,
test=test,
severity=severity,
- description=(vulnerability['summary'] + extra_desc).strip(),
+ description=(vulnerability["summary"] + extra_desc).strip(),
mitigation=mitigation,
component_name=component_name,
component_version=component_version,
- file_path=vulnerability.get('source_comp_id'),
+ file_path=vulnerability.get("source_comp_id"),
static_finding=True,
dynamic_finding=False,
- cvssv3=cvssv3)
+ cvssv3=cvssv3,
+ )
if vulnerability_ids:
finding.unsaved_vulnerability_ids = vulnerability_ids
return finding
diff --git a/dojo/tools/kics/parser.py b/dojo/tools/kics/parser.py
index a3942cca7e..365a508cb9 100644
--- a/dojo/tools/kics/parser.py
+++ b/dojo/tools/kics/parser.py
@@ -28,36 +28,42 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
data = json.load(filename)
dupes = {}
- for query in data['queries']:
- name = query.get('query_name')
- query_url = query.get('query_url')
- if query.get('severity') in self.SEVERITY:
- severity = self.SEVERITY[query.get('severity')]
+ for query in data["queries"]:
+ name = query.get("query_name")
+ query_url = query.get("query_url")
+ if query.get("severity") in self.SEVERITY:
+ severity = self.SEVERITY[query.get("severity")]
else:
severity = "Medium"
- platform = query.get('platform')
- category = query.get('category')
- for item in query.get('files'):
- file_name = item.get('file_name')
- line_number = item.get('line')
- issue_type = item.get('issue_type')
- expected_value = item.get('expected_value')
- actual_value = item.get('actual_value')
+ platform = query.get("platform")
+ category = query.get("category")
+ for item in query.get("files"):
+ file_name = item.get("file_name")
+ line_number = item.get("line")
+ issue_type = item.get("issue_type")
+ expected_value = item.get("expected_value")
+ actual_value = item.get("actual_value")
description = f"{query.get('description','')}\n"
if platform:
- description += f'**Platform:** {platform}\n'
+ description += f"**Platform:** {platform}\n"
if category:
- description += f'**Category:** {category}\n'
+ description += f"**Category:** {category}\n"
if issue_type:
- description += f'**Issue type:** {issue_type}\n'
+ description += f"**Issue type:** {issue_type}\n"
if actual_value:
- description += f'**Actual value:** {actual_value}\n'
- if description.endswith('\n'):
+ description += f"**Actual value:** {actual_value}\n"
+ if description.endswith("\n"):
description = description[:-1]
dupe_key = hashlib.sha256(
- (platform + category + issue_type + file_name + str(line_number)).encode("utf-8")
+ (
+ platform
+ + category
+ + issue_type
+ + file_name
+ + str(line_number)
+ ).encode("utf-8")
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/kiuwan/parser.py b/dojo/tools/kiuwan/parser.py
index 70cbcdec58..14bbc85f53 100644
--- a/dojo/tools/kiuwan/parser.py
+++ b/dojo/tools/kiuwan/parser.py
@@ -4,28 +4,28 @@
from dojo.models import Finding
-__author__ = 'dr3dd589'
+__author__ = "dr3dd589"
-class Severityfilter():
+class Severityfilter:
def __init__(self):
- self.severity_mapping = {'Very Low': 'Info',
- 'Low': 'Low',
- 'Normal': 'Medium',
- 'High': 'High',
- 'Very High': 'Critical'
- }
+ self.severity_mapping = {
+ "Very Low": "Info",
+ "Low": "Low",
+ "Normal": "Medium",
+ "High": "High",
+ "Very High": "Critical",
+ }
self.severity = None
def eval_column(self, column_value):
if column_value in list(self.severity_mapping.keys()):
self.severity = self.severity_mapping[column_value]
else:
- self.severity = 'Info'
+ self.severity = "Info"
class KiuwanParser(object):
-
def get_scan_types(self):
return ["Kiuwan Scan"]
@@ -37,9 +37,11 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
content = filename.read()
- if type(content) is bytes:
- content = content.decode('utf-8')
- reader = csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"')
+ if isinstance(content, bytes):
+ content = content.decode("utf-8")
+ reader = csv.DictReader(
+ io.StringIO(content), delimiter=",", quotechar='"'
+ )
csvarray = []
for row in reader:
@@ -50,33 +52,55 @@ def get_findings(self, filename, test):
finding = Finding(test=test)
findingdict = {}
severityfilter = Severityfilter()
- severityfilter.eval_column(row['Priority'])
- findingdict['severity'] = severityfilter.severity
- findingdict['title'] = row['Rule']
- findingdict['file'] = row['File']
- findingdict['line_number'] = row['Line number']
- findingdict['description'] = "**Vulnerability type** : " + row['Vulnerability type'] + "\n\n" + \
- "**CWE Scope** : " + row['CWE Scope'] + "\n\n" + \
- "**Line number** : " + row['Line number'] + "\n\n" + \
- "**Code at line number** : " + row['Line text'] + "\n\n" + \
- "**Normative** : " + row['Normative'] + "\n\n" + \
- "**Rule code** : " + row['Rule code'] + "\n\n" + \
- "**Status** : " + row['Status'] + "\n\n" + \
- "**Source file** : " + row['Source file'] + "\n\n" + \
- "**Source line number** : " + row['Source line number'] + "\n\n" + \
- "**Code at sorce line number** : " + row['Source line text'] + "\n"
-
- finding.title = findingdict['title']
- finding.file_path = findingdict['file']
- finding.line = findingdict['line_number']
- finding.description = findingdict['description']
+ severityfilter.eval_column(row["Priority"])
+ findingdict["severity"] = severityfilter.severity
+ findingdict["title"] = row["Rule"]
+ findingdict["file"] = row["File"]
+ findingdict["line_number"] = row["Line number"]
+ findingdict["description"] = (
+ "**Vulnerability type** : "
+ + row["Vulnerability type"]
+ + "\n\n"
+ + "**CWE Scope** : "
+ + row["CWE Scope"]
+ + "\n\n"
+ + "**Line number** : "
+ + row["Line number"]
+ + "\n\n"
+ + "**Code at line number** : "
+ + row["Line text"]
+ + "\n\n"
+ + "**Normative** : "
+ + row["Normative"]
+ + "\n\n"
+ + "**Rule code** : "
+ + row["Rule code"]
+ + "\n\n"
+ + "**Status** : "
+ + row["Status"]
+ + "\n\n"
+ + "**Source file** : "
+ + row["Source file"]
+ + "\n\n"
+ + "**Source line number** : "
+ + row["Source line number"]
+ + "\n\n"
+ + "**Code at sorce line number** : "
+ + row["Source line text"]
+ + "\n"
+ )
+
+ finding.title = findingdict["title"]
+ finding.file_path = findingdict["file"]
+ finding.line = findingdict["line_number"]
+ finding.description = findingdict["description"]
finding.references = "Not provided!"
finding.mitigation = "Not provided!"
- finding.severity = findingdict['severity']
+ finding.severity = findingdict["severity"]
finding.static_finding = True
try:
- finding.cwe = int(row['CWE'])
- except:
+ finding.cwe = int(row["CWE"])
+ except Exception:
pass
if finding is not None:
@@ -85,7 +109,15 @@ def get_findings(self, filename, test):
if finding.description is None:
finding.description = ""
- key = hashlib.md5((finding.severity + '|' + finding.title + '|' + finding.description).encode("utf-8")).hexdigest()
+ key = hashlib.md5(
+ (
+ finding.severity
+ + "|"
+ + finding.title
+ + "|"
+ + finding.description
+ ).encode("utf-8")
+ ).hexdigest()
if key not in dupes:
dupes[key] = finding
diff --git a/dojo/tools/kubebench/parser.py b/dojo/tools/kubebench/parser.py
index 6bff6bd088..a54bcaf480 100644
--- a/dojo/tools/kubebench/parser.py
+++ b/dojo/tools/kubebench/parser.py
@@ -4,7 +4,6 @@
class KubeBenchParser(object):
-
def get_scan_types(self):
return ["kube-bench Scan"]
@@ -16,8 +15,8 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, json_output, test):
tree = json.load(json_output)
- if 'Controls' in tree:
- return self.get_chapters(tree['Controls'], test)
+ if "Controls" in tree:
+ return self.get_chapters(tree["Controls"], test)
else:
return self.get_chapters(tree, test)
@@ -34,14 +33,14 @@ def get_chapters(self, tree, test):
def get_tests(tree, test):
items_from_tests = []
- description = ''
- if 'id' in tree:
- description += tree['id'] + " "
- if 'text' in tree:
- description += tree['text']
- description += '\n'
+ description = ""
+ if "id" in tree:
+ description += tree["id"] + " "
+ if "text" in tree:
+ description += tree["text"]
+ description += "\n"
- for node in tree['tests']:
+ for node in tree["tests"]:
items_from_results = get_results(node, test, description)
items_from_tests += items_from_results
@@ -51,13 +50,13 @@ def get_tests(tree, test):
def get_results(tree, test, description):
items_from_results = []
- if 'section' in tree:
- description += tree['section'] + ' '
- if 'desc' in tree:
- description += tree['desc']
- description += '\n'
+ if "section" in tree:
+ description += tree["section"] + " "
+ if "desc" in tree:
+ description += tree["desc"]
+ description += "\n"
- for node in tree['results']:
+ for node in tree["results"]:
item = get_item(node, test, description)
if item:
items_from_results.append(item)
@@ -66,53 +65,55 @@ def get_results(tree, test, description):
def get_item(vuln, test, description):
-
- status = vuln.get('status', None)
- reason = vuln.get('reason', None)
+ status = vuln.get("status", None)
+ reason = vuln.get("reason", None)
if status is None:
return None
- # kube-bench doesn't define severities. So we use the status to define the severity
- if status.upper() == 'FAIL':
- severity = 'Medium'
- elif status.upper() == 'WARN' and reason != 'Test marked as a manual test':
- severity = 'Info'
+ # kube-bench doesn't define severities. So we use the status to define the
+ # severity
+ if status.upper() == "FAIL":
+ severity = "Medium"
+ elif status.upper() == "WARN" and reason != "Test marked as a manual test":
+ severity = "Info"
else:
return None
- test_number = vuln.get('test_number', 'Test number not found')
- test_description = vuln.get('test_desc', 'Description not found')
-
- title = test_number + ' - ' + test_description
-
- if 'test_number' in vuln:
- description += vuln['test_number'] + ' '
- if 'test_desc' in vuln:
- description += vuln['test_desc']
- if 'audit' in vuln:
- description += '\n'
- description += 'Audit: {}\n'.format(vuln['audit'])
- if 'reason' in vuln and vuln['reason'] != '':
- description += '\n'
- description += 'Reason: {}\n'.format(vuln['reason'])
- if 'expected_result' in vuln and vuln['expected_result'] != '':
- description += '\n'
- description += 'Expected result: {}\n'.format(vuln['expected_result'])
- if 'actual_value' in vuln and vuln['actual_value'] != '':
- description += '\n'
- description += 'Actual value: {}\n'.format(vuln['actual_value'])
-
- mitigation = vuln.get('remediation', None)
+ test_number = vuln.get("test_number", "Test number not found")
+ test_description = vuln.get("test_desc", "Description not found")
+
+ title = test_number + " - " + test_description
+
+ if "test_number" in vuln:
+ description += vuln["test_number"] + " "
+ if "test_desc" in vuln:
+ description += vuln["test_desc"]
+ if "audit" in vuln:
+ description += "\n"
+ description += "Audit: {}\n".format(vuln["audit"])
+ if "reason" in vuln and vuln["reason"] != "":
+ description += "\n"
+ description += "Reason: {}\n".format(vuln["reason"])
+ if "expected_result" in vuln and vuln["expected_result"] != "":
+ description += "\n"
+ description += "Expected result: {}\n".format(vuln["expected_result"])
+ if "actual_value" in vuln and vuln["actual_value"] != "":
+ description += "\n"
+ description += "Actual value: {}\n".format(vuln["actual_value"])
+
+ mitigation = vuln.get("remediation", None)
vuln_id_from_tool = test_number
- finding = Finding(title=title,
- test=test,
- description=description,
- severity=severity,
- mitigation=mitigation,
- vuln_id_from_tool=vuln_id_from_tool,
- static_finding=True,
- dynamic_finding=False)
+ finding = Finding(
+ title=title,
+ test=test,
+ description=description,
+ severity=severity,
+ mitigation=mitigation,
+ vuln_id_from_tool=vuln_id_from_tool,
+ static_finding=True,
+ dynamic_finding=False,
+ )
return finding
diff --git a/dojo/tools/meterian/parser.py b/dojo/tools/meterian/parser.py
index 223b56785a..e47cb46901 100644
--- a/dojo/tools/meterian/parser.py
+++ b/dojo/tools/meterian/parser.py
@@ -5,7 +5,6 @@
class MeterianParser(object):
-
def get_scan_types(self):
return ["Meterian Scan"]
@@ -20,9 +19,13 @@ def get_findings(self, report, test):
report_json = json.load(report)
security_reports = self.get_security_reports(report_json)
- scan_date = str(datetime.fromisoformat(report_json["timestamp"]).date())
+ scan_date = str(
+ datetime.fromisoformat(report_json["timestamp"]).date()
+ )
for single_security_report in security_reports:
- findings += self.do_get_findings(single_security_report, scan_date, test)
+ findings += self.do_get_findings(
+ single_security_report, scan_date, test
+ )
return findings
@@ -38,21 +41,23 @@ def do_get_findings(self, single_security_report, scan_date, test):
findings = []
language = single_security_report["language"]
for dependency_report in single_security_report["reports"]:
-
lib_name = dependency_report["dependency"]["name"]
lib_ver = dependency_report["dependency"]["version"]
finding_title = lib_name + ":" + lib_ver
for advisory in dependency_report["advices"]:
-
severity = self.get_severity(advisory)
finding = Finding(
title=finding_title,
date=scan_date,
test=test,
severity=severity,
- severity_justification="Issue severity of: **" + severity + "** from a base " +
- "CVSS score of: **" + str(advisory.get('cvss')) + "**",
- description=advisory['description'],
+ severity_justification="Issue severity of: **"
+ + severity
+ + "** from a base "
+ + "CVSS score of: **"
+ + str(advisory.get("cvss"))
+ + "**",
+ description=advisory["description"],
component_name=lib_name,
component_version=lib_ver,
false_p=False,
@@ -62,11 +67,11 @@ def do_get_findings(self, single_security_report, scan_date, test):
static_finding=True,
dynamic_finding=False,
file_path="Manifest file",
- unique_id_from_tool=advisory['id'],
- tags=[language]
+ unique_id_from_tool=advisory["id"],
+ tags=[language],
)
- if 'cve' in advisory:
+ if "cve" in advisory:
if "N/A" != advisory["cve"]:
finding.unsaved_vulnerability_ids = [advisory["cve"]]
@@ -76,11 +81,29 @@ def do_get_findings(self, single_security_report, scan_date, test):
mitigation_msg = "## Remediation\n"
safe_versions = dependency_report["safeVersions"]
if "latestPatch" in safe_versions:
- mitigation_msg += "Upgrade " + lib_name + " to version " + safe_versions["latestPatch"] + " or higher."
+ mitigation_msg += (
+ "Upgrade "
+ + lib_name
+ + " to version "
+ + safe_versions["latestPatch"]
+ + " or higher."
+ )
elif "latestMinor" in safe_versions:
- mitigation_msg += "Upgrade " + lib_name + " to version " + safe_versions["latestMinor"] + " or higher."
+ mitigation_msg += (
+ "Upgrade "
+ + lib_name
+ + " to version "
+ + safe_versions["latestMinor"]
+ + " or higher."
+ )
elif "latestMajor" in safe_versions:
- mitigation_msg += "Upgrade " + lib_name + " to version " + safe_versions["latestMajor"] + "."
+ mitigation_msg += (
+ "Upgrade "
+ + lib_name
+ + " to version "
+ + safe_versions["latestMajor"]
+ + "."
+ )
else:
mitigation_msg = "We were not able to provide a safe version for this library.\nYou should consider replacing this component as it could be an issue for the safety of your application."
finding.mitigation = mitigation_msg
@@ -99,17 +122,21 @@ def do_get_findings(self, single_security_report, scan_date, test):
def get_severity(self, advisory):
# Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss
- if 'cvss' in advisory:
- if advisory['cvss'] <= 3.9:
+ if "cvss" in advisory:
+ if advisory["cvss"] <= 3.9:
severity = "Low"
- elif advisory['cvss'] >= 4.0 and advisory['cvss'] <= 6.9:
+ elif advisory["cvss"] >= 4.0 and advisory["cvss"] <= 6.9:
severity = "Medium"
- elif advisory['cvss'] >= 7.0 and advisory['cvss'] <= 8.9:
+ elif advisory["cvss"] >= 7.0 and advisory["cvss"] <= 8.9:
severity = "High"
else:
severity = "Critical"
else:
- if advisory["severity"] == "SUGGEST" or advisory["severity"] == "NA" or advisory["severity"] == "NONE":
+ if (
+ advisory["severity"] == "SUGGEST"
+ or advisory["severity"] == "NA"
+ or advisory["severity"] == "NONE"
+ ):
severity = "Info"
else:
severity = advisory["severity"].title()
@@ -119,7 +146,10 @@ def get_severity(self, advisory):
def get_reference_url(self, link_obj):
url = link_obj["url"]
if link_obj["type"] == "CVE":
- url = "https://cve.mitre.org/cgi-bin/cvename.cgi?name=" + link_obj["url"]
+ url = (
+ "https://cve.mitre.org/cgi-bin/cvename.cgi?name="
+ + link_obj["url"]
+ )
elif link_obj["type"] == "NVD":
url = "https://nvd.nist.gov/vuln/detail/" + link_obj["url"]
diff --git a/dojo/tools/microfocus_webinspect/parser.py b/dojo/tools/microfocus_webinspect/parser.py
index fcec9c5897..114e11d59c 100644
--- a/dojo/tools/microfocus_webinspect/parser.py
+++ b/dojo/tools/microfocus_webinspect/parser.py
@@ -23,41 +23,52 @@ def get_findings(self, file, test):
tree = parse(file)
# get root of tree.
root = tree.getroot()
- if 'Sessions' not in root.tag:
- raise ValueError("This doesn't seem to be a valid Webinspect xml file.")
+ if "Sessions" not in root.tag:
+ raise ValueError(
+ "This doesn't seem to be a valid Webinspect xml file."
+ )
dupes = dict()
for session in root:
- url = session.find('URL').text
+ url = session.find("URL").text
endpoint = Endpoint.from_uri(url)
- issues = session.find('Issues')
- for issue in issues.findall('Issue'):
+ issues = session.find("Issues")
+ for issue in issues.findall("Issue"):
mitigation = None
reference = None
- severity = MicrofocusWebinspectParser.convert_severity(issue.find('Severity').text)
- for content in issue.findall('ReportSection'):
- name = content.find('Name').text
- if 'Summary' in name:
- if content.find('SectionText').text:
- description = content.find('SectionText').text
- if 'Fix' in name:
- if content.find('SectionText').text:
- mitigation = content.find('SectionText').text
- if 'Reference' in name:
- if name and content.find('SectionText').text:
- reference = html2text.html2text(content.find('SectionText').text)
+ severity = MicrofocusWebinspectParser.convert_severity(
+ issue.find("Severity").text
+ )
+ for content in issue.findall("ReportSection"):
+ name = content.find("Name").text
+ if "Summary" in name:
+ if content.find("SectionText").text:
+ description = content.find("SectionText").text
+ if "Fix" in name:
+ if content.find("SectionText").text:
+ mitigation = content.find("SectionText").text
+ if "Reference" in name:
+ if name and content.find("SectionText").text:
+ reference = html2text.html2text(
+ content.find("SectionText").text
+ )
cwe = 0
description = ""
- classifications = issue.find('Classifications')
- for content in classifications.findall('Classification'):
+ classifications = issue.find("Classifications")
+ for content in classifications.findall("Classification"):
# detect CWE number
# TODO support more than one CWE number
- if "kind" in content.attrib and "CWE" == content.attrib["kind"]:
- cwe = MicrofocusWebinspectParser.get_cwe(content.attrib['identifier'])
+ if (
+ "kind" in content.attrib
+ and "CWE" == content.attrib["kind"]
+ ):
+ cwe = MicrofocusWebinspectParser.get_cwe(
+ content.attrib["identifier"]
+ )
description += "\n\n" + content.text + "\n"
finding = Finding(
- title=issue.findtext('Name'),
+ title=issue.findtext("Name"),
test=test,
cwe=cwe,
description=description,
@@ -74,11 +85,15 @@ def get_findings(self, file, test):
finding.unsaved_endpoints = [endpoint]
# make dupe hash key
- dupe_key = hashlib.sha256("|".join([
- finding.description,
- finding.title,
- finding.severity,
- ]).encode('utf-8')).hexdigest()
+ dupe_key = hashlib.sha256(
+ "|".join(
+ [
+ finding.description,
+ finding.title,
+ finding.severity,
+ ]
+ ).encode("utf-8")
+ ).hexdigest()
# check if dupes are present.
if dupe_key in dupes:
find = dupes[dupe_key]
diff --git a/dojo/tools/mobsfscan/parser.py b/dojo/tools/mobsfscan/parser.py
index 9b5dc57de1..58514eaea8 100644
--- a/dojo/tools/mobsfscan/parser.py
+++ b/dojo/tools/mobsfscan/parser.py
@@ -26,23 +26,29 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
data = json.load(filename)
- if len(data.get('results')) == 0:
+ if len(data.get("results")) == 0:
return []
else:
dupes = {}
- for key, item in data.get('results').items():
- metadata = item.get('metadata')
- cwe = int(re.match(r'(cwe|CWE)-([0-9]+)', metadata.get('cwe')).group(2))
- masvs = metadata.get('masvs')
- owasp_mobile = metadata.get('owasp-mobile')
- description = "\n".join([
- f"**Description:** `{metadata.get('description')}`",
- f"**OWASP MASVS:** `{masvs}`",
- f"**OWASP Mobile:** `{owasp_mobile}`",
- ])
- references = metadata.get('reference')
- if metadata.get('severity') in self.SEVERITY:
- severity = self.SEVERITY[metadata.get('severity')]
+ for key, item in data.get("results").items():
+ metadata = item.get("metadata")
+ cwe = int(
+ re.match(r"(cwe|CWE)-([0-9]+)", metadata.get("cwe")).group(
+ 2
+ )
+ )
+ masvs = metadata.get("masvs")
+ owasp_mobile = metadata.get("owasp-mobile")
+ description = "\n".join(
+ [
+ f"**Description:** `{metadata.get('description')}`",
+ f"**OWASP MASVS:** `{masvs}`",
+ f"**OWASP Mobile:** `{owasp_mobile}`",
+ ]
+ )
+ references = metadata.get("reference")
+ if metadata.get("severity") in self.SEVERITY:
+ severity = self.SEVERITY[metadata.get("severity")]
else:
severity = "Info"
@@ -55,15 +61,15 @@ def get_findings(self, filename, test):
description=description,
references=references,
)
- if item.get('files'):
- for file in item.get('files'):
- file_path = file.get('file_path')
- line = file.get('match_lines')[0]
+ if item.get("files"):
+ for file in item.get("files"):
+ file_path = file.get("file_path")
+ line = file.get("match_lines")[0]
finding.file_path = file_path
finding.line = line
dupe_key = hashlib.sha256(
- (key + str(cwe) + masvs + owasp_mobile).encode('utf-8')
+ (key + str(cwe) + masvs + owasp_mobile).encode("utf-8")
).hexdigest()
if dupe_key in dupes:
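Note on the hunks above: the kics, kiuwan, webinspect and mobsfscan parsers all share the de-duplication idiom that these formatting changes re-wrap; identifying fields are hashed into a key and findings are collected in a dupes dict. A minimal sketch of that idiom follows. The key fields chosen here (severity, title, description) are illustrative; each parser hashes its own columns.

    import hashlib

    def deduplicate(findings):
        # Sketch of the dupes-dict idiom used by the parsers above.
        # "findings" is assumed to be a list of dicts with str fields.
        dupes = {}
        for finding in findings:
            key = hashlib.sha256(
                "|".join(
                    [finding["severity"], finding["title"], finding["description"]]
                ).encode("utf-8")
            ).hexdigest()
            if key not in dupes:
                # first occurrence wins; some parsers instead merge the
                # duplicate's description into the existing finding
                dupes[key] = finding
        return list(dupes.values())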
From c78499722dc78d57f03327a13dd58a274f4ec8bd Mon Sep 17 00:00:00 2001
From: Alejandro Tortolero
Date: Mon, 3 Jul 2023 15:14:40 -0500
Subject: [PATCH 10/85] Update files with PEP8 standards in folder dojo/tools
#005 (#8305)
* Update files in folder dojo/tools/mozilla_observatory with PEP8 standards.
* Update files in folder dojo/tools/netsparker with PEP8 standards.
* Update files in folder dojo/tools/neuvector with PEP8 standards.
* Update files in folder dojo/tools/neuvector_compliance with PEP8 standards.
* Update files in folder dojo/tools/nexpose with PEP8 standards.
* Update files in folder dojo/tools/nikto with PEP8 standards.
* Update files in folder dojo/tools/nmap with PEP8 standards.
* Update files in folder dojo/tools/npm_audit with PEP8 standards.
* Update files in folder dojo/tools/nsp with PEP8 standards.
* Update files in folder dojo/tools/nuclei with PEP8 standards.
* Update files in folder dojo/tools/openscap with PEP8 standards.
* Update files in folder dojo/tools/openvas_csv with PEP8 standards.
* Update files in folder dojo/tools/ort with PEP8 standards.
* Update files in folder dojo/tools/ossindex_devaudit with PEP8 standards.
* Update files in folder dojo/tools/outpost24 with PEP8 standards.
* Update files in folder dojo/tools/php_security_audit_v2 with PEP8 standards.
* Update files in folder dojo/tools/php_symfony_security_check with PEP8 standards.
* Update files in folder dojo/tools/pip_audit with PEP8 standards.
* Update files in folder dojo/tools/pmd with PEP8 standards.
* Update files in folder dojo/tools/popeye with PEP8 standards.
* Update files in folder dojo/tools/pwn_sast with PEP8 standards.
* Update files in folder dojo/tools/qualys with PEP8 standards.
* Update files in folder dojo/tools/qualys_infrascan_webgui with PEP8 standards.
* Update files in folder dojo/tools/qualys_webapp with PEP8 standards.
* Update files in folder dojo/tools/retirejs with PEP8 standards.
* Update files in folder dojo/tools/risk_recon with PEP8 standards.
* Update files in folder dojo/tools/rubocop with PEP8 standards.
* Update files in folder dojo/tools/rusty_hog with PEP8 standards.
* Change BaseException to Exception (see the sketch after this list)
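A rough sketch of why that last change matters, using a hypothetical helper modeled on the try/except fallback that appears in the netsparker and neuvector hunks below: catching Exception still handles ordinary decode errors, but unlike a bare "except:" or "except BaseException:" it lets KeyboardInterrupt and SystemExit propagate, so workers stay interruptible.

    import json

    def load_report(raw):
        # Hypothetical helper; mirrors the fallback pattern in the
        # diffs below, where uploads may arrive as bytes or as str.
        try:
            return json.loads(str(raw, "utf-8"))
        except Exception:
            # TypeError / UnicodeDecodeError land here; KeyboardInterrupt
            # and SystemExit (BaseException subclasses) do not.
            return json.loads(raw)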
---
dojo/tools/mozilla_observatory/parser.py | 22 +-
dojo/tools/netsparker/parser.py | 50 +--
dojo/tools/neuvector/parser.py | 100 ++++--
dojo/tools/neuvector_compliance/parser.py | 114 ++++---
dojo/tools/nexpose/__init__.py | 2 +-
dojo/tools/nexpose/parser.py | 311 ++++++++++--------
dojo/tools/nikto/__init__.py | 2 +-
dojo/tools/nikto/parser.py | 73 ++--
dojo/tools/nmap/__init__.py | 2 +-
dojo/tools/nmap/parser.py | 143 +++++---
dojo/tools/npm_audit/parser.py | 140 ++++----
dojo/tools/nsp/parser.py | 66 ++--
dojo/tools/nuclei/parser.py | 144 ++++----
dojo/tools/openscap/parser.py | 90 +++--
dojo/tools/openvas_csv/parser.py | 92 +++---
dojo/tools/ort/parser.py | 121 ++++---
dojo/tools/ossindex_devaudit/parser.py | 90 ++---
dojo/tools/outpost24/parser.py | 74 +++--
dojo/tools/php_security_audit_v2/parser.py | 26 +-
.../php_symfony_security_check/parser.py | 74 +++--
dojo/tools/pip_audit/parser.py | 28 +-
dojo/tools/pmd/parser.py | 29 +-
dojo/tools/popeye/parser.py | 47 ++-
dojo/tools/pwn_sast/parser.py | 65 ++--
dojo/tools/qualys/csv_parser.py | 87 +++--
dojo/tools/qualys/parser.py | 296 +++++++++--------
dojo/tools/qualys_infrascan_webgui/parser.py | 124 +++----
dojo/tools/qualys_webapp/parser.py | 304 +++++++++++------
dojo/tools/retirejs/parser.py | 57 ++--
dojo/tools/risk_recon/api.py | 55 ++--
dojo/tools/risk_recon/parser.py | 84 +++--
dojo/tools/rubocop/parser.py | 1 -
dojo/tools/rusty_hog/parser.py | 180 ++++++----
33 files changed, 1840 insertions(+), 1253 deletions(-)
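One pattern worth calling out before the diffs: several of the touched parsers map a scanner-specific score or label onto DefectDojo's five severities. The nexpose hunk below uses integer thresholds (9 and up Critical, 7 and up High, 4 and up Medium, above 0 Low, otherwise Info). A condensed sketch, with the function name being illustrative:

    def severity_from_score(score):
        # Condensed from the nexpose threshold mapping below; other
        # parsers (e.g. meterian, in the previous patch) use CVSS
        # ranges instead, so the cut-offs vary per scanner.
        if score >= 9:
            return "Critical"
        elif score >= 7:
            return "High"
        elif score >= 4:
            return "Medium"
        elif score > 0:
            return "Low"
        return "Info"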
diff --git a/dojo/tools/mozilla_observatory/parser.py b/dojo/tools/mozilla_observatory/parser.py
index 0a268e5e52..72e6a6d623 100644
--- a/dojo/tools/mozilla_observatory/parser.py
+++ b/dojo/tools/mozilla_observatory/parser.py
@@ -34,21 +34,25 @@ def get_findings(self, file, test):
for key in nodes:
node = nodes[key]
- description = "\n".join([
- "**Score Description** : `" + node['score_description'] + "`",
- "**Result** : `" + node['result'] + "`"
- "**expectation** : " + str(node.get('expectation')) + "`",
- ])
+ description = "\n".join(
+ [
+ "**Score Description** : `"
+ + node["score_description"]
+ + "`",
+ "**Result** : `" + node["result"] + "`"
+ "**expectation** : " + str(node.get("expectation")) + "`",
+ ]
+ )
finding = Finding(
- title=node['score_description'],
+ title=node["score_description"],
test=test,
- active=not node['pass'],
+ active=not node["pass"],
description=description,
- severity=self.get_severity(int(node['score_modifier'])),
+ severity=self.get_severity(int(node["score_modifier"])),
static_finding=False,
dynamic_finding=True,
- vuln_id_from_tool=node.get('name', key)
+ vuln_id_from_tool=node.get("name", key),
)
findings.append(finding)
diff --git a/dojo/tools/netsparker/parser.py b/dojo/tools/netsparker/parser.py
index efc382ce37..9b4b2d3113 100644
--- a/dojo/tools/netsparker/parser.py
+++ b/dojo/tools/netsparker/parser.py
@@ -7,7 +7,6 @@
class NetsparkerParser(object):
-
def get_scan_types(self):
return ["Netsparker Scan"]
@@ -20,26 +19,27 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
tree = filename.read()
try:
- data = json.loads(str(tree, 'utf-8-sig'))
- except:
+ data = json.loads(str(tree, "utf-8-sig"))
+ except Exception:
data = json.loads(tree)
dupes = dict()
- scan_date = datetime.datetime.strptime(data["Generated"], "%d/%m/%Y %H:%M %p").date()
+ scan_date = datetime.datetime.strptime(
+ data["Generated"], "%d/%m/%Y %H:%M %p"
+ ).date()
for item in data["Vulnerabilities"]:
-
title = item["Name"]
findingdetail = html2text.html2text(item.get("Description", ""))
if "Cwe" in item["Classification"]:
try:
- cwe = int(item["Classification"]["Cwe"].split(',')[0])
- except:
+ cwe = int(item["Classification"]["Cwe"].split(",")[0])
+ except Exception:
cwe = None
else:
cwe = None
sev = item["Severity"]
- if sev not in ['Info', 'Low', 'Medium', 'High', 'Critical']:
- sev = 'Info'
+ if sev not in ["Info", "Low", "Medium", "High", "Critical"]:
+ sev = "Info"
mitigation = html2text.html2text(item.get("RemedialProcedure", ""))
references = html2text.html2text(item.get("RemedyReferences", ""))
url = item["Url"]
@@ -48,16 +48,18 @@ def get_findings(self, filename, test):
request = item["HttpRequest"]["Content"]
response = item["HttpResponse"]["Content"]
- finding = Finding(title=title,
- test=test,
- description=findingdetail,
- severity=sev.title(),
- mitigation=mitigation,
- impact=impact,
- date=scan_date,
- references=references,
- cwe=cwe,
- static_finding=True)
+ finding = Finding(
+ title=title,
+ test=test,
+ description=findingdetail,
+ severity=sev.title(),
+ mitigation=mitigation,
+ impact=impact,
+ date=scan_date,
+ references=references,
+ cwe=cwe,
+ static_finding=True,
+ )
if item["State"].find("FalsePositive") != -1:
finding.active = False
@@ -69,8 +71,14 @@ def get_findings(self, filename, test):
if item["State"].find("AcceptedRisk") != -1:
finding.risk_accepted = True
- if (item["Classification"] is not None) and (item["Classification"]["Cvss"] is not None) and (item["Classification"]["Cvss"]["Vector"] is not None):
- cvss_objects = cvss_parser.parse_cvss_from_text(item["Classification"]["Cvss"]["Vector"])
+ if (
+ (item["Classification"] is not None)
+ and (item["Classification"]["Cvss"] is not None)
+ and (item["Classification"]["Cvss"]["Vector"] is not None)
+ ):
+ cvss_objects = cvss_parser.parse_cvss_from_text(
+ item["Classification"]["Cvss"]["Vector"]
+ )
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
diff --git a/dojo/tools/neuvector/parser.py b/dojo/tools/neuvector/parser.py
index 2607cfc1ef..17be763568 100644
--- a/dojo/tools/neuvector/parser.py
+++ b/dojo/tools/neuvector/parser.py
@@ -5,9 +5,9 @@
logger = logging.getLogger(__name__)
-NEUVECTOR_SCAN_NAME = 'NeuVector (REST)'
-NEUVECTOR_IMAGE_SCAN_ENGAGEMENT_NAME = 'NV image scan'
-NEUVECTOR_CONTAINER_SCAN_ENGAGEMENT_NAME = 'NV container scan'
+NEUVECTOR_SCAN_NAME = "NeuVector (REST)"
+NEUVECTOR_IMAGE_SCAN_ENGAGEMENT_NAME = "NV image scan"
+NEUVECTOR_CONTAINER_SCAN_ENGAGEMENT_NAME = "NV container scan"
class NeuVectorJsonParser(object):
@@ -22,59 +22,92 @@ def parse_json(self, json_output):
try:
data = json_output.read()
try:
- tree = json.loads(str(data, 'utf-8'))
- except:
+ tree = json.loads(str(data, "utf-8"))
+ except Exception:
tree = json.loads(data)
- except:
+ except Exception:
raise ValueError("Invalid format")
return tree
def get_items(self, tree, test):
items = {}
- if 'report' in tree:
- vulnerabilityTree = tree.get('report').get('vulnerabilities', [])
+ if "report" in tree:
+ vulnerabilityTree = tree.get("report").get("vulnerabilities", [])
for node in vulnerabilityTree:
item = get_item(node, test)
- package_name = node.get('package_name')
+ package_name = node.get("package_name")
if len(package_name) > 64:
package_name = package_name[-64:]
- unique_key = node.get('name') + str(package_name + str(
- node.get('package_version')) + str(node.get('severity')))
+ unique_key = node.get("name") + str(
+ package_name
+ + str(node.get("package_version"))
+ + str(node.get("severity"))
+ )
items[unique_key] = item
return list(items.values())
def get_item(vulnerability, test):
- severity = convert_severity(vulnerability.get('severity')) if 'severity' in vulnerability else "Info"
- vector = vulnerability.get('vectors_v3') if 'vectors_v3' in vulnerability else "CVSSv3 vector not provided. "
- fixed_version = vulnerability.get('fixed_version') if 'fixed_version' in vulnerability else "There seems to be no fix yet. Please check description field."
- score_v3 = vulnerability.get('score_v3') if 'score_v3' in vulnerability else "No CVSSv3 score yet."
- package_name = vulnerability.get('package_name')
+ severity = (
+ convert_severity(vulnerability.get("severity"))
+ if "severity" in vulnerability
+ else "Info"
+ )
+ vector = (
+ vulnerability.get("vectors_v3")
+ if "vectors_v3" in vulnerability
+ else "CVSSv3 vector not provided. "
+ )
+ fixed_version = (
+ vulnerability.get("fixed_version")
+ if "fixed_version" in vulnerability
+ else "There seems to be no fix yet. Please check description field."
+ )
+ score_v3 = (
+ vulnerability.get("score_v3")
+ if "score_v3" in vulnerability
+ else "No CVSSv3 score yet."
+ )
+ package_name = vulnerability.get("package_name")
if len(package_name) > 64:
package_name = package_name[-64:]
- description = vulnerability.get('description') if 'description' in vulnerability else ""
- link = vulnerability.get('link') if 'link' in vulnerability else ""
+ description = (
+ vulnerability.get("description")
+ if "description" in vulnerability
+ else ""
+ )
+ link = vulnerability.get("link") if "link" in vulnerability else ""
# create the finding object
finding = Finding(
- title=vulnerability.get('name') + ": " + package_name + " - " + vulnerability.get('package_version'),
+ title=vulnerability.get("name")
+ + ": "
+ + package_name
+ + " - "
+ + vulnerability.get("package_version"),
test=test,
severity=severity,
- description=description + " Vulnerable Package: " +
- package_name + "
Current Version: " + str(
- vulnerability['package_version']) + "
",
+ description=description
+ + " Vulnerable Package: "
+ + package_name
+ + "
Current Version: "
+ + str(vulnerability["package_version"])
+ + "
",
mitigation=fixed_version.title(),
references=link,
component_name=package_name,
- component_version=vulnerability.get('package_version'),
+ component_version=vulnerability.get("package_version"),
false_p=False,
duplicate=False,
out_of_scope=False,
mitigated=None,
- severity_justification="{} (CVSS v3 base score: {})\n".format(vector, score_v3),
- impact=severity)
- finding.unsaved_vulnerability_ids = [vulnerability.get('name')]
+ severity_justification="{} (CVSS v3 base score: {})\n".format(
+ vector, score_v3
+ ),
+ impact=severity,
+ )
+ finding.unsaved_vulnerability_ids = [vulnerability.get("name")]
finding.description = finding.description.strip()
return finding
@@ -82,22 +115,21 @@ def get_item(vulnerability, test):
# see neuvector/share/types.go
def convert_severity(severity):
- if severity.lower() == 'critical':
+ if severity.lower() == "critical":
return "Critical"
- elif severity.lower() == 'high':
+ elif severity.lower() == "high":
return "High"
- elif severity.lower() == 'medium':
+ elif severity.lower() == "medium":
return "Medium"
- elif severity.lower() == 'low':
+ elif severity.lower() == "low":
return "Low"
- elif severity == '':
+ elif severity == "":
return "Info"
else:
return severity.title()
class NeuVectorParser(object):
-
def get_scan_types(self):
return [NEUVECTOR_SCAN_NAME]
@@ -111,7 +143,7 @@ def get_findings(self, filename, test):
if filename is None:
return list()
- if filename.name.lower().endswith('.json'):
+ if filename.name.lower().endswith(".json"):
return NeuVectorJsonParser().parse(filename, test)
else:
- raise ValueError('Unknown File Format')
+ raise ValueError("Unknown File Format")
diff --git a/dojo/tools/neuvector_compliance/parser.py b/dojo/tools/neuvector_compliance/parser.py
index 16570caf3a..74e5e515fd 100644
--- a/dojo/tools/neuvector_compliance/parser.py
+++ b/dojo/tools/neuvector_compliance/parser.py
@@ -4,7 +4,7 @@
from dojo.models import Finding
-NEUVECTOR_SCAN_NAME = 'NeuVector (compliance)'
+NEUVECTOR_SCAN_NAME = "NeuVector (compliance)"
def parse(json_output, test):
@@ -19,10 +19,10 @@ def parse_json(json_output):
try:
data = json_output.read()
try:
- tree = json.loads(str(data, 'utf-8'))
- except:
+ tree = json.loads(str(data, "utf-8"))
+ except Exception:
tree = json.loads(data)
- except:
+ except Exception:
raise ValueError("Invalid format")
return tree
@@ -36,98 +36,106 @@ def get_items(tree, test):
# /v1/host/{id}/compliance or similar. thus, we need to support items in a
# bit different leafs.
testsTree = None
- if 'report' in tree:
- testsTree = tree.get('report').get('checks', [])
+ if "report" in tree:
+ testsTree = tree.get("report").get("checks", [])
else:
- testsTree = tree.get('items', [])
+ testsTree = tree.get("items", [])
for node in testsTree:
item = get_item(node, test)
- unique_key = node.get('type') + node.get('category') + node.get('test_number') + node.get('description')
- unique_key = hashlib.md5(unique_key.encode('utf-8')).hexdigest()
+ unique_key = (
+ node.get("type")
+ + node.get("category")
+ + node.get("test_number")
+ + node.get("description")
+ )
+ unique_key = hashlib.md5(unique_key.encode("utf-8")).hexdigest()
items[unique_key] = item
return list(items.values())
def get_item(node, test):
- if 'test_number' not in node:
+ if "test_number" not in node:
return None
- if 'category' not in node:
+ if "category" not in node:
return None
- if 'description' not in node:
+ if "description" not in node:
return None
- if 'level' not in node:
+ if "level" not in node:
return None
- test_number = node.get('test_number')
- test_description = node.get('description').rstrip()
+ test_number = node.get("test_number")
+ test_description = node.get("description").rstrip()
- title = test_number + ' - ' + test_description
+ title = test_number + " - " + test_description
- test_severity = node.get('level')
+ test_severity = node.get("level")
severity = convert_severity(test_severity)
- mitigation = node.get('remediation', '').rstrip()
+ mitigation = node.get("remediation", "").rstrip()
- category = node.get('category')
+ category = node.get("category")
- vuln_id_from_tool = category + '_' + test_number
+ vuln_id_from_tool = category + "_" + test_number
- test_profile = node.get('profile', 'profile unknown')
+ test_profile = node.get("profile", "profile unknown")
- full_description = '{} ({}), {}:\n'.format(test_number, category, test_profile)
- full_description += '{}\n'.format(test_description)
- full_description += 'Audit: {}\n'.format(test_severity)
- if 'evidence' in node:
- full_description += 'Evidence:\n{}\n'.format(node.get('evidence'))
- if 'location' in node:
- full_description += 'Location:\n{}\n'.format(node.get('location'))
- full_description += 'Mitigation:\n{}\n'.format(mitigation)
+ full_description = "{} ({}), {}:\n".format(
+ test_number, category, test_profile
+ )
+ full_description += "{}\n".format(test_description)
+ full_description += "Audit: {}\n".format(test_severity)
+ if "evidence" in node:
+ full_description += "Evidence:\n{}\n".format(node.get("evidence"))
+ if "location" in node:
+ full_description += "Location:\n{}\n".format(node.get("location"))
+ full_description += "Mitigation:\n{}\n".format(mitigation)
- tags = node.get('tags', [])
+ tags = node.get("tags", [])
if len(tags) > 0:
- full_description += 'Tags:\n'
+ full_description += "Tags:\n"
for t in tags:
- full_description += '{}\n'.format(str(t).rstrip())
+ full_description += "{}\n".format(str(t).rstrip())
- messages = node.get('message', [])
+ messages = node.get("message", [])
if len(messages) > 0:
- full_description += 'Messages:\n'
+ full_description += "Messages:\n"
for m in messages:
- full_description += '{}\n'.format(str(m).rstrip())
-
- finding = Finding(title=title,
- test=test,
- description=full_description,
- severity=severity,
- mitigation=mitigation,
- vuln_id_from_tool=vuln_id_from_tool,
- static_finding=True,
- dynamic_finding=False)
+ full_description += "{}\n".format(str(m).rstrip())
+
+ finding = Finding(
+ title=title,
+ test=test,
+ description=full_description,
+ severity=severity,
+ mitigation=mitigation,
+ vuln_id_from_tool=vuln_id_from_tool,
+ static_finding=True,
+ dynamic_finding=False,
+ )
return finding
# see neuvector/share/clus_apis.go
def convert_severity(severity):
- if severity.lower() == 'high':
+ if severity.lower() == "high":
return "High"
- elif severity.lower() == 'warn':
+ elif severity.lower() == "warn":
return "Medium"
- elif severity.lower() == 'info':
+ elif severity.lower() == "info":
return "Low"
- elif severity.lower() == 'pass':
+ elif severity.lower() == "pass":
return "Info"
- elif severity.lower() == 'note':
+ elif severity.lower() == "note":
return "Info"
- elif severity.lower() == 'error':
+ elif severity.lower() == "error":
return "Info"
else:
return severity.title()
class NeuVectorComplianceParser(object):
-
def get_scan_types(self):
return [NEUVECTOR_SCAN_NAME]
@@ -141,7 +149,7 @@ def get_findings(self, filename, test):
if filename is None:
return list()
- if filename.name.lower().endswith('.json'):
+ if filename.name.lower().endswith(".json"):
return parse(filename, test)
else:
- raise ValueError('Unknown File Format')
+ raise ValueError("Unknown File Format")
diff --git a/dojo/tools/nexpose/__init__.py b/dojo/tools/nexpose/__init__.py
index 369f2551a3..69e743a006 100644
--- a/dojo/tools/nexpose/__init__.py
+++ b/dojo/tools/nexpose/__init__.py
@@ -1 +1 @@
-__author__ = 'jay7958'
+__author__ = "jay7958"
diff --git a/dojo/tools/nexpose/parser.py b/dojo/tools/nexpose/parser.py
index cee5bb4ae9..fc7a434440 100644
--- a/dojo/tools/nexpose/parser.py
+++ b/dojo/tools/nexpose/parser.py
@@ -40,8 +40,7 @@ def parse_html_type(self, node):
ret = ""
tag = node.tag.lower()
- if tag == 'containerblockelement':
-
+ if tag == "containerblockelement":
if len(list(node)) > 0:
for child in list(node):
ret += self.parse_html_type(child)
@@ -52,19 +51,25 @@ def parse_html_type(self, node):
ret += str(node.tail).strip() + ""
else:
ret += ""
- if tag == 'listitem':
+ if tag == "listitem":
if len(list(node)) > 0:
for child in list(node):
ret += self.parse_html_type(child)
else:
if node.text:
ret += "" + str(node.text).strip() + ""
- if tag == 'orderedlist':
+ if tag == "orderedlist":
i = 1
for item in list(node):
- ret += "" + str(i) + " " + self.parse_html_type(item) + "
"
+ ret += (
+ ""
+ + str(i)
+ + " "
+ + self.parse_html_type(item)
+ + "
"
+ )
i += 1
- if tag == 'paragraph':
+ if tag == "paragraph":
if len(list(node)) > 0:
for child in list(node):
ret += self.parse_html_type(child)
@@ -75,12 +80,12 @@ def parse_html_type(self, node):
ret += str(node.tail).strip() + "
"
else:
ret += ""
- if tag == 'unorderedlist':
+ if tag == "unorderedlist":
for item in list(node):
unorderedlist = self.parse_html_type(item)
if unorderedlist not in ret:
ret += "* " + unorderedlist
- if tag == 'urllink':
+ if tag == "urllink":
if node.text:
ret += str(node.text).strip() + " "
last = ""
@@ -101,17 +106,24 @@ def parse_tests_type(self, node, vulnsDefinitions):
"""
vulns = list()
- for tests in node.findall('tests'):
- for test in tests.findall('test'):
- if test.get('id') in vulnsDefinitions and (
- test.get('status') in ['vulnerable-exploited', 'vulnerable-version', 'vulnerable-potential']):
- vuln = vulnsDefinitions[test.get('id').lower()]
+ for tests in node.findall("tests"):
+ for test in tests.findall("test"):
+ if test.get("id") in vulnsDefinitions and (
+ test.get("status")
+ in [
+ "vulnerable-exploited",
+ "vulnerable-version",
+ "vulnerable-potential",
+ ]
+ ):
+ vuln = vulnsDefinitions[test.get("id").lower()]
for desc in list(test):
- if 'pluginOutput' in vuln:
- vuln['pluginOutput'] += "\n\n" + \
- self.parse_html_type(desc)
+ if "pluginOutput" in vuln:
+ vuln[
+ "pluginOutput"
+ ] += "\n\n" + self.parse_html_type(desc)
else:
- vuln['pluginOutput'] = self.parse_html_type(desc)
+ vuln["pluginOutput"] = self.parse_html_type(desc)
vulns.append(vuln)
return vulns
@@ -122,109 +134,137 @@ def get_vuln_definitions(self, tree):
"""
vulns = dict()
url_index = 0
- for vulnsDef in tree.findall('VulnerabilityDefinitions'):
- for vulnDef in vulnsDef.findall('vulnerability'):
- vid = vulnDef.get('id').lower()
- severity_chk = int(vulnDef.get('severity'))
+ for vulnsDef in tree.findall("VulnerabilityDefinitions"):
+ for vulnDef in vulnsDef.findall("vulnerability"):
+ vid = vulnDef.get("id").lower()
+ severity_chk = int(vulnDef.get("severity"))
if severity_chk >= 9:
- sev = 'Critical'
+ sev = "Critical"
elif severity_chk >= 7:
- sev = 'High'
+ sev = "High"
elif severity_chk >= 4:
- sev = 'Medium'
+ sev = "Medium"
elif 0 < severity_chk < 4:
- sev = 'Low'
+ sev = "Low"
else:
- sev = 'Info'
+ sev = "Info"
vuln = {
- 'desc': "",
- 'name': vulnDef.get('title'),
- 'vector': vulnDef.get('cvssVector'), # this is CVSS v2
- 'refs': dict(),
- 'resolution': "",
- 'severity': sev,
- 'tags': list()
+ "desc": "",
+ "name": vulnDef.get("title"),
+ "vector": vulnDef.get("cvssVector"), # this is CVSS v2
+ "refs": dict(),
+ "resolution": "",
+ "severity": sev,
+ "tags": list(),
}
for item in list(vulnDef):
- if item.tag == 'description':
+ if item.tag == "description":
for htmlType in list(item):
- vuln['desc'] += self.parse_html_type(htmlType)
+ vuln["desc"] += self.parse_html_type(htmlType)
- elif item.tag == 'exploits':
+ elif item.tag == "exploits":
for exploit in list(item):
- vuln['refs'][exploit.get('title')] = str(exploit.get('title')).strip() + ' ' + \
- str(exploit.get('link')).strip()
+ vuln["refs"][exploit.get("title")] = (
+ str(exploit.get("title")).strip()
+ + " "
+ + str(exploit.get("link")).strip()
+ )
- elif item.tag == 'references':
+ elif item.tag == "references":
for ref in list(item):
- if 'URL' in ref.get('source'):
- vuln['refs'][ref.get('source') + str(url_index)] = str(ref.text).strip()
+ if "URL" in ref.get("source"):
+ vuln["refs"][
+ ref.get("source") + str(url_index)
+ ] = str(ref.text).strip()
url_index += 1
else:
- vuln['refs'][ref.get('source')] = str(ref.text).strip()
+ vuln["refs"][ref.get("source")] = str(
+ ref.text
+ ).strip()
- elif item.tag == 'solution':
+ elif item.tag == "solution":
for htmlType in list(item):
- vuln['resolution'] += self.parse_html_type(htmlType)
+ vuln["resolution"] += self.parse_html_type(
+ htmlType
+ )
# there is currently no method to register tags in vulns
- elif item.tag == 'tags':
+ elif item.tag == "tags":
for tag in list(item):
- vuln['tags'].append(tag.text.lower())
+ vuln["tags"].append(tag.text.lower())
vulns[vid] = vuln
return vulns
def get_items(self, tree, vulns, test):
hosts = list()
- for nodes in tree.findall('nodes'):
- for node in nodes.findall('node'):
+ for nodes in tree.findall("nodes"):
+ for node in nodes.findall("node"):
host = dict()
- host['name'] = node.get('address')
- host['hostnames'] = set()
- host['os'] = ""
- host['services'] = list()
- host['vulns'] = self.parse_tests_type(node, vulns)
-
- host['vulns'].append({
- 'name': 'Host Up',
- 'desc': 'Host is up because it replied on ICMP request or some TCP/UDP port is up',
- 'severity': 'Info',
- })
-
- for names in node.findall('names'):
- for name in names.findall('name'):
- host['hostnames'].add(name.text)
-
- for endpoints in node.findall('endpoints'):
- for endpoint in endpoints.findall('endpoint'):
+ host["name"] = node.get("address")
+ host["hostnames"] = set()
+ host["os"] = ""
+ host["services"] = list()
+ host["vulns"] = self.parse_tests_type(node, vulns)
+
+ host["vulns"].append(
+ {
+ "name": "Host Up",
+ "desc": "Host is up because it replied on ICMP request or some TCP/UDP port is up",
+ "severity": "Info",
+ }
+ )
+
+ for names in node.findall("names"):
+ for name in names.findall("name"):
+ host["hostnames"].add(name.text)
+
+ for endpoints in node.findall("endpoints"):
+ for endpoint in endpoints.findall("endpoint"):
svc = {
- 'protocol': endpoint.get('protocol'),
- 'port': int(endpoint.get('port')),
- 'status': endpoint.get('status'),
+ "protocol": endpoint.get("protocol"),
+ "port": int(endpoint.get("port")),
+ "status": endpoint.get("status"),
}
- for services in endpoint.findall('services'):
- for service in services.findall('service'):
- svc['name'] = service.get('name', '').lower()
- svc['vulns'] = self.parse_tests_type(service, vulns)
-
- for configs in service.findall('configurations'):
- for config in configs.findall('config'):
- if "banner" in config.get('name'):
- svc['version'] = config.get('name')
-
- svc['vulns'].append({
- 'name': 'Open port {}/{}'.format(svc['protocol'].upper(), svc['port']),
- 'desc': '{}/{} port is open with "{}" service'.format(svc['protocol'],
- svc['port'],
- service.get('name')),
- 'severity': 'Info',
- 'tags': [
- re.sub("[^A-Za-z0-9]+", "-", service.get('name').lower()).rstrip('-')
- ] if service.get('name') != "" else []
- })
-
- host['services'].append(svc)
+ for services in endpoint.findall("services"):
+ for service in services.findall("service"):
+ svc["name"] = service.get("name", "").lower()
+ svc["vulns"] = self.parse_tests_type(
+ service, vulns
+ )
+
+ for configs in service.findall(
+ "configurations"
+ ):
+ for config in configs.findall("config"):
+ if "banner" in config.get("name"):
+ svc["version"] = config.get("name")
+
+ svc["vulns"].append(
+ {
+ "name": "Open port {}/{}".format(
+ svc["protocol"].upper(),
+ svc["port"],
+ ),
+ "desc": '{}/{} port is open with "{}" service'.format(
+ svc["protocol"],
+ svc["port"],
+ service.get("name"),
+ ),
+ "severity": "Info",
+ "tags": [
+ re.sub(
+ "[^A-Za-z0-9]+",
+ "-",
+ service.get("name").lower(),
+ ).rstrip("-")
+ ]
+ if service.get("name") != ""
+ else [],
+ }
+ )
+
+ host["services"].append(svc)
hosts.append(host)
@@ -232,82 +272,89 @@ def get_items(self, tree, vulns, test):
for host in hosts:
# manage findings by node only
- for vuln in host['vulns']:
- dupe_key = vuln['severity'] + vuln['name']
+ for vuln in host["vulns"]:
+ dupe_key = vuln["severity"] + vuln["name"]
find = self.findings(dupe_key, dupes, test, vuln)
- endpoint = Endpoint(host=host['name'])
+ endpoint = Endpoint(host=host["name"])
find.unsaved_endpoints.append(endpoint)
- find.unsaved_tags = vuln.get('tags', [])
+ find.unsaved_tags = vuln.get("tags", [])
# manage findings by service
- for service in host['services']:
- for vuln in service['vulns']:
- dupe_key = vuln['severity'] + vuln['name']
+ for service in host["services"]:
+ for vuln in service["vulns"]:
+ dupe_key = vuln["severity"] + vuln["name"]
find = self.findings(dupe_key, dupes, test, vuln)
endpoint = Endpoint(
- host=host['name'],
- port=service['port'],
- protocol=service['name'] if service['name'] in SCHEME_PORT_MAP else service['protocol'],
- fragment=service['protocol'].lower() if service['name'] == "dns" else None
- # A little dirty hack but in case of DNS it is important to know if vulnerability is on TCP or UDP
+ host=host["name"],
+ port=service["port"],
+ protocol=service["name"]
+ if service["name"] in SCHEME_PORT_MAP
+ else service["protocol"],
+ fragment=service["protocol"].lower()
+ if service["name"] == "dns"
+ else None
+ # A little dirty hack but in case of DNS it is
+ # important to know if vulnerability is on TCP or UDP
)
find.unsaved_endpoints.append(endpoint)
- find.unsaved_tags = vuln.get('tags', [])
+ find.unsaved_tags = vuln.get("tags", [])
return list(dupes.values())
@staticmethod
def findings(dupe_key, dupes, test, vuln):
- """
-
-
- """
+ """ """
if dupe_key in dupes:
find = dupes[dupe_key]
- dupe_text = html2text.html2text(vuln.get('pluginOutput', ''))
+ dupe_text = html2text.html2text(vuln.get("pluginOutput", ""))
if dupe_text not in find.description:
find.description += "\n\n" + dupe_text
else:
- find = Finding(title=vuln['name'],
- description=html2text.html2text(
- vuln['desc'].strip()) + "\n\n" + html2text.html2text(vuln.get('pluginOutput', '').strip()),
- severity=vuln['severity'],
- mitigation=html2text.html2text(vuln.get('resolution')) if vuln.get('resolution') else None,
- impact=vuln.get('vector') if vuln.get('vector') else None,
- test=test,
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- mitigated=None,
- dynamic_finding=True)
+ find = Finding(
+ title=vuln["name"],
+ description=html2text.html2text(vuln["desc"].strip())
+ + "\n\n"
+ + html2text.html2text(vuln.get("pluginOutput", "").strip()),
+ severity=vuln["severity"],
+ mitigation=html2text.html2text(vuln.get("resolution"))
+ if vuln.get("resolution")
+ else None,
+ impact=vuln.get("vector") if vuln.get("vector") else None,
+ test=test,
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ mitigated=None,
+ dynamic_finding=True,
+ )
# build references
- refs = ''
- for ref in vuln.get('refs', {}):
- if ref.startswith('BID'):
+ refs = ""
+ for ref in vuln.get("refs", {}):
+ if ref.startswith("BID"):
refs += f" * [{vuln['refs'][ref]}](https://www.securityfocus.com/bid/{vuln['refs'][ref]})"
- elif ref.startswith('CA'):
+ elif ref.startswith("CA"):
refs += f" * [{vuln['refs'][ref]}](https://www.cert.org/advisories/{vuln['refs'][ref]}.html)"
- elif ref.startswith('CERT-VN'):
+ elif ref.startswith("CERT-VN"):
refs += f" * [{vuln['refs'][ref]}](https://www.kb.cert.org/vuls/id/{vuln['refs'][ref]}.html)"
- elif ref.startswith('CVE'):
+ elif ref.startswith("CVE"):
refs += f" * [{vuln['refs'][ref]}](https://cve.mitre.org/cgi-bin/cvename.cgi?name={vuln['refs'][ref]})"
- elif ref.startswith('DEBIAN'):
+ elif ref.startswith("DEBIAN"):
refs += f" * [{vuln['refs'][ref]}](https://security-tracker.debian.org/tracker/{vuln['refs'][ref]})"
- elif ref.startswith('XF'):
+ elif ref.startswith("XF"):
refs += f" * [{vuln['refs'][ref]}](https://exchange.xforce.ibmcloud.com/vulnerabilities/{vuln['refs'][ref]})"
- elif ref.startswith('URL'):
+ elif ref.startswith("URL"):
refs += f" * URL: {vuln['refs'][ref]}"
else:
refs += f" * {ref}: {vuln['refs'][ref]}"
refs += "\n"
find.references = refs
# update CVE
- if "CVE" in vuln.get('refs', {}):
- find.unsaved_vulnerability_ids = [vuln['refs']['CVE']]
+ if "CVE" in vuln.get("refs", {}):
+ find.unsaved_vulnerability_ids = [vuln["refs"]["CVE"]]
find.unsaved_endpoints = list()
dupes[dupe_key] = find
return find
diff --git a/dojo/tools/nikto/__init__.py b/dojo/tools/nikto/__init__.py
index 369f2551a3..69e743a006 100644
--- a/dojo/tools/nikto/__init__.py
+++ b/dojo/tools/nikto/__init__.py
@@ -1 +1 @@
-__author__ = 'jay7958'
+__author__ = "jay7958"
diff --git a/dojo/tools/nikto/parser.py b/dojo/tools/nikto/parser.py
index 37d969ab86..5092ba44d3 100644
--- a/dojo/tools/nikto/parser.py
+++ b/dojo/tools/nikto/parser.py
@@ -1,4 +1,3 @@
-
import hashlib
import logging
import re
@@ -30,47 +29,53 @@ def get_label_for_scan_types(self, scan_type):
return scan_type # no custom label for now
def get_description_for_scan_types(self, scan_type):
- return "XML output (old and new nxvmlversion=\"1.2\" type) or JSON output"
+ return (
+ 'XML output (old and new nxvmlversion="1.2" type) or JSON output'
+ )
def get_findings(self, filename, test):
- if filename.name.lower().endswith('.xml'):
+ if filename.name.lower().endswith(".xml"):
return self.process_xml(filename, test)
- elif filename.name.lower().endswith('.json'):
+ elif filename.name.lower().endswith(".json"):
return self.process_json(filename, test)
else:
- raise ValueError('Unknown File Format')
+ raise ValueError("Unknown File Format")
def process_json(self, file, test):
data = json.load(file)
dupes = dict()
- host = data.get('host')
- port = data.get('port')
+ host = data.get("host")
+ port = data.get("port")
if port is not None:
port = int(port)
- for vulnerability in data.get('vulnerabilities', []):
+ for vulnerability in data.get("vulnerabilities", []):
finding = Finding(
- title=vulnerability.get('msg'),
+ title=vulnerability.get("msg"),
severity="Info", # Nikto doesn't assign severity, default to Info
- description="\n".join([
- f"**id:** `{vulnerability.get('id')}`",
- f"**msg:** `{vulnerability.get('msg')}`",
- f"**HTTP Method:** `{vulnerability.get('method')}`",
- f"**OSVDB:** `{vulnerability.get('OSVDB')}`",
- ]),
- vuln_id_from_tool=vulnerability.get('id'),
+ description="\n".join(
+ [
+ f"**id:** `{vulnerability.get('id')}`",
+ f"**msg:** `{vulnerability.get('msg')}`",
+ f"**HTTP Method:** `{vulnerability.get('method')}`",
+ f"**OSVDB:** `{vulnerability.get('OSVDB')}`",
+ ]
+ ),
+ vuln_id_from_tool=vulnerability.get("id"),
nb_occurences=1,
)
# manage if we have an ID from OSVDB
- if "OSVDB" in vulnerability and "0" != vulnerability.get('OSVDB'):
- finding.unique_id_from_tool = "OSVDB-" + vulnerability.get('OSVDB')
+ if "OSVDB" in vulnerability and "0" != vulnerability.get("OSVDB"):
+ finding.unique_id_from_tool = "OSVDB-" + vulnerability.get(
+ "OSVDB"
+ )
finding.description += "\n*This finding is marked as medium as there is a link to OSVDB*"
finding.severity = "Medium"
# build the endpoint
endpoint = Endpoint(
host=host,
port=port,
- path=vulnerability.get('url'),
+ path=vulnerability.get("url"),
)
finding.unsaved_endpoints = [endpoint]
@@ -80,8 +85,12 @@ def process_json(self, file, test):
find = dupes[dupe_key]
find.description += "\n-----\n" + finding.description
find.unsaved_endpoints.append(endpoint)
- find.unique_id_from_tool = None # as it is an aggregated finding we erase ids
- find.vuln_id_from_tool = None # as it is an aggregated finding we erase ids
+ find.unique_id_from_tool = (
+ None # as it is an aggregated finding we erase ids
+ )
+ find.vuln_id_from_tool = (
+ None # as it is an aggregated finding we erase ids
+ )
find.nb_occurences += 1
else:
dupes[dupe_key] = finding
@@ -93,41 +102,45 @@ def process_xml(self, file, test):
tree = ET.parse(file)
root = tree.getroot()
- scan = root.find('scandetails')
+ scan = root.find("scandetails")
if scan is not None:
self.process_scandetail(scan, test, dupes)
else:
# New versions of Nikto have a new file type (nxvmlversion="1.2") which adds an additional niktoscan tag
- # This find statement below is to support new file format while not breaking older Nikto scan files versions.
- for scan in root.findall('./niktoscan/scandetails'):
+ # This find statement below is to support new file format while not
+ # breaking older Nikto scan files versions.
+ for scan in root.findall("./niktoscan/scandetails"):
self.process_scandetail(scan, test, dupes)
return list(dupes.values())
def process_scandetail(self, scan, test, dupes):
- for item in scan.findall('item'):
+ for item in scan.findall("item"):
# Title
titleText = None
description = item.findtext("description")
# Cut the title down to the first sentence
sentences = re.split(
- r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', description)
+ r"(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s", description
+ )
if len(sentences) > 0:
titleText = sentences[0][:900]
else:
titleText = description[:900]
# Description
- description = "\n".join([
+ description = "\n".join(
+ [
f"**Host:** `{item.findtext('iplink')}`",
f"**Description:** `{item.findtext('description')}`",
f"**HTTP Method:** `{item.attrib.get('method')}`",
- ])
+ ]
+ )
# Manage severity the same way with JSON
severity = "Info" # Nikto doesn't assign severity, default to Info
- if item.get('osvdbid') is not None and "0" != item.get('osvdbid'):
+ if item.get("osvdbid") is not None and "0" != item.get("osvdbid"):
severity = "Medium"
finding = Finding(
@@ -137,7 +150,7 @@ def process_scandetail(self, scan, test, dupes):
severity=severity,
dynamic_finding=True,
static_finding=False,
- vuln_id_from_tool=item.attrib.get('id'),
+ vuln_id_from_tool=item.attrib.get("id"),
nb_occurences=1,
)
diff --git a/dojo/tools/nmap/__init__.py b/dojo/tools/nmap/__init__.py
index 43f000e0f3..a7849c0c39 100644
--- a/dojo/tools/nmap/__init__.py
+++ b/dojo/tools/nmap/__init__.py
@@ -1 +1 @@
-__author__ = 'patriknordlen'
+__author__ = "patriknordlen"
diff --git a/dojo/tools/nmap/parser.py b/dojo/tools/nmap/parser.py
index 5ac1f42290..171795126c 100755
--- a/dojo/tools/nmap/parser.py
+++ b/dojo/tools/nmap/parser.py
@@ -6,7 +6,6 @@
class NmapParser(object):
-
def get_scan_types(self):
return ["Nmap Scan"]
@@ -20,23 +19,29 @@ def get_findings(self, file, test):
tree = parse(file)
root = tree.getroot()
dupes = dict()
- if 'nmaprun' not in root.tag:
+ if "nmaprun" not in root.tag:
raise ValueError("This doesn't seem to be a valid Nmap xml file.")
report_date = None
try:
- report_date = datetime.datetime.fromtimestamp(int(root.attrib['start']))
+ report_date = datetime.datetime.fromtimestamp(
+ int(root.attrib["start"])
+ )
except ValueError:
pass
for host in root.findall("host"):
host_info = "### Host\n\n"
- ip = host.find("address[@addrtype='ipv4']").attrib['addr']
+ ip = host.find("address[@addrtype='ipv4']").attrib["addr"]
if ip is not None:
host_info += "**IP Address:** %s\n" % ip
- fqdn = host.find("hostnames/hostname[@type='PTR']").attrib['name'] if host.find("hostnames/hostname[@type='PTR']") is not None else None
+ fqdn = (
+ host.find("hostnames/hostname[@type='PTR']").attrib["name"]
+ if host.find("hostnames/hostname[@type='PTR']") is not None
+ else None
+ )
if fqdn is not None:
host_info += "**FQDN:** %s\n" % fqdn
@@ -44,44 +49,70 @@ def get_findings(self, file, test):
for os in host.iter("os"):
for os_match in os.iter("osmatch"):
- if 'name' in os_match.attrib:
- host_info += "**Host OS:** %s\n" % os_match.attrib['name']
- if 'accuracy' in os_match.attrib:
- host_info += "**Accuracy:** {0}%\n".format(os_match.attrib['accuracy'])
+ if "name" in os_match.attrib:
+ host_info += (
+ "**Host OS:** %s\n" % os_match.attrib["name"]
+ )
+ if "accuracy" in os_match.attrib:
+ host_info += "**Accuracy:** {0}%\n".format(
+ os_match.attrib["accuracy"]
+ )
host_info += "\n\n"
for port_element in host.findall("ports/port"):
- protocol = port_element.attrib['protocol']
- endpoint = Endpoint(host=fqdn if fqdn else ip, protocol=protocol)
- if 'portid' in port_element.attrib and port_element.attrib['portid'].isdigit():
- endpoint.port = int(port_element.attrib['portid'])
+ protocol = port_element.attrib["protocol"]
+ endpoint = Endpoint(
+ host=fqdn if fqdn else ip, protocol=protocol
+ )
+ if (
+ "portid" in port_element.attrib
+ and port_element.attrib["portid"].isdigit()
+ ):
+ endpoint.port = int(port_element.attrib["portid"])
# filter on open ports
- if 'open' != port_element.find("state").attrib.get('state'):
+ if "open" != port_element.find("state").attrib.get("state"):
continue
title = "Open port: %s/%s" % (endpoint.port, endpoint.protocol)
description = host_info
- description += "**Port/Protocol:** %s/%s\n" % (endpoint.port, endpoint.protocol)
+ description += "**Port/Protocol:** %s/%s\n" % (
+ endpoint.port,
+ endpoint.protocol,
+ )
service_info = "\n\n"
- if port_element.find('service') is not None:
- if 'product' in port_element.find('service').attrib:
- service_info += "**Product:** %s\n" % port_element.find('service').attrib['product']
-
- if 'version' in port_element.find('service').attrib:
- service_info += "**Version:** %s\n" % port_element.find('service').attrib['version']
-
- if 'extrainfo' in port_element.find('service').attrib:
- service_info += "**Extra Info:** %s\n" % port_element.find('service').attrib['extrainfo']
+ if port_element.find("service") is not None:
+ if "product" in port_element.find("service").attrib:
+ service_info += (
+ "**Product:** %s\n"
+ % port_element.find("service").attrib["product"]
+ )
+
+ if "version" in port_element.find("service").attrib:
+ service_info += (
+ "**Version:** %s\n"
+ % port_element.find("service").attrib["version"]
+ )
+
+ if "extrainfo" in port_element.find("service").attrib:
+ service_info += (
+ "**Extra Info:** %s\n"
+ % port_element.find("service").attrib["extrainfo"]
+ )
description += service_info
description += "\n\n"
- # manage some script like https://github.com/vulnersCom/nmap-vulners
- for script_element in port_element.findall('script[@id="vulners"]'):
- self.manage_vulner_script(test, dupes, script_element, endpoint, report_date)
+            # handle scripts such as
+            # https://github.com/vulnersCom/nmap-vulners
+ for script_element in port_element.findall(
+ 'script[@id="vulners"]'
+ ):
+ self.manage_vulner_script(
+ test, dupes, script_element, endpoint, report_date
+ )
severity = "Info"
dupe_key = "nmap:" + str(endpoint.port)
@@ -90,13 +121,14 @@ def get_findings(self, file, test):
if description is not None:
find.description += description
else:
- find = Finding(title=title,
- test=test,
- description=description,
- severity=severity,
- mitigation="N/A",
- impact="No impact provided",
- )
+ find = Finding(
+ title=title,
+ test=test,
+ description=description,
+ severity=severity,
+ mitigation="N/A",
+ impact="No impact provided",
+ )
find.unsaved_endpoints = list()
dupes[dupe_key] = find
if report_date:
@@ -124,37 +156,52 @@ def convert_cvss_score(self, raw_value):
else:
return "Critical"
- def manage_vulner_script(self, test, dupes, script_element, endpoint, report_date=None):
- for component_element in script_element.findall('table'):
- component_cpe = CPE(component_element.attrib['key'])
- for vuln in component_element.findall('table'):
+ def manage_vulner_script(
+ self, test, dupes, script_element, endpoint, report_date=None
+ ):
+ for component_element in script_element.findall("table"):
+ component_cpe = CPE(component_element.attrib["key"])
+ for vuln in component_element.findall("table"):
# convert elements in dict
vuln_attributes = dict()
- for elem in vuln.findall('elem'):
- vuln_attributes[elem.attrib['key'].lower()] = elem.text
+ for elem in vuln.findall("elem"):
+ vuln_attributes[elem.attrib["key"].lower()] = elem.text
- vuln_id = vuln_attributes['id']
+ vuln_id = vuln_attributes["id"]
description = "### Vulnerability\n\n"
description += "**ID**: `" + str(vuln_id) + "`\n"
description += "**CPE**: " + str(component_cpe) + "\n"
for attribute in vuln_attributes:
- description += "**" + attribute + "**: `" + vuln_attributes[attribute] + "`\n"
- severity = self.convert_cvss_score(vuln_attributes['cvss'])
+ description += (
+ "**"
+ + attribute
+ + "**: `"
+ + vuln_attributes[attribute]
+ + "`\n"
+ )
+ severity = self.convert_cvss_score(vuln_attributes["cvss"])
finding = Finding(
title=vuln_id,
test=test,
description=description,
severity=severity,
- component_name=component_cpe.get_product()[0] if len(component_cpe.get_product()) > 0 else '',
- component_version=component_cpe.get_version()[0] if len(component_cpe.get_version()) > 0 else '',
+ component_name=component_cpe.get_product()[0]
+ if len(component_cpe.get_product()) > 0
+ else "",
+ component_version=component_cpe.get_version()[0]
+ if len(component_cpe.get_version()) > 0
+ else "",
vuln_id_from_tool=vuln_id,
nb_occurences=1,
)
finding.unsaved_endpoints = [endpoint]
# manage if CVE is in metadata
- if "type" in vuln_attributes and "cve" == vuln_attributes["type"]:
+ if (
+ "type" in vuln_attributes
+ and "cve" == vuln_attributes["type"]
+ ):
finding.unsaved_vulnerability_ids = [vuln_attributes["id"]]
if report_date:
@@ -164,7 +211,9 @@ def manage_vulner_script(self, test, dupes, script_element, endpoint, report_dat
if dupe_key in dupes:
find = dupes[dupe_key]
if description is not None:
- find.description += "\n-----\n\n" + finding.description # fives '-' produces an horizontal line
+ find.description += (
+ "\n-----\n\n" + finding.description
+                ) # five '-' produce a horizontal line
find.unsaved_endpoints.extend(finding.unsaved_endpoints)
find.nb_occurences += finding.nb_occurences
else:
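
For reference, the vulners-script handling reformatted above turns each elem child into a dict entry keyed by its lower-cased "key" attribute. A minimal sketch; the XML fragment is invented for illustration:

    import xml.etree.ElementTree as ET

    # Invented fragment of an nmap vulners script table
    xml = """
    <table key="cpe:/a:openbsd:openssh:7.4">
      <table>
        <elem key="id">CVE-2018-15473</elem>
        <elem key="cvss">5.0</elem>
        <elem key="type">cve</elem>
      </table>
    </table>
    """
    component_element = ET.fromstring(xml)
    for vuln in component_element.findall("table"):
        vuln_attributes = {}
        for elem in vuln.findall("elem"):
            # same key-lowering as in manage_vulner_script above
            vuln_attributes[elem.attrib["key"].lower()] = elem.text
        print(vuln_attributes["id"], vuln_attributes["cvss"])
    # -> CVE-2018-15473 5.0
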
diff --git a/dojo/tools/npm_audit/parser.py b/dojo/tools/npm_audit/parser.py
index 94aa5fae93..968d00e0c9 100644
--- a/dojo/tools/npm_audit/parser.py
+++ b/dojo/tools/npm_audit/parser.py
@@ -9,7 +9,6 @@
class NpmAuditParser(object):
-
def get_scan_types(self):
return ["NPM Audit Scan"]
@@ -29,22 +28,26 @@ def parse_json(self, json_output):
try:
data = json_output.read()
try:
- tree = json.loads(str(data, 'utf-8'))
- except:
+ tree = json.loads(str(data, "utf-8"))
+ except Exception:
tree = json.loads(data)
- except:
+ except Exception:
raise ValueError("Invalid format, unable to parse json.")
- if tree.get('auditReportVersion'):
- raise ValueError('npm7 with auditReportVersion 2 or higher not yet supported as it lacks the most important fields in the reports')
+ if tree.get("auditReportVersion"):
+ raise ValueError(
+ "npm7 with auditReportVersion 2 or higher not yet supported as it lacks the most important fields in the reports"
+ )
- if tree.get('error'):
- error = tree.get('error')
- code = error['code']
- summary = error['summary']
- raise ValueError('npm audit report contains errors: %s, %s', code, summary)
+ if tree.get("error"):
+ error = tree.get("error")
+ code = error["code"]
+ summary = error["summary"]
+ raise ValueError(
+ "npm audit report contains errors: %s, %s", code, summary
+ )
- subtree = tree.get('advisories')
+ subtree = tree.get("advisories")
return subtree
@@ -53,74 +56,97 @@ def get_items(self, tree, test):
for key, node in tree.items():
item = get_item(node, test)
- unique_key = str(node['id']) + str(node['module_name'])
+ unique_key = str(node["id"]) + str(node["module_name"])
items[unique_key] = item
return list(items.values())
def censor_path_hashes(path):
- """ https://github.com/npm/npm/issues/20739 for dependencies installed from git, npm audit replaces the name with a (random?) hash """
+ """https://github.com/npm/npm/issues/20739 for dependencies installed from git, npm audit replaces the name with a (random?) hash"""
""" this hash changes on every run of npm audit, so defect dojo might think it's a new finding every run """
""" we strip the hash and replace it with 'censored_by_npm_audit` """
if not path:
return None
- return re.sub('[a-f0-9]{64}', 'censored_by_npm_audit', path)
+ return re.sub("[a-f0-9]{64}", "censored_by_npm_audit", path)
def get_item(item_node, test):
-
- if item_node['severity'] == 'low':
- severity = 'Low'
- elif item_node['severity'] == 'moderate':
- severity = 'Medium'
- elif item_node['severity'] == 'high':
- severity = 'High'
- elif item_node['severity'] == 'critical':
- severity = 'Critical'
+ if item_node["severity"] == "low":
+ severity = "Low"
+ elif item_node["severity"] == "moderate":
+ severity = "Medium"
+ elif item_node["severity"] == "high":
+ severity = "High"
+ elif item_node["severity"] == "critical":
+ severity = "Critical"
else:
- severity = 'Info'
+ severity = "Info"
- paths = ''
+ paths = ""
component_version = None
- for npm_finding in item_node['findings']:
+ for npm_finding in item_node["findings"]:
# use first version as component_version
- component_version = npm_finding['version'] if not component_version else component_version
- paths += "\n - " + str(npm_finding['version']) + ":" + str(','.join(npm_finding['paths'][:25]))
- if len(npm_finding['paths']) > 25:
+ component_version = (
+ npm_finding["version"]
+ if not component_version
+ else component_version
+ )
+ paths += (
+ "\n - "
+ + str(npm_finding["version"])
+ + ":"
+ + str(",".join(npm_finding["paths"][:25]))
+ )
+ if len(npm_finding["paths"]) > 25:
paths += "\n - ..... (list of paths truncated after 25 paths)"
cwe = get_npm_cwe(item_node)
- dojo_finding = Finding(title=item_node['title'] + " - " + "(" + item_node['module_name'] + ", " + item_node['vulnerable_versions'] + ")",
- test=test,
- severity=severity,
- file_path=censor_path_hashes(item_node['findings'][0]['paths'][0]),
- description=item_node['url'] + "\n" +
- item_node['overview'] + "\n Vulnerable Module: " +
- item_node['module_name'] + "\n Vulnerable Versions: " +
- str(item_node['vulnerable_versions']) + "\n Patched Version: " +
- str(item_node['patched_versions']) + "\n Vulnerable Paths: " +
- str(paths) + "\n CWE: " +
- str(item_node['cwe']) + "\n Access: " +
- str(item_node['access']),
- cwe=cwe,
- mitigation=item_node['recommendation'],
- references=item_node['url'],
- component_name=item_node['module_name'],
- component_version=component_version,
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- mitigated=None,
- impact="No impact provided",
- static_finding=True,
- dynamic_finding=False)
-
- if len(item_node['cves']) > 0:
+ dojo_finding = Finding(
+ title=item_node["title"]
+ + " - "
+ + "("
+ + item_node["module_name"]
+ + ", "
+ + item_node["vulnerable_versions"]
+ + ")",
+ test=test,
+ severity=severity,
+ file_path=censor_path_hashes(item_node["findings"][0]["paths"][0]),
+ description=item_node["url"]
+ + "\n"
+ + item_node["overview"]
+ + "\n Vulnerable Module: "
+ + item_node["module_name"]
+ + "\n Vulnerable Versions: "
+ + str(item_node["vulnerable_versions"])
+ + "\n Patched Version: "
+ + str(item_node["patched_versions"])
+ + "\n Vulnerable Paths: "
+ + str(paths)
+ + "\n CWE: "
+ + str(item_node["cwe"])
+ + "\n Access: "
+ + str(item_node["access"]),
+ cwe=cwe,
+ mitigation=item_node["recommendation"],
+ references=item_node["url"],
+ component_name=item_node["module_name"],
+ component_version=component_version,
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ mitigated=None,
+ impact="No impact provided",
+ static_finding=True,
+ dynamic_finding=False,
+ )
+
+ if len(item_node["cves"]) > 0:
dojo_finding.unsaved_vulnerability_ids = list()
- for vulnerability_id in item_node['cves']:
+ for vulnerability_id in item_node["cves"]:
dojo_finding.unsaved_vulnerability_ids.append(vulnerability_id)
return dojo_finding
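
The censor_path_hashes helper above is self-contained, so a quick usage sketch is possible; the path value is fabricated (a real one would carry a different 64-char hash on every npm audit run):

    import re

    def censor_path_hashes(path):
        # npm audit names git-installed dependencies by a 64-char hash that
        # changes per run; normalize it so re-imports deduplicate cleanly
        if not path:
            return None
        return re.sub("[a-f0-9]{64}", "censored_by_npm_audit", path)

    print(censor_path_hashes(("ab" * 32) + ">lodash"))
    # -> censored_by_npm_audit>lodash
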
diff --git a/dojo/tools/nsp/parser.py b/dojo/tools/nsp/parser.py
index e628916c9e..40a7dcb66a 100644
--- a/dojo/tools/nsp/parser.py
+++ b/dojo/tools/nsp/parser.py
@@ -4,7 +4,6 @@
class NspParser(object):
-
def get_scan_types(self):
return ["Node Security Platform Scan"]
@@ -25,10 +24,10 @@ def parse_json(self, json_output):
try:
data = json_output.read()
try:
- tree = json.loads(str(data, 'utf-8'))
- except:
+ tree = json.loads(str(data, "utf-8"))
+ except Exception:
tree = json.loads(data)
- except:
+ except Exception:
raise ValueError("Invalid format")
return tree
@@ -38,41 +37,56 @@ def get_items(self, tree, test):
for node in tree:
item = get_item(node, test)
- unique_key = node['title'] + str(node['path'])
+ unique_key = node["title"] + str(node["path"])
items[unique_key] = item
return list(items.values())
def get_item(item_node, test):
-
# Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss
- if item_node['cvss_score'] <= 3.9:
+ if item_node["cvss_score"] <= 3.9:
severity = "Low"
- elif item_node['cvss_score'] > 4.0 and item_node['cvss_score'] <= 6.9:
+ elif item_node["cvss_score"] > 4.0 and item_node["cvss_score"] <= 6.9:
severity = "Medium"
- elif item_node['cvss_score'] > 7.0 and item_node['cvss_score'] <= 8.9:
+ elif item_node["cvss_score"] > 7.0 and item_node["cvss_score"] <= 8.9:
severity = "High"
else:
severity = "Critical"
- finding = Finding(title=item_node['title'] + " - " + "(" + item_node['module'] + ", " + item_node['version'] + ")",
- test=test,
- severity=severity,
- description=item_node['overview'] + "\n Vulnerable Module: " +
- item_node['module'] + "\n Vulnerable Versions: " +
- str(item_node['vulnerable_versions']) + "\n Current Version: " +
- str(item_node['version']) + "\n Patched Version: " +
- str(item_node['patched_versions']) + "\n Vulnerable Path: " + " > ".join(item_node['path']) + "\n CVSS Score: " +
- str(item_node['cvss_score']) + "\n CVSS Vector: " +
- str(item_node['cvss_vector']),
- mitigation=item_node['recommendation'],
- references=item_node['advisory'],
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- mitigated=None,
- impact="No impact provided")
+ finding = Finding(
+ title=item_node["title"]
+ + " - "
+ + "("
+ + item_node["module"]
+ + ", "
+ + item_node["version"]
+ + ")",
+ test=test,
+ severity=severity,
+ description=item_node["overview"]
+ + "\n Vulnerable Module: "
+ + item_node["module"]
+ + "\n Vulnerable Versions: "
+ + str(item_node["vulnerable_versions"])
+ + "\n Current Version: "
+ + str(item_node["version"])
+ + "\n Patched Version: "
+ + str(item_node["patched_versions"])
+ + "\n Vulnerable Path: "
+ + " > ".join(item_node["path"])
+ + "\n CVSS Score: "
+ + str(item_node["cvss_score"])
+ + "\n CVSS Vector: "
+ + str(item_node["cvss_vector"]),
+ mitigation=item_node["recommendation"],
+ references=item_node["advisory"],
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ mitigated=None,
+ impact="No impact provided",
+ )
return finding
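
The CVSS-to-severity ladder above can be checked in isolation. Note that, as written, boundary scores of exactly 4.0 or 7.0 match neither the "<= 3.9" nor the "> 4.0"/"> 7.0" guards and fall through to the else branch:

    def nsp_severity(cvss_score):
        # same branch structure as get_item above
        if cvss_score <= 3.9:
            return "Low"
        elif cvss_score > 4.0 and cvss_score <= 6.9:
            return "Medium"
        elif cvss_score > 7.0 and cvss_score <= 8.9:
            return "High"
        else:
            return "Critical"

    print(nsp_severity(5.3))  # Medium
    print(nsp_severity(4.0))  # Critical, via the fall-through described above
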
diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py
index 21f6d07ffc..782f0cf578 100644
--- a/dojo/tools/nuclei/parser.py
+++ b/dojo/tools/nuclei/parser.py
@@ -14,7 +14,7 @@ class NucleiParser(object):
A class that can be used to parse the nuclei (https://github.com/projectdiscovery/nuclei) JSON report file
"""
- DEFAULT_SEVERITY = 'Low'
+ DEFAULT_SEVERITY = "Low"
def get_scan_types(self):
return ["Nuclei Scan"]
@@ -32,23 +32,26 @@ def get_findings(self, filename, test):
dupes = {}
for item in data:
- logger.debug('Item %s.', str(item))
- template_id = item.get('templateID', item.get('template-id', ''))
- info = item.get('info')
- name = info.get('name')
- severity = info.get('severity').title()
+ logger.debug("Item %s.", str(item))
+ template_id = item.get("templateID", item.get("template-id", ""))
+ info = item.get("info")
+ name = info.get("name")
+ severity = info.get("severity").title()
if severity not in Finding.SEVERITIES:
- logger.debug('Unsupported severity value "%s", change to "%s"',
- severity, self.DEFAULT_SEVERITY)
+ logger.debug(
+ 'Unsupported severity value "%s", change to "%s"',
+ severity,
+ self.DEFAULT_SEVERITY,
+ )
severity = self.DEFAULT_SEVERITY
- item_type = item.get('type')
+ item_type = item.get("type")
if item_type is None:
- item_type = ''
- matched = item.get('matched', item.get('matched-at', ''))
- if '://' in matched:
+ item_type = ""
+ matched = item.get("matched", item.get("matched-at", ""))
+ if "://" in matched:
endpoint = Endpoint.from_uri(matched)
else:
- endpoint = Endpoint.from_uri('//' + matched)
+ endpoint = Endpoint.from_uri("//" + matched)
finding = Finding(
title=f"{name}",
@@ -57,72 +60,97 @@ def get_findings(self, filename, test):
nb_occurences=1,
vuln_id_from_tool=template_id,
)
- if item.get('timestamp'):
- finding.date = date_parser.parse(item.get('timestamp'))
- if info.get('description'):
- finding.description = info.get('description')
- if item.get('extracted-results'):
- finding.description += "\n**Results:**\n" + '\n'.join(item.get('extracted-results'))
- if info.get('tags'):
- finding.unsaved_tags = info.get('tags')
- if info.get('reference'):
- reference = info.get('reference')
- if type(reference) is list:
- finding.references = '\n'.join(info.get('reference'))
+ if item.get("timestamp"):
+ finding.date = date_parser.parse(item.get("timestamp"))
+ if info.get("description"):
+ finding.description = info.get("description")
+ if item.get("extracted-results"):
+ finding.description += "\n**Results:**\n" + "\n".join(
+ item.get("extracted-results")
+ )
+ if info.get("tags"):
+ finding.unsaved_tags = info.get("tags")
+ if info.get("reference"):
+ reference = info.get("reference")
+ if isinstance(reference, list):
+ finding.references = "\n".join(info.get("reference"))
else:
- finding.references = info.get('reference')
+ finding.references = info.get("reference")
finding.unsaved_endpoints.append(endpoint)
- classification = info.get('classification')
+ classification = info.get("classification")
if classification:
- if 'cve-id' in classification and classification['cve-id']:
- cve_ids = classification['cve-id']
- finding.unsaved_vulnerability_ids = list(map(lambda x: x.upper(), cve_ids))
- if ('cwe-id' in classification and classification['cwe-id']
- and len(classification['cwe-id']) > 0):
- cwe = classification['cwe-id'][0]
+ if "cve-id" in classification and classification["cve-id"]:
+ cve_ids = classification["cve-id"]
+ finding.unsaved_vulnerability_ids = list(
+ map(lambda x: x.upper(), cve_ids)
+ )
+ if (
+ "cwe-id" in classification
+ and classification["cwe-id"]
+ and len(classification["cwe-id"]) > 0
+ ):
+ cwe = classification["cwe-id"][0]
finding.cwe = int(cwe[4:])
- if 'cvss-metrics' in classification and classification['cvss-metrics']:
+ if (
+ "cvss-metrics" in classification
+ and classification["cvss-metrics"]
+ ):
cvss_objects = cvss_parser.parse_cvss_from_text(
- classification['cvss-metrics'])
+ classification["cvss-metrics"]
+ )
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
- if 'cvss-score' in classification and classification['cvss-score']:
- finding.cvssv3_score = classification['cvss-score']
+ if (
+ "cvss-score" in classification
+ and classification["cvss-score"]
+ ):
+ finding.cvssv3_score = classification["cvss-score"]
- matcher = item.get('matcher-name', item.get('matcher_name'))
+ matcher = item.get("matcher-name", item.get("matcher_name"))
if matcher:
finding.component_name = matcher
else:
- matcher = ''
-
- if info.get('remediation'):
- finding.mitigation = info.get('remediation')
-
- host = item.get('host', '')
-
- if item.get('curl-command'):
- finding.steps_to_reproduce = 'curl command to reproduce the request:\n`' + \
- item.get('curl-command') + '`'
-
- if item.get('request'):
- finding.unsaved_request = item.get('request')
- if item.get('response'):
- finding.unsaved_response = item.get('response')
-
- logger.debug('dupe keys %s, %s, %s, %s.', template_id, item_type, matcher, host)
+ matcher = ""
+
+ if info.get("remediation"):
+ finding.mitigation = info.get("remediation")
+
+ host = item.get("host", "")
+
+ if item.get("curl-command"):
+ finding.steps_to_reproduce = (
+ "curl command to reproduce the request:\n`"
+ + item.get("curl-command")
+ + "`"
+ )
+
+ if item.get("request"):
+ finding.unsaved_request = item.get("request")
+ if item.get("response"):
+ finding.unsaved_response = item.get("response")
+
+ logger.debug(
+ "dupe keys %s, %s, %s, %s.",
+ template_id,
+ item_type,
+ matcher,
+ host,
+ )
dupe_key = hashlib.sha256(
- (template_id + item_type + matcher + endpoint.host).encode('utf-8')
+ (template_id + item_type + matcher + endpoint.host).encode(
+ "utf-8"
+ )
).hexdigest()
if dupe_key in dupes:
- logger.debug('dupe_key %s exists.', str(dupe_key))
+ logger.debug("dupe_key %s exists.", str(dupe_key))
finding = dupes[dupe_key]
if endpoint not in finding.unsaved_endpoints:
finding.unsaved_endpoints.append(endpoint)
- logger.debug('Appended endpoint %s', endpoint)
+ logger.debug("Appended endpoint %s", endpoint)
finding.nb_occurences += 1
else:
dupes[dupe_key] = finding
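
The nuclei de-duplication key reformatted above is a SHA-256 over four concatenated fields; a sketch with invented values:

    import hashlib

    # All values below are invented for illustration
    template_id = "tech-detect"
    item_type = "http"
    matcher = "nginx"
    host = "example.com"

    dupe_key = hashlib.sha256(
        (template_id + item_type + matcher + host).encode("utf-8")
    ).hexdigest()
    print(dupe_key)  # stable across runs, so repeated hits merge into one finding
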
diff --git a/dojo/tools/openscap/parser.py b/dojo/tools/openscap/parser.py
index 784c335103..9f3ba66132 100644
--- a/dojo/tools/openscap/parser.py
+++ b/dojo/tools/openscap/parser.py
@@ -9,7 +9,6 @@
class OpenscapParser(object):
-
def get_scan_types(self):
return ["Openscap Vulnerability Scan"]
@@ -26,52 +25,75 @@ def get_findings(self, file, test):
namespace = self.get_namespace(root)
        # check if xml file has correct root or not.
- if 'Benchmark' not in root.tag:
- raise ValueError("This doesn't seem to be a valid Openscap vulnerability scan xml file.")
- if 'http://checklists.nist.gov/xccdf/' not in namespace:
- raise ValueError("This doesn't seem to be a valid Openscap vulnerability scan xml file.")
+ if "Benchmark" not in root.tag:
+ raise ValueError(
+ "This doesn't seem to be a valid Openscap vulnerability scan xml file."
+ )
+ if "http://checklists.nist.gov/xccdf/" not in namespace:
+ raise ValueError(
+ "This doesn't seem to be a valid Openscap vulnerability scan xml file."
+ )
# read rules
rules = {}
- for rule in root.findall('.//{0}Rule'.format(namespace)):
- rules[rule.attrib['id']] = {
- "title": rule.findtext('./{0}title'.format(namespace))
+ for rule in root.findall(".//{0}Rule".format(namespace)):
+ rules[rule.attrib["id"]] = {
+ "title": rule.findtext("./{0}title".format(namespace))
}
# go to test result
- test_result = tree.find('./{0}TestResult'.format(namespace))
+ test_result = tree.find("./{0}TestResult".format(namespace))
ips = []
        # append all targets to a list.
- for ip in test_result.findall('./{0}target'.format(namespace)):
+ for ip in test_result.findall("./{0}target".format(namespace)):
ips.append(ip.text)
- for ip in test_result.findall('./{0}target-address'.format(namespace)):
+ for ip in test_result.findall("./{0}target-address".format(namespace)):
ips.append(ip.text)
dupes = dict()
- # run both rule, and rule-result in parallel so that we can get title for failed test from rule.
- for rule_result in test_result.findall('./{0}rule-result'.format(namespace)):
- result = rule_result.findtext('./{0}result'.format(namespace))
+        # run both rule and rule-result in parallel so that we can get the
+        # title for a failed test from the rule.
+ for rule_result in test_result.findall(
+ "./{0}rule-result".format(namespace)
+ ):
+ result = rule_result.findtext("./{0}result".format(namespace))
            # find only failed reports.
if "fail" in result:
# get rule corresponding to rule-result
- rule = rules[rule_result.attrib['idref']]
- title = rule['title']
- description = "\n".join([
- "**IdRef:** `" + rule_result.attrib['idref'] + "`",
- "**Title:** `" + title + "`",
- ])
+ rule = rules[rule_result.attrib["idref"]]
+ title = rule["title"]
+ description = "\n".join(
+ [
+ "**IdRef:** `" + rule_result.attrib["idref"] + "`",
+ "**Title:** `" + title + "`",
+ ]
+ )
vulnerability_ids = []
- for vulnerability_id in rule_result.findall("./{0}ident[@system='http://cve.mitre.org']".format(namespace)):
+ for vulnerability_id in rule_result.findall(
+ "./{0}ident[@system='http://cve.mitre.org']".format(
+ namespace
+ )
+ ):
vulnerability_ids.append(vulnerability_id.text)
# get severity.
- severity = rule_result.attrib.get('severity', 'medium').lower().capitalize()
+ severity = (
+ rule_result.attrib.get("severity", "medium")
+ .lower()
+ .capitalize()
+ )
# according to the spec 'unknown' is a possible value
- if severity == 'Unknown':
- severity = 'Info'
+ if severity == "Unknown":
+ severity = "Info"
references = ""
# get references.
- for check_content in rule_result.findall('./{0}check/{0}check-content-ref'.format(namespace)):
- references += "**name:** : " + check_content.attrib['name'] + "\n"
- references += "**href** : " + check_content.attrib['href'] + "\n"
+ for check_content in rule_result.findall(
+ "./{0}check/{0}check-content-ref".format(namespace)
+ ):
+ references += (
+ "**name:** : " + check_content.attrib["name"] + "\n"
+ )
+ references += (
+ "**href** : " + check_content.attrib["href"] + "\n"
+ )
finding = Finding(
title=title,
@@ -80,7 +102,7 @@ def get_findings(self, file, test):
references=references,
dynamic_finding=True,
static_finding=False,
- unique_id_from_tool=rule_result.attrib['idref'],
+ unique_id_from_tool=rule_result.attrib["idref"],
)
if vulnerability_ids:
finding.unsaved_vulnerability_ids = vulnerability_ids
@@ -90,13 +112,15 @@ def get_findings(self, file, test):
validate_ipv46_address(ip)
endpoint = Endpoint(host=ip)
except ValidationError:
- if '://' in ip:
+ if "://" in ip:
endpoint = Endpoint.from_uri(ip)
else:
- endpoint = Endpoint.from_uri('//' + ip)
+ endpoint = Endpoint.from_uri("//" + ip)
finding.unsaved_endpoints.append(endpoint)
- dupe_key = hashlib.sha256(references.encode('utf-8')).hexdigest()
+ dupe_key = hashlib.sha256(
+ references.encode("utf-8")
+ ).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
if finding.references:
@@ -109,5 +133,5 @@ def get_findings(self, file, test):
def get_namespace(self, element):
"""Extract namespace present in XML file."""
- m = re.match(r'\{.*\}', element.tag)
- return m.group(0) if m else ''
+ m = re.match(r"\{.*\}", element.tag)
+ return m.group(0) if m else ""
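
The get_namespace helper at the end of the OpenSCAP hunk extracts the Clark-notation namespace prefix from an element tag. Adapted here to take the tag string directly rather than an Element:

    import re

    def get_namespace(tag):
        # ElementTree renders namespaced tags as "{uri}localname"
        m = re.match(r"\{.*\}", tag)
        return m.group(0) if m else ""

    print(get_namespace("{http://checklists.nist.gov/xccdf/1.2}Benchmark"))
    # -> {http://checklists.nist.gov/xccdf/1.2}
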
diff --git a/dojo/tools/openvas_csv/parser.py b/dojo/tools/openvas_csv/parser.py
index 8dd5cd0e35..04d6166b23 100644
--- a/dojo/tools/openvas_csv/parser.py
+++ b/dojo/tools/openvas_csv/parser.py
@@ -1,4 +1,3 @@
-
import csv
import hashlib
import io
@@ -9,7 +8,6 @@
class ColumnMappingStrategy(object):
-
mapped_column = None
def __init__(self):
@@ -20,25 +18,26 @@ def map_column_value(self, finding, column_value):
@staticmethod
def evaluate_bool_value(column_value):
- if column_value.lower() == 'true':
+ if column_value.lower() == "true":
return True
- elif column_value.lower() == 'false':
+ elif column_value.lower() == "false":
return False
else:
return None
def process_column(self, column_name, column_value, finding):
-
- if column_name.lower() == self.mapped_column and column_value is not None:
+ if (
+ column_name.lower() == self.mapped_column
+ and column_value is not None
+ ):
self.map_column_value(finding, column_value)
elif self.successor is not None:
self.successor.process_column(column_name, column_value, finding)
class DateColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'timestamp'
+ self.mapped_column = "timestamp"
super(DateColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -46,9 +45,8 @@ def map_column_value(self, finding, column_value):
class TitleColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'nvt name'
+ self.mapped_column = "nvt name"
super(TitleColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -56,9 +54,8 @@ def map_column_value(self, finding, column_value):
class CweColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'cweid'
+ self.mapped_column = "cweid"
super(CweColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -67,9 +64,8 @@ def map_column_value(self, finding, column_value):
class PortColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'port'
+ self.mapped_column = "port"
super(PortColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -78,9 +74,8 @@ def map_column_value(self, finding, column_value):
class ProtocolColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'port protocol'
+ self.mapped_column = "port protocol"
super(ProtocolColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -89,20 +84,20 @@ def map_column_value(self, finding, column_value):
class IpColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'ip'
+ self.mapped_column = "ip"
super(IpColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
- if not finding.unsaved_endpoints[0].host: # process only if host is not already defined (by field hostname)
+ if not finding.unsaved_endpoints[
+ 0
+ ].host: # process only if host is not already defined (by field hostname)
finding.unsaved_endpoints[0].host = column_value
class HostnameColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'hostname'
+ self.mapped_column = "hostname"
super(HostnameColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -111,27 +106,25 @@ def map_column_value(self, finding, column_value):
class SeverityColumnMappingStrategy(ColumnMappingStrategy):
-
@staticmethod
def is_valid_severity(severity):
- valid_severity = ('Info', 'Low', 'Medium', 'High', 'Critical')
+ valid_severity = ("Info", "Low", "Medium", "High", "Critical")
return severity in valid_severity
def __init__(self):
- self.mapped_column = 'severity'
+ self.mapped_column = "severity"
super(SeverityColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
if self.is_valid_severity(column_value):
finding.severity = column_value
else:
- finding.severity = 'Info'
+ finding.severity = "Info"
class DescriptionColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'summary'
+ self.mapped_column = "summary"
super(DescriptionColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -139,9 +132,8 @@ def map_column_value(self, finding, column_value):
class MitigationColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'solution'
+ self.mapped_column = "solution"
super(MitigationColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -149,9 +141,8 @@ def map_column_value(self, finding, column_value):
class ImpactColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'vulnerability insight'
+ self.mapped_column = "vulnerability insight"
super(ImpactColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -159,9 +150,8 @@ def map_column_value(self, finding, column_value):
class ReferencesColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'specific result'
+ self.mapped_column = "specific result"
super(ReferencesColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -169,9 +159,8 @@ def map_column_value(self, finding, column_value):
class ActiveColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'active'
+ self.mapped_column = "active"
super(ActiveColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -179,9 +168,8 @@ def map_column_value(self, finding, column_value):
class VerifiedColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'verified'
+ self.mapped_column = "verified"
super(VerifiedColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -189,9 +177,8 @@ def map_column_value(self, finding, column_value):
class FalsePositiveColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'falsepositive'
+ self.mapped_column = "falsepositive"
super(FalsePositiveColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -199,9 +186,8 @@ def map_column_value(self, finding, column_value):
class DuplicateColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'duplicate'
+ self.mapped_column = "duplicate"
super(DuplicateColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -209,7 +195,6 @@ def map_column_value(self, finding, column_value):
class OpenVASCsvParser(object):
-
def create_chain(self):
date_column_strategy = DateColumnMappingStrategy()
title_column_strategy = TitleColumnMappingStrategy()
@@ -264,15 +249,14 @@ def get_description_for_scan_types(self, scan_type):
return "Import OpenVAS Scan in CSV format. Export as CSV Results on OpenVAS."
def get_findings(self, filename, test):
-
column_names = dict()
dupes = dict()
chain = self.create_chain()
content = filename.read()
- if type(content) is bytes:
- content = content.decode('utf-8')
- reader = csv.reader(io.StringIO(content), delimiter=',', quotechar='"')
+ if isinstance(content, bytes):
+ content = content.decode("utf-8")
+ reader = csv.reader(io.StringIO(content), delimiter=",", quotechar='"')
row_number = 0
for row in reader:
@@ -286,7 +270,9 @@ def get_findings(self, filename, test):
column_number = 0
for column in row:
- chain.process_column(column_names[column_number], column, finding)
+ chain.process_column(
+ column_names[column_number], column, finding
+ )
column_number += 1
if finding is not None and row_number > 0:
@@ -295,7 +281,17 @@ def get_findings(self, filename, test):
if finding.description is None:
finding.description = ""
- key = hashlib.sha256((str(finding.unsaved_endpoints[0]) + '|' + finding.severity + '|' + finding.title + '|' + finding.description).encode('utf-8')).hexdigest()
+ key = hashlib.sha256(
+ (
+ str(finding.unsaved_endpoints[0])
+ + "|"
+ + finding.severity
+ + "|"
+ + finding.title
+ + "|"
+ + finding.description
+ ).encode("utf-8")
+ ).hexdigest()
if key not in dupes:
dupes[key] = finding
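
The OpenVAS CSV parser above is a chain-of-responsibility over column names: each strategy claims its column or delegates to its successor. A stripped-down sketch of the dispatch, with only two strategies and a plain dict standing in for Finding:

    class ColumnStrategy:
        mapped_column = None

        def __init__(self):
            self.successor = None

        def process_column(self, column_name, column_value, finding):
            if (
                column_name.lower() == self.mapped_column
                and column_value is not None
            ):
                self.map_column_value(finding, column_value)
            elif self.successor is not None:
                self.successor.process_column(column_name, column_value, finding)

    class TitleStrategy(ColumnStrategy):
        def __init__(self):
            self.mapped_column = "nvt name"
            super().__init__()

        def map_column_value(self, finding, column_value):
            finding["title"] = column_value

    class SeverityStrategy(ColumnStrategy):
        def __init__(self):
            self.mapped_column = "severity"
            super().__init__()

        def map_column_value(self, finding, column_value):
            finding["severity"] = column_value

    chain = TitleStrategy()
    chain.successor = SeverityStrategy()

    finding = {}
    for name, value in [("NVT Name", "Weak cipher"), ("Severity", "Medium")]:
        chain.process_column(name, value, finding)
    print(finding)  # {'title': 'Weak cipher', 'severity': 'Medium'}
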
diff --git a/dojo/tools/ort/parser.py b/dojo/tools/ort/parser.py
index 30ffcb853f..d2811d3e17 100644
--- a/dojo/tools/ort/parser.py
+++ b/dojo/tools/ort/parser.py
@@ -18,7 +18,6 @@ def get_description_for_scan_types(self, scan_type):
return "Import Outpost24 endpoint vulnerability scan in XML format."
def get_findings(self, json_output, test):
-
if json_output is None:
return list()
@@ -32,27 +31,32 @@ def parse_json(self, json_output):
try:
data = json_output.read()
try:
- tree = json.loads(str(data, 'utf-8'))
- except:
+ tree = json.loads(str(data, "utf-8"))
+ except Exception:
tree = json.loads(data)
- except:
+ except Exception:
raise ValueError("Invalid format")
return tree
def get_items(self, evaluatedModel, test):
items = {}
- packages = evaluatedModel['packages']
- dependency_trees = evaluatedModel['dependency_trees']
- rule_violations = evaluatedModel['rule_violations']
- licenses = evaluatedModel['licenses']
- rule_violations_unresolved = get_unresolved_rule_violations(rule_violations)
- rule_violations_models = get_rule_violation_models(rule_violations_unresolved, packages, licenses,
- dependency_trees)
+ packages = evaluatedModel["packages"]
+ dependency_trees = evaluatedModel["dependency_trees"]
+ rule_violations = evaluatedModel["rule_violations"]
+ licenses = evaluatedModel["licenses"]
+ rule_violations_unresolved = get_unresolved_rule_violations(
+ rule_violations
+ )
+ rule_violations_models = get_rule_violation_models(
+ rule_violations_unresolved, packages, licenses, dependency_trees
+ )
for model in rule_violations_models:
item = get_item(model, test)
- unique_key = hashlib.md5((item.title + item.references).encode()).hexdigest()
+ unique_key = hashlib.md5(
+ (item.title + item.references).encode()
+ ).hexdigest()
items[unique_key] = item
return list(items.values())
@@ -67,16 +71,16 @@ def get_unresolved_rule_violations(rule_violations):
def is_rule_violation_unresolved(rule_violation):
- return 'resolutions' not in rule_violation
+ return "resolutions" not in rule_violation
def find_in_dependency_tree(tree, package_id):
- if 'pkg' in tree and tree['pkg'] == package_id:
+ if "pkg" in tree and tree["pkg"] == package_id:
return True
else:
- if 'children' in tree:
+ if "children" in tree:
found_in_child = False
- for child in tree['children']:
+ for child in tree["children"]:
if found_in_child:
break
else:
@@ -90,57 +94,69 @@ def get_project_ids_for_package(dependency_trees, package_id):
project_ids = []
for project in dependency_trees:
if find_in_dependency_tree(project, package_id):
- project_ids.append(project['pkg'])
+ project_ids.append(project["pkg"])
return project_ids
def get_name_id_for_package(packages, package__id):
name = ""
for package in packages:
- if package['_id'] == package__id:
- name = package['id']
+ if package["_id"] == package__id:
+ name = package["id"]
break
return name
-def get_rule_violation_models(rule_violations_unresolved, packages, licenses, dependency_trees):
+def get_rule_violation_models(
+ rule_violations_unresolved, packages, licenses, dependency_trees
+):
models = []
for violation in rule_violations_unresolved:
- models.append(get_rule_violation_model(violation, packages, licenses, dependency_trees))
+ models.append(
+ get_rule_violation_model(
+ violation, packages, licenses, dependency_trees
+ )
+ )
return models
-def get_rule_violation_model(rule_violation_unresolved, packages, licenses, dependency_trees):
- project_ids = get_project_ids_for_package(dependency_trees, rule_violation_unresolved['pkg'])
+def get_rule_violation_model(
+ rule_violation_unresolved, packages, licenses, dependency_trees
+):
+ project_ids = get_project_ids_for_package(
+ dependency_trees, rule_violation_unresolved["pkg"]
+ )
project_names = []
for id in project_ids:
project_names.append(get_name_id_for_package(packages, id))
- package = find_package_by_id(packages, rule_violation_unresolved['pkg'])
- if 'license' in rule_violation_unresolved:
- license_tmp = rule_violation_unresolved['license']
+ package = find_package_by_id(packages, rule_violation_unresolved["pkg"])
+ if "license" in rule_violation_unresolved:
+ license_tmp = rule_violation_unresolved["license"]
else:
- license_tmp = 'unset'
- if 'license_source' not in rule_violation_unresolved:
- rule_violation_unresolved['license_source'] = 'unset'
+ license_tmp = "unset"
+ if "license_source" not in rule_violation_unresolved:
+ rule_violation_unresolved["license_source"] = "unset"
license_id = find_license_id(licenses, license_tmp)
- return RuleViolationModel(package, license_id, project_names, rule_violation_unresolved)
+ return RuleViolationModel(
+ package, license_id, project_names, rule_violation_unresolved
+ )
def find_package_by_id(packages, pkg_id):
package = None
for pkg in packages:
- if pkg['_id'] == pkg_id:
+ if pkg["_id"] == pkg_id:
package = pkg
break
return package
def find_license_id(licenses, license_id):
- id = ''
+ id = ""
for lic in licenses:
- if lic['_id'] == license_id:
- id = lic['id']
+ if lic["_id"] == license_id:
+ id = lic["id"]
break
return id
@@ -155,12 +171,14 @@ def get_item(model, test):
severity = get_severity(model.rule_violation)
- finding = Finding(title=model.rule_violation['rule'],
- test=test,
- references=model.rule_violation['message'],
- description=desc,
- severity=severity,
- static_finding=True)
+ finding = Finding(
+ title=model.rule_violation["rule"],
+ test=test,
+ references=model.rule_violation["message"],
+ description=desc,
+ severity=severity,
+ static_finding=True,
+ )
return finding
@@ -173,20 +191,17 @@ def get_item(model, test):
# projects: []
# rule_violation: dict
-RuleViolationModel = namedtuple('RuleViolationModel', [
- 'pkg',
- 'license_id',
- 'projects',
- 'rule_violation'
-])
+RuleViolationModel = namedtuple(
+ "RuleViolationModel", ["pkg", "license_id", "projects", "rule_violation"]
+)
def get_severity(rule_violation):
- if rule_violation['severity'] == 'ERROR':
- return 'High'
- elif rule_violation['severity'] == 'WARNING':
- return 'Medium'
- elif rule_violation['severity'] == 'HINT':
- return 'Info'
+ if rule_violation["severity"] == "ERROR":
+ return "High"
+ elif rule_violation["severity"] == "WARNING":
+ return "Medium"
+ elif rule_violation["severity"] == "HINT":
+ return "Info"
else:
- return 'Critical'
+ return "Critical"
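
The ORT severity mapping above is notable for defaulting unknown levels to Critical rather than Info; a standalone check:

    def get_severity(rule_violation):
        # same mapping as above
        if rule_violation["severity"] == "ERROR":
            return "High"
        elif rule_violation["severity"] == "WARNING":
            return "Medium"
        elif rule_violation["severity"] == "HINT":
            return "Info"
        else:
            return "Critical"

    print([get_severity({"severity": s}) for s in ("ERROR", "WARNING", "HINT", "OTHER")])
    # -> ['High', 'Medium', 'Info', 'Critical']
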
diff --git a/dojo/tools/ossindex_devaudit/parser.py b/dojo/tools/ossindex_devaudit/parser.py
index 7cee84546b..8d04bac2d4 100644
--- a/dojo/tools/ossindex_devaudit/parser.py
+++ b/dojo/tools/ossindex_devaudit/parser.py
@@ -20,7 +20,6 @@ def get_description_for_scan_types(self, scan_type):
return "Import OssIndex Devaudit SCA Scan in json format."
def get_findings(self, json_file, test):
-
tree = self.parse_json(json_file)
if tree:
@@ -39,66 +38,77 @@ def parse_json(self, json_file):
return tree
def get_items(self, tree, test):
-
items = {}
results = {key: value for (key, value) in tree.items()}
- for package in results.get('Packages', []):
- package_data = package['Package']
- if len(package.get('Vulnerabilities', [])) > 0:
- for vulnerability in package.get('Vulnerabilities', []):
+ for package in results.get("Packages", []):
+ package_data = package["Package"]
+ if len(package.get("Vulnerabilities", [])) > 0:
+ for vulnerability in package.get("Vulnerabilities", []):
item = get_item(
- dependency_name=package_data['name'],
- dependency_version=package_data['version'],
- dependency_source=package_data['pm'],
+ dependency_name=package_data["name"],
+ dependency_version=package_data["version"],
+ dependency_source=package_data["pm"],
vulnerability=vulnerability,
- test=test
+ test=test,
)
- unique_key = vulnerability['id']
+ unique_key = vulnerability["id"]
items[unique_key] = item
return items.values()
-def get_item(dependency_name, dependency_version, dependency_source, vulnerability, test):
-
- cwe_data = vulnerability.get('cwe', 'CWE-1035')
- if cwe_data is None or cwe_data.startswith('CWE') is False:
- cwe_data = 'CWE-1035'
+def get_item(
+ dependency_name, dependency_version, dependency_source, vulnerability, test
+):
+ cwe_data = vulnerability.get("cwe", "CWE-1035")
+ if cwe_data is None or cwe_data.startswith("CWE") is False:
+ cwe_data = "CWE-1035"
try:
- cwe = int(cwe_data.split('-')[1])
+ cwe = int(cwe_data.split("-")[1])
except ValueError:
- raise ValueError('Attempting to convert the CWE value to an integer failed')
-
- finding = Finding(title=dependency_source + ":" + dependency_name + " - " + "(" + dependency_version + ", " + cwe_data + ")",
- test=test,
- severity=get_severity(vulnerability.get('cvssScore', '')),
- description=vulnerability['title'],
- cwe=cwe,
- cvssv3=vulnerability['cvssVector'].replace('CVSS:3.0', ''),
- mitigation='Upgrade the component to the latest non-vulnerable version, or remove the package if it is not in use.',
- references=vulnerability.get('reference', ''),
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- mitigated=None,
- static_finding=False,
- dynamic_finding=False,
- impact="No impact provided by scan")
+ raise ValueError(
+ "Attempting to convert the CWE value to an integer failed"
+ )
+
+ finding = Finding(
+ title=dependency_source
+ + ":"
+ + dependency_name
+ + " - "
+ + "("
+ + dependency_version
+ + ", "
+ + cwe_data
+ + ")",
+ test=test,
+ severity=get_severity(vulnerability.get("cvssScore", "")),
+ description=vulnerability["title"],
+ cwe=cwe,
+ cvssv3=vulnerability["cvssVector"].replace("CVSS:3.0", ""),
+ mitigation="Upgrade the component to the latest non-vulnerable version, or remove the package if it is not in use.",
+ references=vulnerability.get("reference", ""),
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ mitigated=None,
+ static_finding=False,
+ dynamic_finding=False,
+ impact="No impact provided by scan",
+ )
return finding
def get_severity(cvss_score):
-
- result = 'Info'
+ result = "Info"
if cvss_score != "":
ratings = [
- ('Critical', 9.0, 10.0),
- ('High', 7.0, 8.9),
- ('Medium', 4.0, 6.9),
- ('Low', 0.1, 3.9)
+ ("Critical", 9.0, 10.0),
+ ("High", 7.0, 8.9),
+ ("Medium", 4.0, 6.9),
+ ("Low", 0.1, 3.9),
]
for severity, low, high in ratings:
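
The hunk above ends mid-loop, so the body applying each (severity, low, high) tuple is not shown; the range check below is an assumption about how the ratings table is consumed:

    def get_severity(cvss_score):
        result = "Info"
        if cvss_score != "":
            ratings = [
                ("Critical", 9.0, 10.0),
                ("High", 7.0, 8.9),
                ("Medium", 4.0, 6.9),
                ("Low", 0.1, 3.9),
            ]
            for severity, low, high in ratings:
                # assumed loop body -- truncated in the hunk above
                if low <= float(cvss_score) <= high:
                    result = severity
        return result

    print(get_severity(7.5))  # High
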
diff --git a/dojo/tools/outpost24/parser.py b/dojo/tools/outpost24/parser.py
index 13be837541..8fd244cc42 100644
--- a/dojo/tools/outpost24/parser.py
+++ b/dojo/tools/outpost24/parser.py
@@ -8,7 +8,6 @@
class Outpost24Parser(object):
-
def get_scan_types(self):
return ["Outpost24 Scan"]
@@ -21,55 +20,72 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, file, test):
tree = ElementTree.parse(file)
items = list()
- for detail in tree.iterfind('.//detaillist/detail'):
+ for detail in tree.iterfind(".//detaillist/detail"):
# finding details
- title = detail.findtext('name')
+ title = detail.findtext("name")
# date = detail.findtext('date') # can be used for Finding.date?
- vulnerability_id = detail.findtext('./cve/id')
- url = detail.findtext('./referencelist/reference/[type=\'solution\']/../url')
- description = detail.findtext('description')
- mitigation = detail.findtext('solution')
- impact = detail.findtext('information')
- cvss_score = detail.findtext('cvss_v3_score') or detail.findtext('cvss_score')
+ vulnerability_id = detail.findtext("./cve/id")
+ url = detail.findtext(
+ "./referencelist/reference/[type='solution']/../url"
+ )
+ description = detail.findtext("description")
+ mitigation = detail.findtext("solution")
+ impact = detail.findtext("information")
+ cvss_score = detail.findtext("cvss_v3_score") or detail.findtext(
+ "cvss_score"
+ )
if not cvss_score:
cvss_score = 0
if cvss_score:
score = float(cvss_score)
if score < 4:
- severity = 'Low'
+ severity = "Low"
elif score < 7:
- severity = 'Medium'
+ severity = "Medium"
elif score < 9:
- severity = 'High'
+ severity = "High"
else:
- severity = 'Critical'
+ severity = "Critical"
else:
- risk = int(detail.findtext('risk'))
+ risk = int(detail.findtext("risk"))
if risk == 0:
- severity = 'Low'
+ severity = "Low"
elif risk == 1:
- severity = 'Medium'
+ severity = "Medium"
elif risk == 2:
- severity = 'High'
+ severity = "High"
else:
- severity = 'Critical'
- cvss_description = detail.findtext('cvss_vector_description')
- cvss_vector = detail.findtext('cvss_v3_vector') or detail.findtext('cvss_vector')
- severity_justification = "{}\n{}".format(cvss_score, cvss_description)
- finding = Finding(title=title, test=test, url=url, description=description, mitigation=mitigation,
- impact=impact, severity=severity,
- severity_justification=severity_justification)
+ severity = "Critical"
+ cvss_description = detail.findtext("cvss_vector_description")
+ cvss_vector = detail.findtext("cvss_v3_vector") or detail.findtext(
+ "cvss_vector"
+ )
+ severity_justification = "{}\n{}".format(
+ cvss_score, cvss_description
+ )
+ finding = Finding(
+ title=title,
+ test=test,
+ url=url,
+ description=description,
+ mitigation=mitigation,
+ impact=impact,
+ severity=severity,
+ severity_justification=severity_justification,
+ )
if vulnerability_id:
finding.unsaved_vulnerability_ids = [vulnerability_id]
# endpoint details
- host = detail.findtext('ip')
+ host = detail.findtext("ip")
if host:
- protocol = detail.findtext('./portinfo/service')
+ protocol = detail.findtext("./portinfo/service")
try:
- port = int(detail.findtext('./portinfo/portnumber'))
- except ValueError as ve:
+ port = int(detail.findtext("./portinfo/portnumber"))
+ except ValueError:
logger.debug("General port given. Assigning 0 as default.")
port = 0
- finding.unsaved_endpoints.append(Endpoint(protocol=protocol, host=host, port=port))
+ finding.unsaved_endpoints.append(
+ Endpoint(protocol=protocol, host=host, port=port)
+ )
items.append(finding)
return items
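
Condensing the branching above: Outpost24 severity comes from the CVSS score when one is present, otherwise from the integer risk field. A rough sketch with illustrative inputs:

    def outpost24_severity(cvss_score, risk):
        # cvss_score: text from the report, possibly empty; risk: int fallback
        if cvss_score:
            score = float(cvss_score)
            if score < 4:
                return "Low"
            elif score < 7:
                return "Medium"
            elif score < 9:
                return "High"
            return "Critical"
        if risk == 0:
            return "Low"
        elif risk == 1:
            return "Medium"
        elif risk == 2:
            return "High"
        return "Critical"

    print(outpost24_severity("6.5", None))  # Medium
    print(outpost24_severity("", 2))        # High
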
diff --git a/dojo/tools/php_security_audit_v2/parser.py b/dojo/tools/php_security_audit_v2/parser.py
index 4df82d3de5..f1ee8022c1 100644
--- a/dojo/tools/php_security_audit_v2/parser.py
+++ b/dojo/tools/php_security_audit_v2/parser.py
@@ -5,7 +5,6 @@
class PhpSecurityAuditV2Parser(object):
-
def get_scan_types(self):
return ["PHP Security Audit v2"]
@@ -18,8 +17,8 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
tree = filename.read()
try:
- data = json.loads(str(tree, 'utf-8'))
- except:
+ data = json.loads(str(tree, "utf-8"))
+ except Exception:
data = json.loads(tree)
dupes = dict()
@@ -36,9 +35,16 @@ def get_findings(self, filename, test):
findingdetail += "Rule Source: " + issue["source"] + "\n"
findingdetail += "Details: " + issue["message"] + "\n"
- sev = PhpSecurityAuditV2Parser.get_severity_word(issue["severity"])
+ sev = PhpSecurityAuditV2Parser.get_severity_word(
+ issue["severity"]
+ )
- dupe_key = title + filepath + str(issue["line"]) + str(issue["column"])
+ dupe_key = (
+ title
+ + filepath
+ + str(issue["line"])
+ + str(issue["column"])
+ )
if dupe_key in dupes:
find = dupes[dupe_key]
@@ -57,7 +63,7 @@ def get_findings(self, filename, test):
)
dupes[dupe_key] = find
- findingdetail = ''
+ findingdetail = ""
return list(dupes.values())
@@ -66,10 +72,10 @@ def get_severity_word(severity):
sev = math.ceil(severity / 2)
if sev == 5:
- return 'Critical'
+ return "Critical"
elif sev == 4:
- return 'High'
+ return "High"
elif sev == 3:
- return 'Medium'
+ return "Medium"
else:
- return 'Low'
+ return "Low"
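
The severity-word mapping above halves the raw severity value (apparently on a 0-10 scale) and rounds up; a quick check of the arithmetic:

    import math

    def get_severity_word(severity):
        # same mapping as above
        sev = math.ceil(severity / 2)
        if sev == 5:
            return "Critical"
        elif sev == 4:
            return "High"
        elif sev == 3:
            return "Medium"
        else:
            return "Low"

    print([get_severity_word(s) for s in (10, 7, 5, 2)])
    # -> ['Critical', 'High', 'Medium', 'Low']
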
diff --git a/dojo/tools/php_symfony_security_check/parser.py b/dojo/tools/php_symfony_security_check/parser.py
index fbc2e8d8b5..c5fb511880 100644
--- a/dojo/tools/php_symfony_security_check/parser.py
+++ b/dojo/tools/php_symfony_security_check/parser.py
@@ -4,7 +4,6 @@
class PhpSymfonySecurityCheckParser(object):
-
def get_scan_types(self):
return ["PHP Symfony Security Check"]
@@ -24,10 +23,10 @@ def parse_json(self, json_file):
try:
data = json_file.read()
try:
- tree = json.loads(str(data, 'utf-8'))
- except:
+ tree = json.loads(str(data, "utf-8"))
+ except Exception:
tree = json.loads(data)
- except:
+ except Exception:
raise Exception("Invalid format")
return tree
@@ -36,41 +35,54 @@ def get_items(self, tree, test):
items = {}
for dependency_name, dependency_data in list(tree.items()):
- advisories = dependency_data.get('advisories')
- dependency_version = dependency_data['version']
- if dependency_version and dependency_version.startswith('v'):
+ advisories = dependency_data.get("advisories")
+ dependency_version = dependency_data["version"]
+ if dependency_version and dependency_version.startswith("v"):
dependency_version = dependency_version[1:]
for advisory in advisories:
- item = get_item(dependency_name, dependency_version, advisory, test)
- unique_key = str(dependency_name) + str(dependency_data['version'] + str(advisory['cve']))
+ item = get_item(
+ dependency_name, dependency_version, advisory, test
+ )
+ unique_key = str(dependency_name) + str(
+ dependency_data["version"] + str(advisory["cve"])
+ )
items[unique_key] = item
return list(items.values())
def get_item(dependency_name, dependency_version, advisory, test):
-
- finding = Finding(title=dependency_name + " - " + "(" + dependency_version + ", " + advisory['cve'] + ")",
- test=test,
- # TODO decide how to handle the fact we don't have a severity. None will lead to problems handling minimum severity on import
- severity='Info',
- description=advisory['title'],
- # TODO Decide if the default '1035: vulnerable 3rd party component' is OK to use?
- cwe=1035,
- mitigation='upgrade',
- references=advisory['link'],
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- mitigated=None,
- impact="No impact provided",
- static_finding=True,
- dynamic_finding=False,
- component_name=dependency_name,
- component_version=dependency_version)
-
- if advisory['cve']:
- finding.unsaved_vulnerability_ids = [advisory['cve']]
+ finding = Finding(
+ title=dependency_name
+ + " - "
+ + "("
+ + dependency_version
+ + ", "
+ + advisory["cve"]
+ + ")",
+ test=test,
+ # TODO decide how to handle the fact we don't have a severity. None
+ # will lead to problems handling minimum severity on import
+ severity="Info",
+ description=advisory["title"],
+ # TODO Decide if the default '1035: vulnerable 3rd party component' is
+ # OK to use?
+ cwe=1035,
+ mitigation="upgrade",
+ references=advisory["link"],
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ mitigated=None,
+ impact="No impact provided",
+ static_finding=True,
+ dynamic_finding=False,
+ component_name=dependency_name,
+ component_version=dependency_version,
+ )
+
+ if advisory["cve"]:
+ finding.unsaved_vulnerability_ids = [advisory["cve"]]
return finding
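
One subtlety worth illustrating in the hunk above: the leading "v" is stripped from the version used in the finding, but the unique key is built from the raw version string. Sample values are invented:

    dependency_name = "symfony/http-kernel"   # invented
    dependency_data = {"version": "v4.4.13"}  # invented
    advisory = {"cve": "CVE-2020-15094"}      # invented

    dependency_version = dependency_data["version"]
    if dependency_version and dependency_version.startswith("v"):
        dependency_version = dependency_version[1:]

    unique_key = str(dependency_name) + str(
        dependency_data["version"] + str(advisory["cve"])
    )
    print(dependency_version)  # 4.4.13
    print(unique_key)          # symfony/http-kernelv4.4.13CVE-2020-15094
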
diff --git a/dojo/tools/pip_audit/parser.py b/dojo/tools/pip_audit/parser.py
index 7c2871a05c..726667987f 100644
--- a/dojo/tools/pip_audit/parser.py
+++ b/dojo/tools/pip_audit/parser.py
@@ -4,7 +4,6 @@
class PipAuditParser:
-
def get_scan_types(self):
return ["pip-audit Scan"]
@@ -18,39 +17,40 @@ def requires_file(self, scan_type):
return True
def get_findings(self, scan_file, test):
-
data = json.load(scan_file)
findings = list()
for item in data:
- vulnerabilities = item.get('vulns', [])
+ vulnerabilities = item.get("vulns", [])
if vulnerabilities:
- component_name = item['name']
- component_version = item.get('version')
+ component_name = item["name"]
+ component_version = item.get("version")
for vulnerability in vulnerabilities:
- vuln_id = vulnerability.get('id')
- vuln_fix_versions = vulnerability.get('fix_versions')
- vuln_description = vulnerability.get('description')
+ vuln_id = vulnerability.get("id")
+ vuln_fix_versions = vulnerability.get("fix_versions")
+ vuln_description = vulnerability.get("description")
- title = f'{vuln_id} in {component_name}:{component_version}'
+ title = (
+ f"{vuln_id} in {component_name}:{component_version}"
+ )
- description = ''
+ description = ""
description += vuln_description
mitigation = None
if vuln_fix_versions:
- mitigation = 'Upgrade to version:'
+ mitigation = "Upgrade to version:"
if len(vuln_fix_versions) == 1:
- mitigation += f' {vuln_fix_versions[0]}'
+ mitigation += f" {vuln_fix_versions[0]}"
else:
for fix_version in vuln_fix_versions:
- mitigation += f'\n- {fix_version}'
+ mitigation += f"\n- {fix_version}"
finding = Finding(
test=test,
title=title,
cwe=1352,
- severity='Medium',
+ severity="Medium",
description=description,
mitigation=mitigation,
component_name=component_name,
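
The mitigation text assembled above renders inline for a single fix version and as a list otherwise; extracted into a helper with invented versions:

    def build_mitigation(vuln_fix_versions):
        # same logic as the pip-audit hunk above
        mitigation = None
        if vuln_fix_versions:
            mitigation = "Upgrade to version:"
            if len(vuln_fix_versions) == 1:
                mitigation += f" {vuln_fix_versions[0]}"
            else:
                for fix_version in vuln_fix_versions:
                    mitigation += f"\n- {fix_version}"
        return mitigation

    print(build_mitigation(["2.2.1"]))           # Upgrade to version: 2.2.1
    print(build_mitigation(["2.2.1", "2.3.0"]))  # multi-line list form
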
diff --git a/dojo/tools/pmd/parser.py b/dojo/tools/pmd/parser.py
index 22296ebe8c..d3f8c5eda2 100644
--- a/dojo/tools/pmd/parser.py
+++ b/dojo/tools/pmd/parser.py
@@ -5,7 +5,6 @@
class PmdParser(object):
-
def get_scan_types(self):
return ["PMD Scan"]
@@ -19,9 +18,11 @@ def get_findings(self, filename, test):
dupes = dict()
content = filename.read()
- if type(content) is bytes:
- content = content.decode('utf-8')
- reader = list(csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"'))
+ if isinstance(content, bytes):
+ content = content.decode("utf-8")
+ reader = list(
+ csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"')
+ )
for row in reader:
finding = Finding(test=test)
@@ -40,7 +41,9 @@ def get_findings(self, filename, test):
priority = "Info"
finding.severity = priority
- description = "Description: {}\n".format(row['Description'].strip())
+ description = "Description: {}\n".format(
+ row["Description"].strip()
+ )
description += "Rule set: {}\n".format(row["Rule set"].strip())
description += "Problem: {}\n".format(row["Problem"].strip())
description += "Package: {}\n".format(row["Package"].strip())
@@ -50,12 +53,16 @@ def get_findings(self, filename, test):
finding.impact = "No impact provided"
finding.mitigation = "No mitigation provided"
- key = hashlib.sha256("|".join([
- finding.title,
- finding.description,
- finding.file_path,
- finding.line
- ]).encode("utf-8")).hexdigest()
+ key = hashlib.sha256(
+ "|".join(
+ [
+ finding.title,
+ finding.description,
+ finding.file_path,
+ finding.line,
+ ]
+ ).encode("utf-8")
+ ).hexdigest()
if key not in dupes:
dupes[key] = finding
diff --git a/dojo/tools/popeye/parser.py b/dojo/tools/popeye/parser.py
index 6c49a27fb5..67e176a911 100644
--- a/dojo/tools/popeye/parser.py
+++ b/dojo/tools/popeye/parser.py
@@ -22,20 +22,41 @@ def get_findings(self, file, test):
data = json.load(file)
dupes = dict()
- for sanitizer in data['popeye']['sanitizers']:
- issues = sanitizer.get('issues')
+ for sanitizer in data["popeye"]["sanitizers"]:
+ issues = sanitizer.get("issues")
if issues:
for issue_group, issue_list in issues.items():
for issue in issue_list:
- if issue['level'] != 0:
- title = sanitizer['sanitizer'] + " " + issue_group + " " + issue['message']
- severity = self.get_defect_dojo_severity(issue['level'])
- description = "**Sanitizer** : " + sanitizer['sanitizer'] + "\n\n" + \
- "**Resource** : " + issue_group + "\n\n" + \
- "**Group** : " + issue['group'] + "\n\n" + \
- "**Severity** : " + self.get_popeye_level_string(issue['level']) + "\n\n" + \
- "**Message** : " + issue['message']
- vuln_id_from_tool = re.search(r'\[(POP-\d+)\].+', issue['message']).group(1)
+ if issue["level"] != 0:
+ title = (
+ sanitizer["sanitizer"]
+ + " "
+ + issue_group
+ + " "
+ + issue["message"]
+ )
+ severity = self.get_defect_dojo_severity(
+ issue["level"]
+ )
+ description = (
+ "**Sanitizer** : "
+ + sanitizer["sanitizer"]
+ + "\n\n"
+ + "**Resource** : "
+ + issue_group
+ + "\n\n"
+ + "**Group** : "
+ + issue["group"]
+ + "\n\n"
+ + "**Severity** : "
+ + self.get_popeye_level_string(issue["level"])
+ + "\n\n"
+ + "**Message** : "
+ + issue["message"]
+ )
+ vuln_id_from_tool = re.search(
+ r"\[(POP-\d+)\].+", issue["message"]
+ ).group(1)
finding = Finding(
title=title,
test=test,
@@ -46,7 +67,9 @@ def get_findings(self, file, test):
vuln_id_from_tool=vuln_id_from_tool,
)
# internal de-duplication
- dupe_key = hashlib.sha256(str(description + title).encode('utf-8')).hexdigest()
+ dupe_key = hashlib.sha256(
+ str(description + title).encode("utf-8")
+ ).hexdigest()
if dupe_key not in dupes:
dupes[dupe_key] = finding
return list(dupes.values())
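The Popeye parser pulls the tool ID out of the message with the regex shown above; a standalone sketch (the message is hypothetical, and note the parser calls .group(1) directly, so it assumes the POP id is always present):

    import re

    message = "[POP-106] No resources requests/limits defined"  # hypothetical
    match = re.search(r"\[(POP-\d+)\].+", message)
    vuln_id_from_tool = match.group(1) if match else None  # "POP-106"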
diff --git a/dojo/tools/pwn_sast/parser.py b/dojo/tools/pwn_sast/parser.py
index d25ebaff62..f86b8cbd2a 100644
--- a/dojo/tools/pwn_sast/parser.py
+++ b/dojo/tools/pwn_sast/parser.py
@@ -19,17 +19,16 @@ def get_description_for_scan_types(self, scan_type):
return "Import pwn_sast Driver findings in JSON format."
def get_findings(self, filename, test):
-
results = json.load(filename)
if results is not None:
- report_name = results.get("report_name")
+ results.get("report_name")
data_arr = results.get("data")
findings = {}
for data_hash in data_arr:
- timestamp = data_hash.get("timestamp")
+ data_hash.get("timestamp")
security_references = data_hash.get("security_references")
if security_references is not None:
@@ -54,37 +53,45 @@ def get_findings(self, filename, test):
offending_file = None
line_no_and_contents = data_hash.get("line_no_and_contents")
- test_case_filter = data_hash.get("test_case_filter")
- steps_to_reproduce = "\n".join([
- "Install pwn_sast Driver via: https://github.com/0dayinc/pwn#installation",
- "Execute the pwn_sast Driver via:",
- f"```pwn_sast --dir-path . --uri-source-root {git_repo_root_uri} -s```"
- ])
+ data_hash.get("test_case_filter")
+ steps_to_reproduce = "\n".join(
+ [
+ "Install pwn_sast Driver via: https://github.com/0dayinc/pwn#installation",
+ "Execute the pwn_sast Driver via:",
+ f"```pwn_sast --dir-path . --uri-source-root {git_repo_root_uri} -s```",
+ ]
+ )
for line in line_no_and_contents:
offending_uri = f"{git_repo_root_uri}/{offending_file}"
line_no = line.get("line_no")
contents = line.get("contents")
author = line.get("author")
- severity = 'Info'
- description = "\n".join([
- f"SAST Module: {sast_module}",
- f"Offending URI: {offending_uri}",
- f"Line: {line_no}",
- f"Committed By: {author}",
- "Line Contents:",
- f"```{contents}```"
- ])
-
- impact = "\n".join([
- f"Security Control Impacted: {section}",
- f"NIST 800-53 Security Control Details: {nist_800_53_uri}",
- f"CWE Details: {cwe_uri}"
- ])
-
- mitigation = "\n".join([
- f"NIST 800-53 Security Control Details / Mitigation Strategy: {nist_800_53_uri}",
- ])
+ severity = "Info"
+ description = "\n".join(
+ [
+ f"SAST Module: {sast_module}",
+ f"Offending URI: {offending_uri}",
+ f"Line: {line_no}",
+ f"Committed By: {author}",
+ "Line Contents:",
+ f"```{contents}```",
+ ]
+ )
+
+ impact = "\n".join(
+ [
+ f"Security Control Impacted: {section}",
+ f"NIST 800-53 Security Control Details: {nist_800_53_uri}",
+ f"CWE Details: {cwe_uri}",
+ ]
+ )
+
+ mitigation = "\n".join(
+ [
+ f"NIST 800-53 Security Control Details / Mitigation Strategy: {nist_800_53_uri}",
+ ]
+ )
unique_finding_key = hashlib.sha256(
(offending_uri + contents).encode("utf-8")
@@ -106,7 +113,7 @@ def get_findings(self, filename, test):
cwe=cwe_id,
nb_occurences=1,
steps_to_reproduce=steps_to_reproduce,
- file_path=offending_file
+ file_path=offending_file,
)
findings[unique_finding_key] = finding
diff --git a/dojo/tools/qualys/csv_parser.py b/dojo/tools/qualys/csv_parser.py
index e7377153dd..e210c7aea9 100644
--- a/dojo/tools/qualys/csv_parser.py
+++ b/dojo/tools/qualys/csv_parser.py
@@ -19,11 +19,9 @@ def parse_csv(csv_file) -> [Finding]:
content = csv_file.read()
if isinstance(content, bytes):
- content = content.decode('utf-8')
+ content = content.decode("utf-8")
csv_reader = csv.DictReader(
- io.StringIO(content),
- delimiter=',',
- quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"'
)
report_findings = get_report_findings(csv_reader)
@@ -45,7 +43,7 @@ def get_report_findings(csv_reader) -> [dict]:
report_findings = []
for row in csv_reader:
- if row.get('Title') and row['Title'] != 'Title':
+ if row.get("Title") and row["Title"] != "Title":
report_findings.append(row)
return report_findings
@@ -64,27 +62,31 @@ def _extract_cvss_vectors(cvss_base, cvss_temporal):
A CVSS3 Vector including both Base and Temporal if available
"""
- vector_pattern = r'^\d{1,2}.\d \((.*)\)'
- cvss_vector = 'CVSS:3.0/'
+ vector_pattern = r"^\d{1,2}.\d \((.*)\)"
+ cvss_vector = "CVSS:3.0/"
if cvss_base:
try:
cvss_vector += re.search(vector_pattern, cvss_base).group(1)
except IndexError:
- _logger.error(f'CVSS3 Base Vector not found in {cvss_base}')
+ _logger.error(f"CVSS3 Base Vector not found in {cvss_base}")
except AttributeError:
- _logger.error(f'CVSS3 Base Vector not found in {cvss_base}')
+ _logger.error(f"CVSS3 Base Vector not found in {cvss_base}")
if cvss_temporal:
try:
- cvss_temporal_vector = re.search(vector_pattern, cvss_temporal).group(1)
- cvss_vector += '/'
+ cvss_temporal_vector = re.search(
+ vector_pattern, cvss_temporal
+ ).group(1)
+ cvss_vector += "/"
cvss_vector += cvss_temporal_vector
except IndexError:
_logger.error(
- f'CVSS3 Temporal Vector not found in {cvss_base}')
+ f"CVSS3 Temporal Vector not found in {cvss_base}"
+ )
except AttributeError:
_logger.error(
- f'CVSS3 Temporal Vector not found in {cvss_base}')
+ f"CVSS3 Temporal Vector not found in {cvss_base}"
+ )
return cvss_vector
@@ -98,42 +100,55 @@ def build_findings_from_dict(report_findings: [dict]) -> [Finding]:
"""
severity_lookup = {
- '1': 'Info',
- '2': 'Low',
- '3': 'Medium',
- '4': 'High',
- '5': 'Critical'}
+ "1": "Info",
+ "2": "Low",
+ "3": "Medium",
+ "4": "High",
+ "5": "Critical",
+ }
dojo_findings = []
for report_finding in report_findings:
- if report_finding.get('FQDN'):
- endpoint = Endpoint.from_uri(report_finding.get('FQDN'))
+ if report_finding.get("FQDN"):
+ endpoint = Endpoint.from_uri(report_finding.get("FQDN"))
else:
- endpoint = Endpoint(host=report_finding['IP'])
+ endpoint = Endpoint(host=report_finding["IP"])
finding = Finding(
title=f"QID-{report_finding['QID']} | {report_finding['Title']}",
- mitigation=report_finding['Solution'],
+ mitigation=report_finding["Solution"],
description=f"{report_finding['Threat']}\nResult Evidence: \n{report_finding.get('Threat', 'Not available')}",
- severity=severity_lookup.get(report_finding['Severity'], 'Info'),
- impact=report_finding['Impact'],
- date=datetime.strptime(report_finding['Last Detected'], "%m/%d/%Y %H:%M:%S").date(),
- vuln_id_from_tool=report_finding['QID'],
+ severity=severity_lookup.get(report_finding["Severity"], "Info"),
+ impact=report_finding["Impact"],
+ date=datetime.strptime(
+ report_finding["Last Detected"], "%m/%d/%Y %H:%M:%S"
+ ).date(),
+ vuln_id_from_tool=report_finding["QID"],
cvssv3=_extract_cvss_vectors(
- report_finding['CVSS3 Base'],
- report_finding['CVSS3 Temporal']))
-
- cve_data = report_finding.get('CVE ID')
- finding.unsaved_vulnerability_ids = cve_data.split(',') if ',' in cve_data else [cve_data]
-
- # Qualys reports regression findings as active, but with a Date Last Fixed.
- if report_finding['Date Last Fixed']:
- finding.mitigated = datetime.strptime(report_finding['Date Last Fixed'], "%m/%d/%Y %H:%M:%S")
+ report_finding["CVSS3 Base"], report_finding["CVSS3 Temporal"]
+ ),
+ )
+
+ cve_data = report_finding.get("CVE ID")
+ finding.unsaved_vulnerability_ids = (
+ cve_data.split(",") if "," in cve_data else [cve_data]
+ )
+
+ # Qualys reports regression findings as active, but with a Date Last
+ # Fixed.
+ if report_finding["Date Last Fixed"]:
+ finding.mitigated = datetime.strptime(
+ report_finding["Date Last Fixed"], "%m/%d/%Y %H:%M:%S"
+ )
finding.is_mitigated = True
else:
finding.is_mitigated = False
- finding.active = report_finding['Vuln Status'] in ('Active', 'Re-Opened', 'New')
+ finding.active = report_finding["Vuln Status"] in (
+ "Active",
+ "Re-Opened",
+ "New",
+ )
if finding.active:
finding.mitigated = None
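As context for _extract_cvss_vectors above, the Qualys CSV stores the score and vector in one cell; a sketch with a hypothetical cell value:

    import re

    cvss_base = "9.8 (AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H)"  # hypothetical
    vector_pattern = r"^\d{1,2}\.\d \((.*)\)"
    cvss_vector = "CVSS:3.0/" + re.search(vector_pattern, cvss_base).group(1)

Note that the CVE ID guard above is equivalent to a plain cve_data.split(","), since str.split already returns a single-element list when the separator is absent.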
diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py
index a757cb4733..d86c7f4c50 100644
--- a/dojo/tools/qualys/parser.py
+++ b/dojo/tools/qualys/parser.py
@@ -10,41 +10,43 @@
logger = logging.getLogger(__name__)
-CUSTOM_HEADERS = {'CVSS_score': 'CVSS Score',
- 'ip_address': 'IP Address',
- 'fqdn': 'FQDN',
- 'os': 'OS',
- 'port_status': 'Port',
- 'vuln_name': 'Vulnerability',
- 'vuln_description': 'Description',
- 'solution': 'Solution',
- 'links': 'Links',
- 'cve': 'CVE',
- 'vuln_severity': 'Severity',
- 'QID': 'QID',
- 'first_found': 'First Found',
- 'last_found': 'Last Found',
- 'found_times': 'Found Times',
- 'category': 'Category'
- }
-
-REPORT_HEADERS = ['CVSS_score',
- 'ip_address',
- 'fqdn',
- 'os',
- 'port_status',
- 'vuln_name',
- 'vuln_description',
- 'solution',
- 'links',
- 'cve',
- 'Severity',
- 'QID',
- 'first_found',
- 'last_found',
- 'found_times',
- 'category',
- ]
+CUSTOM_HEADERS = {
+ "CVSS_score": "CVSS Score",
+ "ip_address": "IP Address",
+ "fqdn": "FQDN",
+ "os": "OS",
+ "port_status": "Port",
+ "vuln_name": "Vulnerability",
+ "vuln_description": "Description",
+ "solution": "Solution",
+ "links": "Links",
+ "cve": "CVE",
+ "vuln_severity": "Severity",
+ "QID": "QID",
+ "first_found": "First Found",
+ "last_found": "Last Found",
+ "found_times": "Found Times",
+ "category": "Category",
+}
+
+REPORT_HEADERS = [
+ "CVSS_score",
+ "ip_address",
+ "fqdn",
+ "os",
+ "port_status",
+ "vuln_name",
+ "vuln_description",
+ "solution",
+ "links",
+ "cve",
+ "Severity",
+ "QID",
+ "first_found",
+ "last_found",
+ "found_times",
+ "category",
+]
def htmltext(blob):
@@ -59,11 +61,13 @@ def split_cvss(value, _temp):
return
if len(value) > 4:
split = value.split(" (")
- _temp['CVSS_value'] = float(split[0])
+ _temp["CVSS_value"] = float(split[0])
# remove ")" at the end
- _temp['CVSS_vector'] = CVSS3("CVSS:3.0/" + split[1][:-1]).clean_vector()
+ _temp["CVSS_vector"] = CVSS3(
+ "CVSS:3.0/" + split[1][:-1]
+ ).clean_vector()
else:
- _temp['CVSS_value'] = float(value)
+ _temp["CVSS_value"] = float(value)
def parse_finding(host, tree):
@@ -71,158 +75,173 @@ def parse_finding(host, tree):
issue_row = {}
# IP ADDRESS
- issue_row['ip_address'] = host.findtext('IP')
+ issue_row["ip_address"] = host.findtext("IP")
# FQDN
- issue_row['fqdn'] = host.findtext('DNS')
+ issue_row["fqdn"] = host.findtext("DNS")
# Create Endpoint
- if issue_row['fqdn']:
- ep = Endpoint(host=issue_row['fqdn'])
+ if issue_row["fqdn"]:
+ ep = Endpoint(host=issue_row["fqdn"])
else:
- ep = Endpoint(host=issue_row['ip_address'])
+ ep = Endpoint(host=issue_row["ip_address"])
# OS NAME
- issue_row['os'] = host.findtext('OPERATING_SYSTEM')
+ issue_row["os"] = host.findtext("OPERATING_SYSTEM")
# Scan details
- for vuln_details in host.iterfind('VULN_INFO_LIST/VULN_INFO'):
+ for vuln_details in host.iterfind("VULN_INFO_LIST/VULN_INFO"):
_temp = issue_row
# Port
- _gid = vuln_details.find('QID').attrib['id']
- _port = vuln_details.findtext('PORT')
- _temp['port_status'] = _port
-
- _category = str(vuln_details.findtext('CATEGORY'))
- _result = str(vuln_details.findtext('RESULT'))
- _first_found = str(vuln_details.findtext('FIRST_FOUND'))
- _last_found = str(vuln_details.findtext('LAST_FOUND'))
- _times_found = str(vuln_details.findtext('TIMES_FOUND'))
-
- _temp['date'] = datetime.datetime.strptime(vuln_details.findtext('LAST_FOUND'), "%Y-%m-%dT%H:%M:%SZ").date()
+ _gid = vuln_details.find("QID").attrib["id"]
+ _port = vuln_details.findtext("PORT")
+ _temp["port_status"] = _port
+
+ _category = str(vuln_details.findtext("CATEGORY"))
+ _result = str(vuln_details.findtext("RESULT"))
+ _first_found = str(vuln_details.findtext("FIRST_FOUND"))
+ _last_found = str(vuln_details.findtext("LAST_FOUND"))
+ _times_found = str(vuln_details.findtext("TIMES_FOUND"))
+
+ _temp["date"] = datetime.datetime.strptime(
+ vuln_details.findtext("LAST_FOUND"), "%Y-%m-%dT%H:%M:%SZ"
+ ).date()
# Vuln_status
- status = vuln_details.findtext('VULN_STATUS')
+ status = vuln_details.findtext("VULN_STATUS")
if status == "Active" or status == "Re-Opened" or status == "New":
- _temp['active'] = True
- _temp['mitigated'] = False
- _temp['mitigation_date'] = None
+ _temp["active"] = True
+ _temp["mitigated"] = False
+ _temp["mitigation_date"] = None
else:
- _temp['active'] = False
- _temp['mitigated'] = True
- last_fixed = vuln_details.findtext('LAST_FIXED')
+ _temp["active"] = False
+ _temp["mitigated"] = True
+ last_fixed = vuln_details.findtext("LAST_FIXED")
if last_fixed is not None:
- _temp['mitigation_date'] = datetime.datetime.strptime(last_fixed, "%Y-%m-%dT%H:%M:%SZ").date()
+ _temp["mitigation_date"] = datetime.datetime.strptime(
+ last_fixed, "%Y-%m-%dT%H:%M:%SZ"
+ ).date()
else:
- _temp['mitigation_date'] = None
+ _temp["mitigation_date"] = None
# read cvss value if present
- cvss3 = vuln_details.findtext('CVSS3_FINAL')
+ cvss3 = vuln_details.findtext("CVSS3_FINAL")
if cvss3 is not None and cvss3 != "-":
split_cvss(cvss3, _temp)
else:
- cvss2 = vuln_details.findtext('CVSS_FINAL')
+ cvss2 = vuln_details.findtext("CVSS_FINAL")
if cvss2 is not None and cvss2 != "-":
split_cvss(cvss2, _temp)
# DefectDojo does not support cvssv2
- _temp['CVSS_vector'] = None
+ _temp["CVSS_vector"] = None
- search = ".//GLOSSARY/VULN_DETAILS_LIST/VULN_DETAILS[@id='{}']".format(_gid)
+ search = ".//GLOSSARY/VULN_DETAILS_LIST/VULN_DETAILS[@id='{}']".format(
+ _gid
+ )
vuln_item = tree.find(search)
if vuln_item is not None:
finding = Finding()
# Vuln name
- _temp['vuln_name'] = vuln_item.findtext('TITLE')
+ _temp["vuln_name"] = vuln_item.findtext("TITLE")
# Vuln Description
- _description = str(vuln_item.findtext('THREAT'))
+ _description = str(vuln_item.findtext("THREAT"))
# Solution Strips Heading Workaround(s)
# _temp['solution'] = re.sub('Workaround(s)?:.+\n', '', htmltext(vuln_item.findtext('SOLUTION')))
- _temp['solution'] = htmltext(vuln_item.findtext('SOLUTION'))
+ _temp["solution"] = htmltext(vuln_item.findtext("SOLUTION"))
# Vuln_description
- _temp['vuln_description'] = "\n".join([htmltext(_description),
- htmltext("Category: " + _category),
- htmltext("QID: " + str(_gid)),
- htmltext("Port: " + str(_port)),
- htmltext("Result Evidence: " + _result),
- htmltext("First Found: " + _first_found),
- htmltext("Last Found: " + _last_found),
- htmltext("Times Found: " + _times_found),
- ])
+ _temp["vuln_description"] = "\n".join(
+ [
+ htmltext(_description),
+ htmltext("Category: " + _category),
+ htmltext("QID: " + str(_gid)),
+ htmltext("Port: " + str(_port)),
+ htmltext("Result Evidence: " + _result),
+ htmltext("First Found: " + _first_found),
+ htmltext("Last Found: " + _last_found),
+ htmltext("Times Found: " + _times_found),
+ ]
+ )
# Impact description
- _temp['IMPACT'] = htmltext(vuln_item.findtext('IMPACT'))
+ _temp["IMPACT"] = htmltext(vuln_item.findtext("IMPACT"))
# read cvss value if present and not already read from vuln
- if _temp.get('CVSS_value') is None:
- cvss3 = vuln_item.findtext('CVSS3_SCORE/CVSS3_BASE')
- cvss2 = vuln_item.findtext('CVSS_SCORE/CVSS_BASE')
+ if _temp.get("CVSS_value") is None:
+ cvss3 = vuln_item.findtext("CVSS3_SCORE/CVSS3_BASE")
+ cvss2 = vuln_item.findtext("CVSS_SCORE/CVSS_BASE")
if cvss3 is not None and cvss3 != "-":
split_cvss(cvss3, _temp)
else:
- cvss2 = vuln_item.findtext('CVSS_FINAL')
+ cvss2 = vuln_item.findtext("CVSS_FINAL")
if cvss2 is not None and cvss2 != "-":
split_cvss(cvss2, _temp)
# DefectDojo does not support cvssv2
- _temp['CVSS_vector'] = None
+ _temp["CVSS_vector"] = None
# CVE and LINKS
- _temp_cve_details = vuln_item.iterfind('CVE_ID_LIST/CVE_ID')
+ _temp_cve_details = vuln_item.iterfind("CVE_ID_LIST/CVE_ID")
if _temp_cve_details:
- _cl = {cve_detail.findtext('ID'): cve_detail.findtext('URL') for cve_detail in _temp_cve_details}
- _temp['cve'] = "\n".join(list(_cl.keys()))
- _temp['links'] = "\n".join(list(_cl.values()))
+ _cl = {
+ cve_detail.findtext("ID"): cve_detail.findtext("URL")
+ for cve_detail in _temp_cve_details
+ }
+ _temp["cve"] = "\n".join(list(_cl.keys()))
+ _temp["links"] = "\n".join(list(_cl.values()))
# The CVE in Qualys report might not have a CVSS score, so findings are informational by default
- # unless we can find map to a Severity OR a CVSS score from the findings detail.
+ # unless we can map to a Severity OR a CVSS score from the
+ # findings detail.
sev = None
- if _temp.get('CVSS_value') is not None and _temp['CVSS_value'] > 0:
- if 0.1 <= float(_temp['CVSS_value']) <= 3.9:
- sev = 'Low'
- elif 4.0 <= float(_temp['CVSS_value']) <= 6.9:
- sev = 'Medium'
- elif 7.0 <= float(_temp['CVSS_value']) <= 8.9:
- sev = 'High'
- elif float(_temp['CVSS_value']) >= 9.0:
- sev = 'Critical'
- elif vuln_item.findtext('SEVERITY') is not None:
- if int(vuln_item.findtext('SEVERITY')) == 1:
- sev = 'Informational'
- elif int(vuln_item.findtext('SEVERITY')) == 2:
- sev = 'Low'
- elif int(vuln_item.findtext('SEVERITY')) == 3:
- sev = 'Medium'
- elif int(vuln_item.findtext('SEVERITY')) == 4:
- sev = 'High'
- elif int(vuln_item.findtext('SEVERITY')) == 5:
- sev = 'Critical'
+ if _temp.get("CVSS_value") is not None and _temp["CVSS_value"] > 0:
+ if 0.1 <= float(_temp["CVSS_value"]) <= 3.9:
+ sev = "Low"
+ elif 4.0 <= float(_temp["CVSS_value"]) <= 6.9:
+ sev = "Medium"
+ elif 7.0 <= float(_temp["CVSS_value"]) <= 8.9:
+ sev = "High"
+ elif float(_temp["CVSS_value"]) >= 9.0:
+ sev = "Critical"
+ elif vuln_item.findtext("SEVERITY") is not None:
+ if int(vuln_item.findtext("SEVERITY")) == 1:
+ sev = "Informational"
+ elif int(vuln_item.findtext("SEVERITY")) == 2:
+ sev = "Low"
+ elif int(vuln_item.findtext("SEVERITY")) == 3:
+ sev = "Medium"
+ elif int(vuln_item.findtext("SEVERITY")) == 4:
+ sev = "High"
+ elif int(vuln_item.findtext("SEVERITY")) == 5:
+ sev = "Critical"
elif sev is None:
- sev = 'Informational'
+ sev = "Informational"
finding = None
if _temp_cve_details:
refs = "\n".join(list(_cl.values()))
- finding = Finding(title="QID-" + _gid[4:] + " | " + _temp['vuln_name'],
- mitigation=_temp['solution'],
- description=_temp['vuln_description'],
- severity=sev,
- references=refs,
- impact=_temp['IMPACT'],
- date=_temp['date'],
- vuln_id_from_tool=_gid,
- )
+ finding = Finding(
+ title="QID-" + _gid[4:] + " | " + _temp["vuln_name"],
+ mitigation=_temp["solution"],
+ description=_temp["vuln_description"],
+ severity=sev,
+ references=refs,
+ impact=_temp["IMPACT"],
+ date=_temp["date"],
+ vuln_id_from_tool=_gid,
+ )
else:
- finding = Finding(title="QID-" + _gid[4:] + " | " + _temp['vuln_name'],
- mitigation=_temp['solution'],
- description=_temp['vuln_description'],
- severity=sev,
- references=_gid,
- impact=_temp['IMPACT'],
- date=_temp['date'],
- vuln_id_from_tool=_gid,
- )
- finding.mitigated = _temp['mitigation_date']
- finding.is_mitigated = _temp['mitigated']
- finding.active = _temp['active']
- if _temp.get('CVSS_vector') is not None:
- finding.cvssv3 = _temp.get('CVSS_vector')
+ finding = Finding(
+ title="QID-" + _gid[4:] + " | " + _temp["vuln_name"],
+ mitigation=_temp["solution"],
+ description=_temp["vuln_description"],
+ severity=sev,
+ references=_gid,
+ impact=_temp["IMPACT"],
+ date=_temp["date"],
+ vuln_id_from_tool=_gid,
+ )
+ finding.mitigated = _temp["mitigation_date"]
+ finding.is_mitigated = _temp["mitigated"]
+ finding.active = _temp["active"]
+ if _temp.get("CVSS_vector") is not None:
+ finding.cvssv3 = _temp.get("CVSS_vector")
finding.verified = True
finding.unsaved_endpoints = list()
finding.unsaved_endpoints.append(ep)
@@ -233,7 +252,7 @@ def parse_finding(host, tree):
def qualys_parser(qualys_xml_file):
parser = etree.XMLParser()
tree = etree.parse(qualys_xml_file, parser)
- host_list = tree.find('HOST_LIST')
+ host_list = tree.find("HOST_LIST")
finding_list = []
if host_list is not None:
for host in host_list:
@@ -242,7 +261,6 @@ def qualys_parser(qualys_xml_file):
class QualysParser(object):
-
def get_scan_types(self):
return ["Qualys Scan"]
@@ -253,7 +271,7 @@ def get_description_for_scan_types(self, scan_type):
return "Qualys WebGUI output files can be imported in XML format."
def get_findings(self, file, test):
- if file.name.lower().endswith('.csv'):
+ if file.name.lower().endswith(".csv"):
return csv_parser.parse_csv(file)
else:
return qualys_parser(file)
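The CVSS-to-severity bucketing in parse_finding above can be read as a small helper; the function name here is my own (the parser keeps this logic inline), with the thresholds copied verbatim:

    def cvss_to_severity(cvss_value: float) -> str:
        if 0.1 <= cvss_value <= 3.9:
            return "Low"
        elif 4.0 <= cvss_value <= 6.9:
            return "Medium"
        elif 7.0 <= cvss_value <= 8.9:
            return "High"
        elif cvss_value >= 9.0:
            return "Critical"
        return "Informational"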
diff --git a/dojo/tools/qualys_infrascan_webgui/parser.py b/dojo/tools/qualys_infrascan_webgui/parser.py
index 29c16742e6..e60084619a 100644
--- a/dojo/tools/qualys_infrascan_webgui/parser.py
+++ b/dojo/tools/qualys_infrascan_webgui/parser.py
@@ -21,76 +21,89 @@ def issue_r(raw_row, vuln, scan_date):
issue_row = {}
# IP ADDRESS
- issue_row['ip_address'] = raw_row.get('value')
+ issue_row["ip_address"] = raw_row.get("value")
# FQDN
- issue_row['fqdn'] = raw_row.get('name')
- if issue_row['fqdn'] == "No registered hostname":
- issue_row['fqdn'] = None
+ issue_row["fqdn"] = raw_row.get("name")
+ if issue_row["fqdn"] == "No registered hostname":
+ issue_row["fqdn"] = None
# port
- _port = raw_row.get('port')
+ _port = raw_row.get("port")
# Create Endpoint
- if issue_row['fqdn']:
- ep = Endpoint(host=issue_row['fqdn'])
+ if issue_row["fqdn"]:
+ ep = Endpoint(host=issue_row["fqdn"])
else:
- ep = Endpoint(host=issue_row['ip_address'])
+ ep = Endpoint(host=issue_row["ip_address"])
# OS NAME
- issue_row['os'] = raw_row.findtext('OS')
+ issue_row["os"] = raw_row.findtext("OS")
- # Scan details - VULNS//VULN indicates we only care about confirmed vulnerabilities
- for vuln_cat in raw_row.findall('VULNS/CAT'):
- _category = str(vuln_cat.get('value'))
- for vuln_details in vuln_cat.findall('VULN'):
+ # Scan details - VULNS//VULN indicates we only care about confirmed
+ # vulnerabilities
+ for vuln_cat in raw_row.findall("VULNS/CAT"):
+ _category = str(vuln_cat.get("value"))
+ for vuln_details in vuln_cat.findall("VULN"):
_temp = issue_row
- _gid = vuln_details.get('number')
+ _gid = vuln_details.get("number")
- _temp['port_status'] = _port
+ _temp["port_status"] = _port
- _result = str(vuln_details.findtext('RESULT'))
+ _result = str(vuln_details.findtext("RESULT"))
# Vuln name
- _temp['vuln_name'] = vuln_details.findtext('TITLE')
+ _temp["vuln_name"] = vuln_details.findtext("TITLE")
# Vuln Description
- _description = str(vuln_details.findtext('DIAGNOSIS'))
+ _description = str(vuln_details.findtext("DIAGNOSIS"))
# Solution Strips Heading Workaround(s)
- _temp['solution'] = htmltext(str(vuln_details.findtext('SOLUTION')))
+ _temp["solution"] = htmltext(
+ str(vuln_details.findtext("SOLUTION"))
+ )
# Vuln_description
- _temp['vuln_description'] = "\n".join([htmltext(_description),
- htmltext("**Category:** " + _category),
- htmltext("**QID:** " + str(_gid)),
- htmltext("**Port:** " + str(_port)),
- htmltext("**Result Evidence:** " + _result),
- ])
+ _temp["vuln_description"] = "\n".join(
+ [
+ htmltext(_description),
+ htmltext("**Category:** " + _category),
+ htmltext("**QID:** " + str(_gid)),
+ htmltext("**Port:** " + str(_port)),
+ htmltext("**Result Evidence:** " + _result),
+ ]
+ )
# Impact description
- _temp['IMPACT'] = htmltext(str(vuln_details.findtext('CONSEQUENCE')))
+ _temp["IMPACT"] = htmltext(
+ str(vuln_details.findtext("CONSEQUENCE"))
+ )
# CVE and LINKS
_cl = []
- _temp_cve_details = vuln_details.iterfind('CVE_ID_LIST/CVE_ID')
+ _temp_cve_details = vuln_details.iterfind("CVE_ID_LIST/CVE_ID")
if _temp_cve_details:
- _cl = {cve_detail.findtext('ID'): cve_detail.findtext('URL') for cve_detail in _temp_cve_details}
- _temp['cve'] = "\n".join(list(_cl.keys()))
- _temp['links'] = "\n".join(list(_cl.values()))
+ _cl = {
+ cve_detail.findtext("ID"): cve_detail.findtext("URL")
+ for cve_detail in _temp_cve_details
+ }
+ _temp["cve"] = "\n".join(list(_cl.keys()))
+ _temp["links"] = "\n".join(list(_cl.values()))
# The CVE in Qualys report might not have a CVSS score, so findings are informational by default
- # unless we can find map to a Severity OR a CVSS score from the findings detail.
- sev = qualys_convert_severity(vuln_details.get('severity'))
+ # unless we can map to a Severity OR a CVSS score from the
+ # findings detail.
+ sev = qualys_convert_severity(vuln_details.get("severity"))
refs = "\n".join(list(_cl.values()))
- finding = Finding(title=_temp['vuln_name'],
- mitigation=_temp['solution'],
- description=_temp['vuln_description'],
- severity=sev,
- references=refs,
- impact=_temp['IMPACT'],
- vuln_id_from_tool=_gid,
- date=scan_date,
- )
+ finding = Finding(
+ title=_temp["vuln_name"],
+ mitigation=_temp["solution"],
+ description=_temp["vuln_description"],
+ severity=sev,
+ references=refs,
+ impact=_temp["IMPACT"],
+ vuln_id_from_tool=_gid,
+ date=scan_date,
+ )
finding.unsaved_endpoints = list()
finding.unsaved_endpoints.append(ep)
ret_rows.append(finding)
@@ -99,22 +112,21 @@ def issue_r(raw_row, vuln, scan_date):
def qualys_convert_severity(raw_val):
val = str(raw_val).strip()
- if '1' == val:
- return 'Info'
- elif '2' == val:
- return 'Low'
- elif '3' == val:
- return 'Medium'
- elif '4' == val:
- return 'High'
- elif '5' == val:
- return 'Critical'
+ if "1" == val:
+ return "Info"
+ elif "2" == val:
+ return "Low"
+ elif "3" == val:
+ return "Medium"
+ elif "4" == val:
+ return "High"
+ elif "5" == val:
+ return "Critical"
else:
- return 'Info'
+ return "Info"
class QualysInfrascanWebguiParser(object):
-
def get_scan_types(self):
return ["Qualys Infrastructure Scan (WebGUI XML)"]
@@ -129,11 +141,11 @@ def get_findings(self, file, test):
# fetch scan date e.g.: 2020-01-30T09:45:41Z
scan_date = datetime.now()
- for i in data.findall('HEADER/KEY'):
- if i.get('value') == 'DATE':
+ for i in data.findall("HEADER/KEY"):
+ if i.get("value") == "DATE":
scan_date = parser.isoparse(i.text)
master_list = []
- for issue in data.findall('IP'):
+ for issue in data.findall("IP"):
master_list += issue_r(issue, data, scan_date)
return master_list
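qualys_convert_severity above is a straight if/elif ladder; an equivalent table-driven sketch (a hypothetical refactor with the same mapping and the same "Info" fallback):

    # Same mapping as the if/elif chain above; unknown values fall back to "Info".
    SEVERITY_BY_LEVEL = {"1": "Info", "2": "Low", "3": "Medium", "4": "High", "5": "Critical"}

    def qualys_convert_severity(raw_val) -> str:
        return SEVERITY_BY_LEVEL.get(str(raw_val).strip(), "Info")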
diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py
index eca2abc335..48b3b52dfc 100644
--- a/dojo/tools/qualys_webapp/parser.py
+++ b/dojo/tools/qualys_webapp/parser.py
@@ -24,16 +24,12 @@
# Since Info findings are not recorded in the Confirmed Vulnerability or
# Potential Vulnerability categories, a severity of 1 is shown as low
# in the portal.
-SEVERITY_MATCH = ['Low',
- 'Low',
- 'Medium',
- 'High',
- 'Critical']
+SEVERITY_MATCH = ["Low", "Low", "Medium", "High", "Critical"]
def truncate_str(value: str, maxlen: int):
if len(value) > maxlen:
- return value[:maxlen - 12] + " (truncated)"
+ return value[: maxlen - 12] + " (truncated)"
return value
@@ -46,7 +42,19 @@ def get_cwe(cwe):
return 0
-def attach_unique_extras(endpoints, requests, responses, finding, date, qid, param, payload, unique_id, active_text, test):
+def attach_unique_extras(
+ endpoints,
+ requests,
+ responses,
+ finding,
+ date,
+ qid,
+ param,
+ payload,
+ unique_id,
+ active_text,
+ test,
+):
# finding should always be None, since unique IDs are being used
if finding is None:
finding = Finding()
@@ -73,24 +81,32 @@ def attach_unique_extras(endpoints, requests, responses, finding, date, qid, par
port = "" # Set port to empty string by default
# Split the returned network address into host and port
try: # If there is a port number attached to the host address
- host, port = parsedUrl.netloc.split(':')
- except: # there's no port attached to address
+ host, port = parsedUrl.netloc.split(":")
+ except ValueError: # there's no port attached to the address
host = parsedUrl.netloc
- finding.unsaved_endpoints.append(Endpoint(
- host=truncate_str(host, 500), port=port,
- path=truncate_str(path, 500),
- protocol=protocol,
- query=truncate_str(query, 1000), fragment=truncate_str(fragment, 500)))
+ finding.unsaved_endpoints.append(
+ Endpoint(
+ host=truncate_str(host, 500),
+ port=port,
+ path=truncate_str(path, 500),
+ protocol=protocol,
+ query=truncate_str(query, 1000),
+ fragment=truncate_str(fragment, 500),
+ )
+ )
for i in range(0, len(requests)):
- if requests[i] != '' or responses[i] != '':
- finding.unsaved_req_resp.append({"req": requests[i], "resp": responses[i]})
+ if requests[i] != "" or responses[i] != "":
+ finding.unsaved_req_resp.append(
+ {"req": requests[i], "resp": responses[i]}
+ )
if active_text is not None:
- if 'fixed' in active_text.lower():
+ if "fixed" in active_text.lower():
finding.active = False
- # TODO: may need to look up by finding ID and mark current finding as fixed
+ # TODO: may need to look up by finding ID and mark current finding
+ # as fixed
else:
finding.active = True
@@ -118,8 +134,10 @@ def attach_extras(endpoints, requests, responses, finding, date, qid, test):
finding.unsaved_endpoints.append(Endpoint.from_uri(endpoint))
for i in range(0, len(requests)):
- if requests[i] != '' or responses[i] != '':
- finding.unsaved_req_resp.append({"req": requests[i], "resp": responses[i]})
+ if requests[i] != "" or responses[i] != "":
+ finding.unsaved_req_resp.append(
+ {"req": requests[i], "resp": responses[i]}
+ )
return finding
@@ -128,23 +146,23 @@ def attach_extras(endpoints, requests, responses, finding, date, qid, test):
# found in this section of the report
def get_request(request):
if request is not None:
- header = ''
- header += str(request.findtext('METHOD')) + ': '
- header += str(request.findtext('URL')) + '\n'
- headers = request.find('HEADERS')
+ header = ""
+ header += str(request.findtext("METHOD")) + ": "
+ header += str(request.findtext("URL")) + "\n"
+ headers = request.find("HEADERS")
if headers is not None:
- for head in headers.iter('HEADER'):
- header += str(head.findtext('key')) + ': '
- header += str(head.findtext('value')) + '\n'
+ for head in headers.iter("HEADER"):
+ header += str(head.findtext("key")) + ": "
+ header += str(head.findtext("value")) + "\n"
return str(header)
- return ''
+ return ""
# Build a response string
def get_response(response):
if response is not None:
- return decode_tag(response.find('CONTENTS'))
- return ''
+ return decode_tag(response.find("CONTENTS"))
+ return ""
# Decode an XML tag with base64 if the tag has base64=true set.
@@ -162,127 +180,152 @@ def decode_tag(tag):
def get_request_response(payloads):
requests = []
responses = []
- for payload in payloads.iter('PAYLOAD'):
- requests.append(get_request(payload.find('REQUEST')))
- responses.append(get_response(payload.find('RESPONSE')))
+ for payload in payloads.iter("PAYLOAD"):
+ requests.append(get_request(payload.find("REQUEST")))
+ responses.append(get_response(payload.find("RESPONSE")))
return [requests, responses]
-def get_unique_vulnerabilities(vulnerabilities, test, is_info=False, is_app_report=False):
+def get_unique_vulnerabilities(
+ vulnerabilities, test, is_info=False, is_app_report=False
+):
findings = {}
# Iterate through all vulnerabilities to pull necessary info
for vuln in vulnerabilities:
urls = []
- requests = response = ''
- qid = int(vuln.findtext('QID'))
- url = vuln.findtext('URL')
+ requests = response = ""
+ qid = int(vuln.findtext("QID"))
+ url = vuln.findtext("URL")
if url is not None:
urls.append(str(url))
- access_path = vuln.find('ACCESS_PATH')
+ access_path = vuln.find("ACCESS_PATH")
if access_path is not None:
- urls += [url.text for url in access_path.iter('URL')]
- payloads = vuln.find('PAYLOADS')
+ urls += [url.text for url in access_path.iter("URL")]
+ payloads = vuln.find("PAYLOADS")
if payloads is not None:
req_resps = get_request_response(payloads)
else:
req_resps = [[], []]
if is_info:
- raw_finding_date = vuln.findtext('LAST_TIME_DETECTED')
+ raw_finding_date = vuln.findtext("LAST_TIME_DETECTED")
elif is_app_report:
- raw_finding_date = vuln.findtext('FIRST_TIME_DETECTED')
+ raw_finding_date = vuln.findtext("FIRST_TIME_DETECTED")
else:
- raw_finding_date = vuln.findtext('DETECTION_DATE')
+ raw_finding_date = vuln.findtext("DETECTION_DATE")
# Qualys uses a non-standard date format.
if raw_finding_date is not None:
if raw_finding_date.endswith("GMT"):
- finding_date = datetime.strptime(raw_finding_date, "%d %b %Y %I:%M%p GMT")
+ finding_date = datetime.strptime(
+ raw_finding_date, "%d %b %Y %I:%M%p GMT"
+ )
else:
- finding_date = datetime.strptime(raw_finding_date, "%d %b %Y %I:%M%p GMT%z")
+ finding_date = datetime.strptime(
+ raw_finding_date, "%d %b %Y %I:%M%p GMT%z"
+ )
else:
finding_date = None
# Updating to include customized values
- unique_id = vuln.findtext('UNIQUE_ID')
- active_text = vuln.findtext('STATUS')
+ unique_id = vuln.findtext("UNIQUE_ID")
+ active_text = vuln.findtext("STATUS")
param = None
payload = None
if not is_info:
- param = vuln.findtext('PARAM')
- payload = vuln.findtext('PAYLOADS/PAYLOAD/PAYLOAD')
-
- findings[unique_id] = attach_unique_extras(urls, req_resps[0], req_resps[1], None, finding_date, qid, param, payload,
- unique_id, active_text, test)
+ param = vuln.findtext("PARAM")
+ payload = vuln.findtext("PAYLOADS/PAYLOAD/PAYLOAD")
+
+ findings[unique_id] = attach_unique_extras(
+ urls,
+ req_resps[0],
+ req_resps[1],
+ None,
+ finding_date,
+ qid,
+ param,
+ payload,
+ unique_id,
+ active_text,
+ test,
+ )
return findings
# Traverse and retrieve any information in the VULNERABILITY_LIST
# section of the report. This includes all endpoints and request/response pairs
-def get_vulnerabilities(vulnerabilities, test, is_info=False, is_app_report=False):
+def get_vulnerabilities(
+ vulnerabilities, test, is_info=False, is_app_report=False
+):
findings = {}
# Iterate through all vulnerabilities to pull necessary info
for vuln in vulnerabilities:
urls = []
- requests = response = ''
- qid = int(vuln.findtext('QID'))
- url = vuln.findtext('URL')
+ requests = response = ""
+ qid = int(vuln.findtext("QID"))
+ url = vuln.findtext("URL")
if url is not None:
urls.append(str(url))
- access_path = vuln.find('ACCESS_PATH')
+ access_path = vuln.find("ACCESS_PATH")
if access_path is not None:
- urls += [url.text for url in access_path.iter('URL')]
- payloads = vuln.find('PAYLOADS')
+ urls += [url.text for url in access_path.iter("URL")]
+ payloads = vuln.find("PAYLOADS")
if payloads is not None:
req_resps = get_request_response(payloads)
else:
req_resps = [[], []]
if is_info:
- raw_finding_date = vuln.findtext('LAST_TIME_DETECTED')
+ raw_finding_date = vuln.findtext("LAST_TIME_DETECTED")
elif is_app_report:
- raw_finding_date = vuln.findtext('FIRST_TIME_DETECTED')
+ raw_finding_date = vuln.findtext("FIRST_TIME_DETECTED")
else:
- raw_finding_date = vuln.findtext('DETECTION_DATE')
+ raw_finding_date = vuln.findtext("DETECTION_DATE")
# Qualys uses a non-standard date format.
if raw_finding_date is not None:
if raw_finding_date.endswith("GMT"):
- finding_date = datetime.strptime(raw_finding_date, "%d %b %Y %I:%M%p GMT")
+ finding_date = datetime.strptime(
+ raw_finding_date, "%d %b %Y %I:%M%p GMT"
+ )
else:
- finding_date = datetime.strptime(raw_finding_date, "%d %b %Y %I:%M%p GMT%z")
+ finding_date = datetime.strptime(
+ raw_finding_date, "%d %b %Y %I:%M%p GMT%z"
+ )
else:
finding_date = None
finding = findings.get(qid, None)
- findings[qid] = attach_extras(urls, req_resps[0], req_resps[1], finding, finding_date, qid, test)
+ findings[qid] = attach_extras(
+ urls, req_resps[0], req_resps[1], finding, finding_date, qid, test
+ )
return findings
# Retrieve information from a single glossary entry such as description,
# severity, title, impact, mitigation, and CWE
def get_glossary_item(glossary, finding, is_info=False, enable_weakness=False):
- title = glossary.findtext('TITLE')
+ title = glossary.findtext("TITLE")
if title is not None:
finding.title = str(title)
- severity = glossary.findtext('SEVERITY')
+ severity = glossary.findtext("SEVERITY")
if severity is not None:
- group = glossary.findtext('GROUP')
+ group = glossary.findtext("GROUP")
if is_info and (not enable_weakness or group in ("DIAG", "IG")):
# Scan Diagnostics are always Info.
finding.severity = "Info"
else:
finding.severity = SEVERITY_MATCH[int(severity) - 1]
- description = glossary.findtext('DESCRIPTION')
+ description = glossary.findtext("DESCRIPTION")
if description is not None:
finding.description = str(description)
- impact = glossary.findtext('IMPACT')
+ impact = glossary.findtext("IMPACT")
if impact is not None:
finding.impact = str(impact)
- solution = glossary.findtext('SOLUTION')
+ solution = glossary.findtext("SOLUTION")
if solution is not None:
finding.mitigation = str(solution)
- cwe = glossary.findtext('CWE')
+ cwe = glossary.findtext("CWE")
if cwe is not None:
finding.cwe = int(get_cwe(str(cwe)))
return finding
@@ -290,30 +333,44 @@ def get_glossary_item(glossary, finding, is_info=False, enable_weakness=False):
# Retrieve information from a single information gathered entry
def get_info_item(info_gathered, finding):
- data = info_gathered.find('DATA')
+ data = info_gathered.find("DATA")
if data is not None:
- finding.description += '\n\n' + decode_tag(data)
+ finding.description += "\n\n" + decode_tag(data)
return finding
# Create findings report for all unique vulnerabilities in the report
-def get_unique_items(vulnerabilities, info_gathered, glossary, is_app_report, test, enable_weakness=False):
- ig_qid_list = [int(ig.findtext('QID')) for ig in info_gathered]
- g_qid_list = [int(g.findtext('QID')) for g in glossary]
+def get_unique_items(
+ vulnerabilities,
+ info_gathered,
+ glossary,
+ is_app_report,
+ test,
+ enable_weakness=False,
+):
+ ig_qid_list = [int(ig.findtext("QID")) for ig in info_gathered]
+ g_qid_list = [int(g.findtext("QID")) for g in glossary]
# This dict has findings mapped by unique ID to remove any duplicates
findings = {}
- total = 0
- for unique_id, finding in get_unique_vulnerabilities(vulnerabilities, test, False, is_app_report).items():
+ for unique_id, finding in get_unique_vulnerabilities(
+ vulnerabilities, test, False, is_app_report
+ ).items():
qid = int(finding.vuln_id_from_tool)
if qid in g_qid_list:
index = g_qid_list.index(qid)
- findings[unique_id] = get_glossary_item(glossary[index], finding, enable_weakness)
- for unique_id, finding in get_unique_vulnerabilities(info_gathered, test, True, is_app_report).items():
+ findings[unique_id] = get_glossary_item(
+ glossary[index], finding, enable_weakness=enable_weakness
+ )
+ for unique_id, finding in get_unique_vulnerabilities(
+ info_gathered, test, True, is_app_report
+ ).items():
qid = int(finding.vuln_id_from_tool)
if qid in g_qid_list:
index = g_qid_list.index(qid)
- finding = get_glossary_item(glossary[index], finding, True, enable_weakness)
+ finding = get_glossary_item(
+ glossary[index], finding, True, enable_weakness
+ )
if qid in ig_qid_list:
index = ig_qid_list.index(qid)
findings[unique_id] = get_info_item(info_gathered[index], finding)
@@ -321,21 +378,36 @@ def get_unique_items(vulnerabilities, info_gathered, glossary, is_app_report, te
# Create finding items for all vulnerabilities in the report
-def get_items(vulnerabilities, info_gathered, glossary, is_app_report, test, enable_weakness=False):
- ig_qid_list = [int(ig.findtext('QID')) for ig in info_gathered]
- g_qid_list = [int(g.findtext('QID')) for g in glossary]
+def get_items(
+ vulnerabilities,
+ info_gathered,
+ glossary,
+ is_app_report,
+ test,
+ enable_weakness=False,
+):
+ ig_qid_list = [int(ig.findtext("QID")) for ig in info_gathered]
+ g_qid_list = [int(g.findtext("QID")) for g in glossary]
# This dict has findings mapped by QID to remove any duplicates
findings = {}
- for qid, finding in get_vulnerabilities(vulnerabilities, test, False, is_app_report).items():
+ for qid, finding in get_vulnerabilities(
+ vulnerabilities, test, False, is_app_report
+ ).items():
if qid in g_qid_list:
index = g_qid_list.index(qid)
- findings[qid] = get_glossary_item(glossary[index], finding, enable_weakness)
- for qid, finding in get_vulnerabilities(info_gathered, test, True, is_app_report).items():
+ findings[qid] = get_glossary_item(
+ glossary[index], finding, enable_weakness=enable_weakness
+ )
+ for qid, finding in get_vulnerabilities(
+ info_gathered, test, True, is_app_report
+ ).items():
if qid in g_qid_list:
index = g_qid_list.index(qid)
- finding = get_glossary_item(glossary[index], finding, True, enable_weakness)
+ finding = get_glossary_item(
+ glossary[index], finding, True, enable_weakness
+ )
if qid in ig_qid_list:
index = ig_qid_list.index(qid)
findings[qid] = get_info_item(info_gathered[index], finding)
@@ -347,28 +419,54 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False):
if qualys_xml_file is None:
return []
- # supposed to be safe against XEE: https://docs.python.org/3/library/xml.html#xml-vulnerabilities
+ # supposed to be safe against XXE:
+ # https://docs.python.org/3/library/xml.html#xml-vulnerabilities
tree = xml.etree.ElementTree.parse(qualys_xml_file)
- is_app_report = tree.getroot().tag == 'WAS_WEBAPP_REPORT'
+ is_app_report = tree.getroot().tag == "WAS_WEBAPP_REPORT"
if is_app_report:
- vulnerabilities = tree.findall('./RESULTS/WEB_APPLICATION/VULNERABILITY_LIST/VULNERABILITY')
- info_gathered = tree.findall('./RESULTS/WEB_APPLICATION/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED')
+ vulnerabilities = tree.findall(
+ "./RESULTS/WEB_APPLICATION/VULNERABILITY_LIST/VULNERABILITY"
+ )
+ info_gathered = tree.findall(
+ "./RESULTS/WEB_APPLICATION/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED"
+ )
else:
- vulnerabilities = tree.findall('./RESULTS/VULNERABILITY_LIST/VULNERABILITY')
- info_gathered = tree.findall('./RESULTS/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED')
- glossary = tree.findall('./GLOSSARY/QID_LIST/QID')
+ vulnerabilities = tree.findall(
+ "./RESULTS/VULNERABILITY_LIST/VULNERABILITY"
+ )
+ info_gathered = tree.findall(
+ "./RESULTS/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED"
+ )
+ glossary = tree.findall("./GLOSSARY/QID_LIST/QID")
if unique:
- items = list(get_unique_items(vulnerabilities, info_gathered, glossary, is_app_report, test, enable_weakness).values())
+ items = list(
+ get_unique_items(
+ vulnerabilities,
+ info_gathered,
+ glossary,
+ is_app_report,
+ test,
+ enable_weakness,
+ ).values()
+ )
else:
- items = list(get_items(vulnerabilities, info_gathered, glossary, is_app_report, test, enable_weakness).values())
+ items = list(
+ get_items(
+ vulnerabilities,
+ info_gathered,
+ glossary,
+ is_app_report,
+ test,
+ enable_weakness,
+ ).values()
+ )
return items
class QualysWebAppParser(object):
-
def get_scan_types(self):
return ["Qualys Webapp Scan"]
@@ -378,5 +476,9 @@ def get_label_for_scan_types(self, scan_type):
def get_description_for_scan_types(self, scan_type):
return "Qualys WebScan output files can be imported in XML format."
- def get_findings(self, file, test, enable_weakness=QUALYS_WAS_WEAKNESS_IS_VULN):
- return qualys_webapp_parser(file, test, QUALYS_WAS_UNIQUE_ID, enable_weakness)
+ def get_findings(
+ self, file, test, enable_weakness=QUALYS_WAS_WEAKNESS_IS_VULN
+ ):
+ return qualys_webapp_parser(
+ file, test, QUALYS_WAS_UNIQUE_ID, enable_weakness
+ )
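The two strptime formats above cover Qualys WAS timestamps with and without an explicit offset; a sketch with hypothetical values:

    from datetime import datetime

    # Offset-naive variant (trailing "GMT") and offset-aware variant ("GMT%z").
    naive = datetime.strptime("12 Jun 2023 01:15AM GMT", "%d %b %Y %I:%M%p GMT")
    aware = datetime.strptime("12 Jun 2023 01:15AM GMT+0200", "%d %b %Y %I:%M%p GMT%z")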
diff --git a/dojo/tools/retirejs/parser.py b/dojo/tools/retirejs/parser.py
index 2ddbe7e523..2482d517dc 100644
--- a/dojo/tools/retirejs/parser.py
+++ b/dojo/tools/retirejs/parser.py
@@ -5,7 +5,6 @@
class RetireJsParser(object):
-
def get_scan_types(self):
return ["Retire.js Scan"]
@@ -21,44 +20,56 @@ def get_findings(self, json_output, test):
def get_items(self, tree, test):
items = {}
- if 'data' in tree:
- tree = tree['data']
+ if "data" in tree:
+ tree = tree["data"]
for node in tree:
- for result in node['results']:
- if 'vulnerabilities' in result:
- for vulnerability in result['vulnerabilities']:
- item = self.get_item(vulnerability, test, node['file'])
- item.title += " (" + result['component'] + ", " + result['version'] + ")"
- item.description += "\n\n Raw Result: " + str(json.dumps(vulnerability, indent=4, sort_keys=True))
+ for result in node["results"]:
+ if "vulnerabilities" in result:
+ for vulnerability in result["vulnerabilities"]:
+ item = self.get_item(vulnerability, test, node["file"])
+ item.title += (
+ " ("
+ + result["component"]
+ + ", "
+ + result["version"]
+ + ")"
+ )
+ item.description += "\n\n Raw Result: " + str(
+ json.dumps(vulnerability, indent=4, sort_keys=True)
+ )
item.references = item.references
- item.component_name = result.get('component')
- item.component_version = result.get('version')
- item.file_path = node['file']
+ item.component_name = result.get("component")
+ item.component_version = result.get("version")
+ item.file_path = node["file"]
- encrypted_file = node['file']
- unique_key = hashlib.md5((item.title + item.references + encrypted_file).encode()).hexdigest()
+ encrypted_file = node["file"]
+ unique_key = hashlib.md5(
+ (
+ item.title + item.references + encrypted_file
+ ).encode()
+ ).hexdigest()
items[unique_key] = item
return list(items.values())
def get_item(self, item_node, test, file):
title = ""
- if 'identifiers' in item_node:
- if 'summary' in item_node['identifiers']:
- title = item_node['identifiers']['summary']
- elif 'CVE' in item_node['identifiers']:
- title = "".join(item_node['identifiers']['CVE'])
- elif 'osvdb' in item_node['identifiers']:
- title = "".join(item_node['identifiers']['osvdb'])
+ if "identifiers" in item_node:
+ if "summary" in item_node["identifiers"]:
+ title = item_node["identifiers"]["summary"]
+ elif "CVE" in item_node["identifiers"]:
+ title = "".join(item_node["identifiers"]["CVE"])
+ elif "osvdb" in item_node["identifiers"]:
+ title = "".join(item_node["identifiers"]["osvdb"])
finding = Finding(
title=title,
test=test,
cwe=1035, # Vulnerable Third Party Component
- severity=item_node['severity'].title(),
+ severity=item_node["severity"].title(),
description=title + "\n\n Affected File - " + file,
file_path=file,
- references="\n".join(item_node['info']),
+ references="\n".join(item_node["info"]),
false_p=False,
duplicate=False,
out_of_scope=False,
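The Retire.js unique key above is an MD5 over concatenated fields; a self-contained sketch (values are hypothetical):

    import hashlib

    title = "CVE-2020-11022"
    references = "https://example.invalid/advisory"  # hypothetical reference URL
    file_path = "static/js/jquery.js"
    unique_key = hashlib.md5((title + references + file_path).encode()).hexdigest()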
diff --git a/dojo/tools/risk_recon/api.py b/dojo/tools/risk_recon/api.py
index dc420067a3..0ac61f805d 100644
--- a/dojo/tools/risk_recon/api.py
+++ b/dojo/tools/risk_recon/api.py
@@ -11,17 +11,17 @@ def __init__(self, api_key, endpoint, data):
if not self.key:
raise Exception(
- 'Please supply a Risk Recon API key. \n'
- 'This can be generated in the system admin panel. \n'
- 'See https://documentation.defectdojo.com/integrations/import/#risk-recon-api-importer \n'
+ "Please supply a Risk Recon API key. \n"
+ "This can be generated in the system admin panel. \n"
+ "See https://documentation.defectdojo.com/integrations/import/#risk-recon-api-importer \n"
)
if not self.url:
raise Exception(
- 'Please supply a Risk Recon API url. \n'
- 'A general url is https://api.riskrecon.com/v1/ \n'
- 'See https://documentation.defectdojo.com/integrations/import/#risk-recon-api-importer \n'
+ "Please supply a Risk Recon API url. \n"
+ "A general url is https://api.riskrecon.com/v1/ \n"
+ "See https://documentation.defectdojo.com/integrations/import/#risk-recon-api-importer \n"
)
- if self.url.endswith('/'):
+ if self.url.endswith("/"):
self.url = endpoint[:-1]
self.session = requests.Session()
self.map_toes()
@@ -29,11 +29,8 @@ def __init__(self, api_key, endpoint, data):
def map_toes(self):
response = self.session.get(
- url='{}/toes'.format(self.url),
- headers={
- 'accept': 'application/json',
- 'Authorization': self.key
- }
+ url="{}/toes".format(self.url),
+ headers={"accept": "application/json", "Authorization": self.key},
)
if response.ok:
@@ -41,24 +38,26 @@ def map_toes(self):
data = response.json()
if isinstance(self.data, list):
for company in self.data:
- name = company.get('name', None)
- filters = company.get('filters', None)
+ name = company.get("name", None)
+ filters = company.get("filters", None)
if name:
comps[name] = filters
name_list = comps.keys()
for item in data:
- toe_id = item.get('toe_id', None)
- name = item.get('toe_short_name', None)
+ toe_id = item.get("toe_id", None)
+ name = item.get("toe_short_name", None)
if not comps or name in name_list:
filters = comps.get(name, None)
self.toe_map[toe_id] = filters if filters else self.data
else:
- raise Exception('Unable to query Target of Evaluations due to {} - {}'.format(
- response.status_code, response.content
- ))
+ raise Exception(
+ "Unable to query Target of Evaluations due to {} - {}".format(
+ response.status_code, response.content
+ )
+ )
def filter_finding(self, finding):
- filters = self.toe_map[finding['toe_id']]
+ filters = self.toe_map[finding["toe_id"]]
if not filters:
return False
@@ -72,11 +71,11 @@ def filter_finding(self, finding):
def get_findings(self):
for toe in self.toe_map.keys():
response = self.session.get(
- url='{}/findings/{}'.format(self.url, toe),
+ url="{}/findings/{}".format(self.url, toe),
headers={
- 'accept': 'application/json',
- 'Authorization': self.key
- }
+ "accept": "application/json",
+ "Authorization": self.key,
+ },
)
if response.ok:
@@ -85,6 +84,8 @@ def get_findings(self):
if not self.filter_finding(finding):
self.findings.append(finding)
else:
- raise Exception('Unable to collect findings from toe: {} due to {} - {}'.format(
- toe, response.status_code, response.content
- ))
+ raise Exception(
+ "Unable to collect findings from toe: {} due to {} - {}".format(
+ toe, response.status_code, response.content
+ )
+ )
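The Risk Recon API calls above use a plain requests.Session with header-based auth; a minimal usage sketch (the URL and key are placeholders):

    import requests

    session = requests.Session()
    response = session.get(
        url="{}/toes".format("https://api.riskrecon.com/v1"),
        headers={"accept": "application/json", "Authorization": "<api-key>"},
    )
    if response.ok:
        toes = response.json()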
diff --git a/dojo/tools/risk_recon/parser.py b/dojo/tools/risk_recon/parser.py
index 7c37c8bb5e..8c70496d69 100644
--- a/dojo/tools/risk_recon/parser.py
+++ b/dojo/tools/risk_recon/parser.py
@@ -6,7 +6,6 @@
class RiskReconParser(object):
-
def get_scan_types(self):
return ["Risk Recon API Importer"]
@@ -20,48 +19,75 @@ def get_findings(self, filename, test):
if filename:
tree = filename.read()
try:
- data = json.loads(str(tree, 'utf-8'))
- except:
+ data = json.loads(str(tree, "utf-8"))
+ except Exception:
data = json.loads(tree)
findings = []
- if not data.get('test', None):
+ if not data.get("test", None):
api = RiskReconAPI(
- data.get('api_key', None),
- data.get('url_endpoint', None),
- data.get('companies', data.get('filters', [])),
+ data.get("api_key", None),
+ data.get("url_endpoint", None),
+ data.get("companies", data.get("filters", [])),
)
findings = api.findings
else:
- findings = data.get('findings')
+ findings = data.get("findings")
return self._get_findings_internal(findings, test)
def _get_findings_internal(self, findings, test):
dupes = dict()
for item in findings:
- findingdetail = ''
- title = item.get('vendor') + ': ' + item.get('finding') + ' - ' + item.get('domain_name') + '(' + item.get('ip_address') + ')'
+ findingdetail = ""
+ title = (
+ item.get("vendor")
+ + ": "
+ + item.get("finding")
+ + " - "
+ + item.get("domain_name")
+ + "("
+ + item.get("ip_address")
+ + ")"
+ )
# Finding details information
- findingdetail += '**ID:** ' + item.get('finding_id') + '\n'
- findingdetail += '**Context:** ' + item.get('finding_context') + '\n'
- findingdetail += '**Value:** ' + item.get('finding_data_value') + '\n'
- findingdetail += '**Hosting Provider:** ' + item.get('hosting_provider') + '\n'
- findingdetail += '**Host Name:** ' + item.get('host_name') + '\n'
- findingdetail += '**Security Domain:** ' + item.get('security_domain') + '\n'
- findingdetail += '**Security Criteria:** ' + item.get('security_criteria') + '\n'
- findingdetail += '**Asset Value:** ' + item.get('asset_value') + '\n'
- findingdetail += '**Country:** ' + item.get('country_name') + '\n'
- findingdetail += '**Priority:** ' + item.get('priority') + '\n'
- findingdetail += '**First Seen:** ' + item.get('first_seen') + '\n'
-
- date = dateutil.parser.parse(item.get('first_seen'))
-
- sev = item.get('severity', "").capitalize()
+ findingdetail += "**ID:** " + item.get("finding_id") + "\n"
+ findingdetail += (
+ "**Context:** " + item.get("finding_context") + "\n"
+ )
+ findingdetail += (
+ "**Value:** " + item.get("finding_data_value") + "\n"
+ )
+ findingdetail += (
+ "**Hosting Provider:** " + item.get("hosting_provider") + "\n"
+ )
+ findingdetail += "**Host Name:** " + item.get("host_name") + "\n"
+ findingdetail += (
+ "**Security Domain:** " + item.get("security_domain") + "\n"
+ )
+ findingdetail += (
+ "**Security Criteria:** "
+ + item.get("security_criteria")
+ + "\n"
+ )
+ findingdetail += (
+ "**Asset Value:** " + item.get("asset_value") + "\n"
+ )
+ findingdetail += "**Country:** " + item.get("country_name") + "\n"
+ findingdetail += "**Priority:** " + item.get("priority") + "\n"
+ findingdetail += "**First Seen:** " + item.get("first_seen") + "\n"
+
+ date = dateutil.parser.parse(item.get("first_seen"))
+
+ sev = item.get("severity", "").capitalize()
sev = "Info" if not sev else sev
- tags = item.get('security_domain')[:20] + ', ' + item.get('security_criteria')[:20]
+ tags = (
+ item.get("security_domain")[:20]
+ + ", "
+ + item.get("security_criteria")[:20]
+ )
finding = Finding(
title=title,
@@ -71,12 +97,14 @@ def _get_findings_internal(self, findings, test):
static_finding=False,
dynamic_finding=True,
date=date,
- unique_id_from_tool=item.get('finding_id'),
+ unique_id_from_tool=item.get("finding_id"),
nb_occurences=1, # there is no de-duplication
)
finding.unsaved_tags = tags
- dupe_key = item.get('finding_id', title + '|' + tags + '|' + findingdetail)
+ dupe_key = item.get(
+ "finding_id", title + "|" + tags + "|" + findingdetail
+ )
if dupe_key in dupes:
find = dupes[dupe_key]
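The dupe key above falls back to a composite string only when finding_id is absent; a sketch with a hypothetical item, also showing the dateutil parse used for first_seen:

    import dateutil.parser

    item = {"first_seen": "2023-01-15T00:00:00Z"}  # hypothetical finding
    date = dateutil.parser.parse(item["first_seen"])
    title, tags, findingdetail = "Vendor: finding - host(1.2.3.4)", "web, dns", "**ID:** x\n"
    dupe_key = item.get("finding_id", title + "|" + tags + "|" + findingdetail)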
diff --git a/dojo/tools/rubocop/parser.py b/dojo/tools/rubocop/parser.py
index 99919ebd13..db18a4619b 100644
--- a/dojo/tools/rubocop/parser.py
+++ b/dojo/tools/rubocop/parser.py
@@ -4,7 +4,6 @@
class RubocopParser:
-
ID = "Rubocop Scan"
# possible values are:
diff --git a/dojo/tools/rusty_hog/parser.py b/dojo/tools/rusty_hog/parser.py
index 165110214a..da0baa6c83 100644
--- a/dojo/tools/rusty_hog/parser.py
+++ b/dojo/tools/rusty_hog/parser.py
@@ -4,7 +4,6 @@
class RustyhogParser(object):
-
def get_scan_types(self):
return ["Rusty Hog Scan"]
@@ -24,7 +23,9 @@ def parse_json(self, json_output):
def get_items(self, json_output, scanner, test):
items = {}
- findings = self.__getitem(vulnerabilities=self.parse_json(json_output), scanner=scanner)
+ findings = self.__getitem(
+ vulnerabilities=self.parse_json(json_output), scanner=scanner
+ )
for finding in findings:
unique_key = "Finding {}".format(finding)
items[unique_key] = finding
@@ -35,16 +36,22 @@ def get_tests(self, scan_type, handle):
tests = list()
parsername = "Rusty Hog"
for node in tree:
- if 'commit' in node or 'commitHash' in node or 'parent_commit_hash' in node or 'old_file_id' in node or 'new_file_id' in node:
+ if (
+ "commit" in node
+ or "commitHash" in node
+ or "parent_commit_hash" in node
+ or "old_file_id" in node
+ or "new_file_id" in node
+ ):
parsername = "Choctaw Hog"
break
- if 'linenum' in node or 'diff' in node:
+ if "linenum" in node or "diff" in node:
parsername = "Duroc Hog"
break
- if 'issue_id' in node or 'location' in node:
+ if "issue_id" in node or "location" in node:
parsername = "Gottingen Hog"
break
- if 'page_id' in node:
+ if "page_id" in node:
parsername = "Essex Hog"
break
test = ParserTest(
@@ -52,17 +59,20 @@ def get_tests(self, scan_type, handle):
type=parsername,
version="",
)
- if parsername == "Rusty Hog": # The outputfile is empty. A subscanner can't be classified
+ if (
+ parsername == "Rusty Hog"
): # The output file is empty, so a subscanner can't be classified
test.description = "The exact scanner within Rusty Hog could not be determined due to missing information within the scan result."
else:
test.description = parsername
- test.findings = self.__getitem(vulnerabilities=tree, scanner=parsername)
+ test.findings = self.__getitem(
+ vulnerabilities=tree, scanner=parsername
+ )
tests.append(test)
return tests
def __getitem(self, vulnerabilities, scanner):
findings = []
- line = ""
found_secret_string = ""
cwe = 200
for vulnerability in vulnerabilities:
@@ -70,85 +80,131 @@ def __getitem(self, vulnerabilities, scanner):
break
elif scanner == "Choctaw Hog":
"""Choctaw Hog"""
- found_secret_string = vulnerability.get('stringsFound')
- description = "**This string was found:** {}".format(found_secret_string)
- if vulnerability.get('commit') is not None:
- description += "\n**Commit message:** {}".format(vulnerability.get('commit'))
- if vulnerability.get('commitHash') is not None:
- description += "\n**Commit hash:** {}".format(vulnerability.get('commitHash'))
- if vulnerability.get('parent_commit_hash') is not None:
- description += "\n**Parent commit hash:** {}".format(vulnerability.get('parent_commit_hash'))
- if vulnerability.get('old_file_id') is not None and vulnerability.get('new_file_id') is not None:
- description += "\n**Old and new file IDs:** {} - {}".format(
- vulnerability.get('old_file_id'),
- vulnerability.get('new_file_id'))
- if vulnerability.get('old_line_num') is not None and vulnerability.get('new_line_num') is not None:
- description += "\n**Old and new line numbers:** {} - {}".format(
- vulnerability.get('old_line_num'),
- vulnerability.get('new_line_num'))
+ found_secret_string = vulnerability.get("stringsFound")
+ description = "**This string was found:** {}".format(
+ found_secret_string
+ )
+ if vulnerability.get("commit") is not None:
+ description += "\n**Commit message:** {}".format(
+ vulnerability.get("commit")
+ )
+ if vulnerability.get("commitHash") is not None:
+ description += "\n**Commit hash:** {}".format(
+ vulnerability.get("commitHash")
+ )
+ if vulnerability.get("parent_commit_hash") is not None:
+ description += "\n**Parent commit hash:** {}".format(
+ vulnerability.get("parent_commit_hash")
+ )
+ if (
+ vulnerability.get("old_file_id") is not None
+ and vulnerability.get("new_file_id") is not None
+ ):
+ description += (
+ "\n**Old and new file IDs:** {} - {}".format(
+ vulnerability.get("old_file_id"),
+ vulnerability.get("new_file_id"),
+ )
+ )
+ if (
+ vulnerability.get("old_line_num") is not None
+ and vulnerability.get("new_line_num") is not None
+ ):
+ description += (
+ "\n**Old and new line numbers:** {} - {}".format(
+ vulnerability.get("old_line_num"),
+ vulnerability.get("new_line_num"),
+ )
+ )
elif scanner == "Duroc Hog":
"""Duroc Hog"""
- found_secret_string = vulnerability.get('stringsFound')
- description = "**This string was found:** {}".format(found_secret_string)
- if vulnerability.get('path') is not None:
- description += "\n**Path of Issue:** {}".format(vulnerability.get('path'))
- if vulnerability.get('linenum') is not None:
- description += "\n**Linenum of Issue:** {}".format(vulnerability.get('linenum'))
- if vulnerability.get('diff') is not None:
- description += "\n**Diff:** {}".format(vulnerability.get('diff'))
+ found_secret_string = vulnerability.get("stringsFound")
+ description = "**This string was found:** {}".format(
+ found_secret_string
+ )
+ if vulnerability.get("path") is not None:
+ description += "\n**Path of Issue:** {}".format(
+ vulnerability.get("path")
+ )
+ if vulnerability.get("linenum") is not None:
+ description += "\n**Linenum of Issue:** {}".format(
+ vulnerability.get("linenum")
+ )
+ if vulnerability.get("diff") is not None:
+ description += "\n**Diff:** {}".format(
+ vulnerability.get("diff")
+ )
elif scanner == "Gottingen Hog":
"""Gottingen Hog"""
- found_secret_string = vulnerability.get('stringsFound')
- description = "**This string was found:** {}".format(found_secret_string)
- if vulnerability.get('issue_id') is not None:
- description += "\n**JIRA Issue ID:** {}".format(vulnerability.get('issue_id'))
- if vulnerability.get('location') is not None:
- description += "\n**JIRA location:** {}".format(vulnerability.get('location'))
- if vulnerability.get('url') is not None:
- description += "\n**JIRA url:** [{}]({})".format(vulnerability.get('url'), vulnerability.get('url'))
+ found_secret_string = vulnerability.get("stringsFound")
+ description = "**This string was found:** {}".format(
+ found_secret_string
+ )
+ if vulnerability.get("issue_id") is not None:
+ description += "\n**JIRA Issue ID:** {}".format(
+ vulnerability.get("issue_id")
+ )
+ if vulnerability.get("location") is not None:
+ description += "\n**JIRA location:** {}".format(
+ vulnerability.get("location")
+ )
+ if vulnerability.get("url") is not None:
+ description += "\n**JIRA url:** [{}]({})".format(
+ vulnerability.get("url"), vulnerability.get("url")
+ )
elif scanner == "Essex Hog":
- found_secret_string = vulnerability.get('stringsFound')
- description = "**This string was found:** {}".format(found_secret_string)
- if vulnerability.get('page_id') is not None:
- description += "\n**Confluence URL:** [{}]({})".format(vulnerability.get('url'), vulnerability.get('url'))
- description += "\n**Confluence Page ID:** {}".format(vulnerability.get('page_id'))
+ found_secret_string = vulnerability.get("stringsFound")
+ description = "**This string was found:** {}".format(
+ found_secret_string
+ )
+ if vulnerability.get("page_id") is not None:
+ description += "\n**Confluence URL:** [{}]({})".format(
+ vulnerability.get("url"), vulnerability.get("url")
+ )
+ description += "\n**Confluence Page ID:** {}".format(
+ vulnerability.get("page_id")
+ )
"""General - for all Rusty Hogs"""
- file_path = vulnerability.get('path')
- if vulnerability.get('date') is not None:
- description += "\n**Date:** {}".format(vulnerability.get('date'))
+ file_path = vulnerability.get("path")
+ if vulnerability.get("date") is not None:
+ description += "\n**Date:** {}".format(
+ vulnerability.get("date")
+ )
"""Finding Title"""
if scanner == "Choctaw Hog":
title = "{} found in Git path {} ({})".format(
- vulnerability.get('reason'),
- vulnerability.get('path'),
- vulnerability.get('commitHash'))
+ vulnerability.get("reason"),
+ vulnerability.get("path"),
+ vulnerability.get("commitHash"),
+ )
elif scanner == "Duroc Hog":
title = "{} found in path {}".format(
- vulnerability.get('reason'),
- vulnerability.get('path'))
+ vulnerability.get("reason"), vulnerability.get("path")
+ )
elif scanner == "Gottingen Hog":
title = "{} found in Jira ID {} ({})".format(
- vulnerability.get('reason'),
- vulnerability.get('issue_id'),
- vulnerability.get('location'))
+ vulnerability.get("reason"),
+ vulnerability.get("issue_id"),
+ vulnerability.get("location"),
+ )
elif scanner == "Essex Hog":
title = "{} found in Confluence Page ID {}".format(
- vulnerability.get('reason'),
- vulnerability.get('page_id'))
+ vulnerability.get("reason"), vulnerability.get("page_id")
+ )
# create the finding object
finding = Finding(
title=title,
- severity='High',
+ severity="High",
cwe=cwe,
description=description,
file_path=file_path,
static_finding=True,
dynamic_finding=False,
- payload=found_secret_string
+ payload=found_secret_string,
)
finding.description = finding.description.strip()
if scanner == "Choctaw Hog":
- finding.line = int(vulnerability.get('new_line_num'))
+ finding.line = int(vulnerability.get("new_line_num"))
finding.mitigation = "Please ensure no secret material nor confidential information is kept in clear within git repositories."
elif scanner == "Duroc Hog":
finding.mitigation = "Please ensure no secret material nor confidential information is kept in clear within directories, files, and archives."
From 5e70b393153408ea508da70c83a8a89a273801a3 Mon Sep 17 00:00:00 2001
From: Nicolas Velasquez <7769945+nv-pipo@users.noreply.github.com>
Date: Tue, 4 Jul 2023 19:23:38 +0200
Subject: [PATCH 11/85] Extract vulnerability type for Qualys scan import
(#8330)
---
dojo/tools/qualys/parser.py | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py
index d86c7f4c50..14ecbe564d 100644
--- a/dojo/tools/qualys/parser.py
+++ b/dojo/tools/qualys/parser.py
@@ -48,6 +48,12 @@
"category",
]
+TYPE_MAP = {
+ "Ig": "INFORMATION GATHERED",
+ "Practice": "POTENTIAL",
+ "Vuln": "CONFIRMED",
+}
+
def htmltext(blob):
h = html2text.HTML2Text()
@@ -148,10 +154,14 @@ def parse_finding(host, tree):
# _temp['solution'] = re.sub('Workaround(s)?:.+\n', '', htmltext(vuln_item.findtext('SOLUTION')))
_temp["solution"] = htmltext(vuln_item.findtext("SOLUTION"))
+ # type
+ _type = TYPE_MAP.get(vuln_details.findtext("TYPE"), "Unknown")
+
# Vuln_description
_temp["vuln_description"] = "\n".join(
[
htmltext(_description),
+ htmltext("Type: " + _type),
htmltext("Category: " + _category),
htmltext("QID: " + str(_gid)),
htmltext("Port: " + str(_port)),
From 3865dff9dbb9a510459968386e788ebeec490f40 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 5 Jul 2023 10:01:52 -0500
Subject: [PATCH 12/85] Bump djangosaml2 from 1.6.0 to 1.7.0 (#8343)
Bumps [djangosaml2](https://github.com/IdentityPython/djangosaml2) from 1.6.0 to 1.7.0.
- [Release notes](https://github.com/IdentityPython/djangosaml2/releases)
- [Changelog](https://github.com/IdentityPython/djangosaml2/blob/master/CHANGES)
- [Commits](https://github.com/IdentityPython/djangosaml2/compare/v1.6.0...v1.7.0)
---
updated-dependencies:
- dependency-name: djangosaml2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 48ef7948f2..2e8c7645a4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -72,7 +72,7 @@ cvss==2.6
django-fieldsignals==0.7.0
hyperlink==21.0.0
django-test-migrations==1.3.0
-djangosaml2==1.6.0
+djangosaml2==1.7.0
drf-spectacular==0.26.3
django-ratelimit==4.0.0
argon2-cffi==21.3.0
From f098bb4b1c811bd67203399a5e6d664ac798717c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 5 Jul 2023 10:02:13 -0500
Subject: [PATCH 13/85] Bump boto3 from 1.26.165 to 1.27.0 (#8342)
Bumps [boto3](https://github.com/boto/boto3) from 1.26.165 to 1.27.0.
- [Release notes](https://github.com/boto/boto3/releases)
- [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst)
- [Commits](https://github.com/boto/boto3/compare/1.26.165...1.27.0)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 2e8c7645a4..1b7264d7cd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -78,7 +78,7 @@ django-ratelimit==4.0.0
argon2-cffi==21.3.0
blackduck==1.1.0
pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support
-boto3==1.26.165 # Required for Celery Broker AWS (SQS) support
+boto3==1.27.0 # Required for Celery Broker AWS (SQS) support
netaddr==0.8.0
vulners==2.0.10
fontawesomefree==6.4.0
From 7c27a1472bd29244cca9dc780f490681d4dcdbce Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 5 Jul 2023 10:02:32 -0500
Subject: [PATCH 14/85] Bump pillow from 9.5.0 to 10.0.0 (#8335)
Bumps [pillow](https://github.com/python-pillow/Pillow) from 9.5.0 to 10.0.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/9.5.0...10.0.0)
---
updated-dependencies:
- dependency-name: pillow
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 1b7264d7cd..9fd4f7adfb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -36,7 +36,7 @@ Markdown==3.4.3
mysqlclient==2.1.1
openpyxl==3.1.2
xlrd==1.2.0
-Pillow==9.5.0 # required by django-imagekit
+Pillow==10.0.0 # required by django-imagekit
psycopg2-binary==2.9.6
cryptography==41.0.1
python-dateutil==2.8.2
From 69cc53e91ffb1bc753513d01159395e3d40a213b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 5 Jul 2023 11:41:29 -0500
Subject: [PATCH 15/85] Bump lxml from 4.9.2 to 4.9.3 (#8348)
Bumps [lxml](https://github.com/lxml/lxml) from 4.9.2 to 4.9.3.
- [Release notes](https://github.com/lxml/lxml/releases)
- [Changelog](https://github.com/lxml/lxml/blob/master/CHANGES.txt)
- [Commits](https://github.com/lxml/lxml/compare/lxml-4.9.2...lxml-4.9.3)
---
updated-dependencies:
- dependency-name: lxml
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 9fd4f7adfb..f0cbe39cae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -31,7 +31,7 @@ html2text==2020.1.16
humanize==4.7.0
jira==3.5.2
PyGithub==1.58.2
-lxml==4.9.2
+lxml==4.9.3
Markdown==3.4.3
mysqlclient==2.1.1
openpyxl==3.1.2
From ea9505ba8e09d7e34367617c7aa69995a5b5aa28 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 6 Jul 2023 21:18:52 -0500
Subject: [PATCH 16/85] Update dependency postcss from 8.4.24 to v8.4.25
(docs/package.json) (#8356)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
docs/package-lock.json | 14 +++++++-------
docs/package.json | 2 +-
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/docs/package-lock.json b/docs/package-lock.json
index 2421c15925..21416effb1 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -6,7 +6,7 @@
"": {
"devDependencies": {
"autoprefixer": "10.4.14",
- "postcss": "8.4.24",
+ "postcss": "8.4.25",
"postcss-cli": "10.1.0"
}
},
@@ -596,9 +596,9 @@
}
},
"node_modules/postcss": {
- "version": "8.4.24",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz",
- "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==",
+ "version": "8.4.25",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.25.tgz",
+ "integrity": "sha512-7taJ/8t2av0Z+sQEvNzCkpDynl0tX3uJMCODi6nT3PfASC7dYCWV9aQ+uiCf+KBD4SEFcu+GvJdGdwzQ6OSjCw==",
"dev": true,
"funding": [
{
@@ -1366,9 +1366,9 @@
"dev": true
},
"postcss": {
- "version": "8.4.24",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz",
- "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==",
+ "version": "8.4.25",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.25.tgz",
+ "integrity": "sha512-7taJ/8t2av0Z+sQEvNzCkpDynl0tX3uJMCODi6nT3PfASC7dYCWV9aQ+uiCf+KBD4SEFcu+GvJdGdwzQ6OSjCw==",
"dev": true,
"requires": {
"nanoid": "^3.3.6",
diff --git a/docs/package.json b/docs/package.json
index f3890a7b17..c7e2160786 100644
--- a/docs/package.json
+++ b/docs/package.json
@@ -1,6 +1,6 @@
{
"devDependencies": {
- "postcss": "8.4.24",
+ "postcss": "8.4.25",
"autoprefixer": "10.4.14",
"postcss-cli": "10.1.0"
}
From c5a347865d981789c6a30dbe81a2527cf5e22bdf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 7 Jul 2023 09:56:44 -0500
Subject: [PATCH 17/85] Bump boto3 from 1.27.0 to 1.28.0 (#8362)
Bumps [boto3](https://github.com/boto/boto3) from 1.27.0 to 1.28.0.
- [Release notes](https://github.com/boto/boto3/releases)
- [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst)
- [Commits](https://github.com/boto/boto3/compare/1.27.0...1.28.0)
---
updated-dependencies:
- dependency-name: boto3
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index f0cbe39cae..5d7867bf26 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -78,7 +78,7 @@ django-ratelimit==4.0.0
argon2-cffi==21.3.0
blackduck==1.1.0
pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support
-boto3==1.27.0 # Required for Celery Broker AWS (SQS) support
+boto3==1.28.0 # Required for Celery Broker AWS (SQS) support
netaddr==0.8.0
vulners==2.0.10
fontawesomefree==6.4.0
From f13d20d36fda0da0fcabeaf5faddd5fc5e093bb9 Mon Sep 17 00:00:00 2001
From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com>
Date: Fri, 7 Jul 2023 15:29:06 -0500
Subject: [PATCH 18/85] Update naming convention for product tags in related
objects (#8350)
---
docs/content/en/getting_started/upgrading.md | 18 ++++++
dojo/filters.py | 64 ++++++++++----------
2 files changed, 50 insertions(+), 32 deletions(-)
diff --git a/docs/content/en/getting_started/upgrading.md b/docs/content/en/getting_started/upgrading.md
index 33872b487c..b27f29e1aa 100644
--- a/docs/content/en/getting_started/upgrading.md
+++ b/docs/content/en/getting_started/upgrading.md
@@ -72,6 +72,24 @@ godojo installations
If you have installed DefectDojo on "iron" and wish to upgrade the installation, please see the [instructions in the repo](https://github.com/DefectDojo/godojo/blob/master/docs-and-scripts/upgrading.md).
+## Upgrading to DefectDojo Version 2.25.x.
+
+A few query parameters for filtering objects via the API by their related product's tags have been renamed to be more consistent with the other "related object tags":
+
+**Breaking Change**
+
+ - Engagement
+ - `product__tags__name` -> `product__tags`
+ - `not_product__tags__name` -> `not_product__tags`
+ - Test
+ - `engagement__product__tags__name` -> `engagement__product__tags`
+ - `not_engagement__product__tags__name` -> `not_engagement__product__tags`
+ - Finding
+ - `test__engagement__product__tags__name` -> `test__engagement__product__tags`
+ - `not_test__engagement__product__tags__name` -> `not_test__engagement__product__tags`
+
+For all other changes, check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.25.0) for the contents of the release. A request sketch using the renamed parameter names follows this patch.
+
## Upgrading to DefectDojo Version 2.24.x.
There are no special instruction for upgrading to 2.24.0. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.24.0) for the contents of the release.
diff --git a/dojo/filters.py b/dojo/filters.py
index 170d5d7ba6..9bcfb6ad9e 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -830,17 +830,17 @@ class ApiEngagementFilter(DojoFilter):
product__prod_type = NumberInFilter(field_name='product__prod_type', lookup_expr='in')
tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags')
- product__tags__name = CharFieldInFilter(field_name='product__tags__name',
+ help_text='Comma separated list of exact tags')
+ product__tags = CharFieldInFilter(field_name='product__tags__name',
lookup_expr='in',
- help_text='Comma seperated list of exact tags present on product')
+ help_text='Comma separated list of exact tags present on product')
not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on model', exclude='True')
- not_product__tags__name = CharFieldInFilter(field_name='product__tags__name',
+ help_text='Comma separated list of exact tags not present on model', exclude='True')
+ not_product__tags = CharFieldInFilter(field_name='product__tags__name',
lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on product',
+ help_text='Comma separated list of exact tags not present on product',
exclude='True')
o = OrderingFilter(
@@ -1035,11 +1035,11 @@ class ApiProductFilter(DojoFilter):
tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains')
tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags')
+ help_text='Comma separated list of exact tags')
not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on product', exclude='True')
+ help_text='Comma separated list of exact tags not present on product', exclude='True')
# DateRangeFilter
created = DateRangeFilter()
@@ -1145,26 +1145,26 @@ class ApiFindingFilter(DojoFilter):
tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags')
+ help_text='Comma separated list of exact tags')
test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags present on test')
+ help_text='Comma separated list of exact tags present on test')
test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags', lookup_expr='in',
- help_text='Comma seperated list of exact tags present on engagement')
- test__engagement__product__tags__name = CharFieldInFilter(field_name='test__engagement__product__tags__name',
+ help_text='Comma separated list of exact tags present on engagement')
+ test__engagement__product__tags = CharFieldInFilter(field_name='test__engagement__product__tags__name',
lookup_expr='in',
- help_text='Comma seperated list of exact tags present on product')
+ help_text='Comma separated list of exact tags present on product')
not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on model', exclude='True')
+ help_text='Comma separated list of exact tags not present on model', exclude='True')
not_test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on test', exclude='True')
+ help_text='Comma separated list of exact tags not present on test', exclude='True')
not_test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on engagement',
+ help_text='Comma separated list of exact tags not present on engagement',
exclude='True')
- not_test__engagement__product__tags__name = CharFieldInFilter(field_name='test__engagement__product__tags__name',
+ not_test__engagement__product__tags = CharFieldInFilter(field_name='test__engagement__product__tags__name',
lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on product',
+ help_text='Comma separated list of exact tags not present on product',
exclude='True')
o = OrderingFilter(
@@ -1556,11 +1556,11 @@ def __init__(self, *args, **kwargs):
class ApiTemplateFindingFilter(DojoFilter):
tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags')
+ help_text='Comma separated list of exact tags')
not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on model', exclude='True')
+ help_text='Comma separated list of exact tags not present on model', exclude='True')
o = OrderingFilter(
# tuple-mapping retains order
@@ -1798,11 +1798,11 @@ class Meta:
class ApiEndpointFilter(DojoFilter):
tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags')
+ help_text='Comma separated list of exact tags')
not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on model', exclude='True')
+ help_text='Comma separated list of exact tags not present on model', exclude='True')
o = OrderingFilter(
# tuple-mapping retains order
fields=(
@@ -1897,22 +1897,22 @@ def __init__(self, *args, **kwargs):
class ApiTestFilter(DojoFilter):
tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags')
+ help_text='Comma separated list of exact tags')
engagement__tags = CharFieldInFilter(field_name='engagement__tags', lookup_expr='in',
- help_text='Comma seperated list of exact tags present on engagement')
- engagement__product__tags__name = CharFieldInFilter(field_name='engagement__product__tags__name',
+ help_text='Comma separated list of exact tags present on engagement')
+ engagement__product__tags = CharFieldInFilter(field_name='engagement__product__tags__name',
lookup_expr='in',
- help_text='Comma seperated list of exact tags present on product')
+ help_text='Comma separated list of exact tags present on product')
not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on model', exclude='True')
+ help_text='Comma separated list of exact tags not present on model', exclude='True')
not_engagement__tags = CharFieldInFilter(field_name='engagement__tags', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on engagement',
+ help_text='Comma separated list of exact tags not present on engagement',
exclude='True')
- not_engagement__product__tags__name = CharFieldInFilter(field_name='engagement__product__tags__name',
+ not_engagement__product__tags = CharFieldInFilter(field_name='engagement__product__tags__name',
lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on product',
+ help_text='Comma separated list of exact tags not present on product',
exclude='True')
o = OrderingFilter(
@@ -1950,11 +1950,11 @@ class Meta:
class ApiAppAnalysisFilter(DojoFilter):
tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags')
+ help_text='Comma separated list of exact tags')
not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
- help_text='Comma seperated list of exact tags not present on model', exclude='True')
+ help_text='Comma separated list of exact tags not present on model', exclude='True')
class Meta:
model = App_Analysis
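To make the rename concrete, here is a hedged sketch of filtering engagements by product tags with the new parameter names. The host and API key are placeholders, and the `/api/v2/engagements/` endpoint is assumed from DefectDojo's v2 API:
```python
# Hypothetical client call; only the parameter names come from this patch.
import requests

resp = requests.get(
    "https://defectdojo.example.com/api/v2/engagements/",  # placeholder host
    headers={"Authorization": "Token <api-key>"},          # placeholder key
    params={
        "product__tags": "prod-a,prod-b",    # was: product__tags__name
        "not_product__tags": "deprecated",   # was: not_product__tags__name
    },
)
print(resp.json())
```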
From e15d1b1dd57aaa1b03d889d1127734271151e575 Mon Sep 17 00:00:00 2001
From: testaccount90009 <122134756+testaccount90009@users.noreply.github.com>
Date: Fri, 7 Jul 2023 14:01:05 -0700
Subject: [PATCH 19/85] Update DOCKER.md "run with docker compose using https"
(#8361)
---
readme-docs/DOCKER.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/readme-docs/DOCKER.md b/readme-docs/DOCKER.md
index 4b9a29d2a8..1b026b93bc 100644
--- a/readme-docs/DOCKER.md
+++ b/readme-docs/DOCKER.md
@@ -289,13 +289,13 @@ To secure the application by https, follow those steps
* Generate a CSR (Certificate Signing Request)
* Have the CSR signed by a certificate authority
* Place the private key and the certificate under the nginx folder
-* copy your secrets into:
+* copy your secrets into ../nginx/nginx_TLS.conf:
```
server_name your.servername.com;
ssl_certificate /etc/nginx/ssl/nginx.crt
ssl_certificate_key /etc/nginx/ssl/nginx.key;
```
-*set the GENERATE_TLS_CERTIFICATE != True in the docker-compose.override.https.yml
+* set the GENERATE_TLS_CERTIFICATE != True in the docker-compose.override.https.yml
* Protect your private key from other users:
```
chmod 400 nginx/*.key
From b15ff1813f2365a5356922922f94a15d1eafb605 Mon Sep 17 00:00:00 2001
From: Alejandro Tortolero
Date: Fri, 7 Jul 2023 16:24:12 -0500
Subject: [PATCH 20/85] Update files with PEP8 standards in folder dojo/tools
#006 (#8319)
* Update files in folder dojo/tools/sarif with PEP8 standards.
* Update files in folder dojo/tools/scantist with PEP8 standards.
* Update files in folder dojo/tools/semgrep with PEP8 standards.
* Update files in folder dojo/tools/skf with PEP8 standards.
* Update files in folder dojo/tools/snyk with PEP8 standards.
* Update files in folder dojo/tools/solar_appscreener with PEP8 standards.
* Update files in folder dojo/tools/sonarqube with PEP8 standards.
* Update files in folder dojo/tools/sonatype with PEP8 standards.
* Update files in folder dojo/tools/spotbugs with PEP8 standards.
* Update files in folder dojo/tools/ssl_labs with PEP8 standards.
* Update files in folder dojo/tools/ssl_labs with PEP8 standards.
* Update files in folder dojo/tools/sslscan with PEP8 standards.
* Update files in folder dojo/tools/sslyze with PEP8 standards.
* Update files in folder dojo/tools/stackhawk with PEP8 standards.
* Update files in folder dojo/tools/talisman with PEP8 standards.
* Update files in folder dojo/tools/tenable with PEP8 standards.
* Update files in folder dojo/tools/terrascan with PEP8 standards.
* Update files in folder dojo/tools/testssl with PEP8 standards.
* Update files in folder dojo/tools/tfsec with PEP8 standards.
* Update files in folder dojo/tools/trivy with PEP8 standards.
* Update files in folder dojo/tools/trivy_operator with PEP8 standards.
* Update files in folder dojo/tools/trufflehog with PEP8 standards.
* Update files in folder dojo/tools/trufflehog3 and trustwave with PEP8 standards.
* Update files in folder dojo/tools/trustwave_fusion_api with PEP8 standards.
* Update files in folder dojo/tools/twistlock with PEP8 standards.
* Update files in folder dojo/tools/vcg with PEP8 standards.
* Update files in folder dojo/tools/veracode with PEP8 standards.
* Update files in folder dojo/tools/veracode_sca with PEP8 standards.
* Update files in folder dojo/tools/wapiti with PEP8 standards.
* Update files in folder dojo/tools/wazuh with PEP8 standards.
* Update files in folder dojo/tools/wfuzz with PEP8 standards.
* Update files in folder dojo/tools/whispers with PEP8 standards.
* Update files in folder dojo/tools/whitehat_sentinel with PEP8 standards.
* Update files in folder dojo/tools/whitesource with PEP8 standards.
* Update files in folder dojo/tools/wpscan with PEP8 standards.
* Update files in folder dojo/tools/xanitizer with PEP8 standards.
* Update files in folder dojo/tools/yarn_audit with PEP8 standards.
* Update files in folder dojo/tools/zap with PEP8 standards.
* Change BaseException to Exception
* Removing blank space.
* Removing unused variable.
---
dojo/tools/sarif/parser.py | 300 ++++++----
dojo/tools/scantist/parser.py | 13 +-
dojo/tools/scout_suite/__init__.py | 2 +-
dojo/tools/scout_suite/parser.py | 102 ++--
dojo/tools/semgrep/parser.py | 49 +-
dojo/tools/skf/__init__.py | 2 +-
dojo/tools/skf/parser.py | 48 +-
dojo/tools/snyk/parser.py | 161 ++++--
dojo/tools/solar_appscreener/parser.py | 26 +-
dojo/tools/sonarqube/parser.py | 187 ++++--
dojo/tools/sonatype/parser.py | 98 ++--
dojo/tools/spotbugs/parser.py | 91 +--
dojo/tools/ssl_labs/__init__.py | 2 +-
dojo/tools/ssl_labs/parser.py | 281 ++++++---
dojo/tools/sslscan/parser.py | 66 ++-
dojo/tools/sslyze/parser.py | 7 +-
dojo/tools/sslyze/parser_json.py | 666 ++++++++++++++--------
dojo/tools/sslyze/parser_xml.py | 136 +++--
dojo/tools/stackhawk/parser.py | 102 ++--
dojo/tools/talisman/parser.py | 10 +-
dojo/tools/tenable/csv_format.py | 54 +-
dojo/tools/tenable/parser.py | 12 +-
dojo/tools/tenable/xml_format.py | 103 +++-
dojo/tools/terrascan/parser.py | 35 +-
dojo/tools/testssl/parser.py | 78 ++-
dojo/tools/tfsec/parser.py | 50 +-
dojo/tools/trivy/parser.py | 179 +++---
dojo/tools/trivy_operator/parser.py | 106 ++--
dojo/tools/trufflehog/parser.py | 65 ++-
dojo/tools/trufflehog3/parser.py | 132 +++--
dojo/tools/trustwave/__init__.py | 5 +-
dojo/tools/trustwave/parser.py | 62 +-
dojo/tools/trustwave_fusion_api/parser.py | 46 +-
dojo/tools/twistlock/parser.py | 158 +++--
dojo/tools/vcg/parser.py | 123 ++--
dojo/tools/veracode/__init__.py | 2 +-
dojo/tools/veracode/parser.py | 284 +++++----
dojo/tools/veracode_sca/parser.py | 152 +++--
dojo/tools/wapiti/parser.py | 46 +-
dojo/tools/wazuh/parser.py | 33 +-
dojo/tools/wfuzz/parser.py | 2 -
dojo/tools/whispers/parser.py | 17 +-
dojo/tools/whitehat_sentinel/parser.py | 180 +++---
dojo/tools/whitesource/parser.py | 163 +++---
dojo/tools/wpscan/parser.py | 49 +-
dojo/tools/xanitizer/__init__.py | 2 +-
dojo/tools/xanitizer/parser.py | 148 +++--
dojo/tools/yarn_audit/parser.py | 118 ++--
dojo/tools/zap/parser.py | 36 +-
49 files changed, 3009 insertions(+), 1780 deletions(-)
diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py
index 1d5f3ec5ee..d604279218 100644
--- a/dojo/tools/sarif/parser.py
+++ b/dojo/tools/sarif/parser.py
@@ -8,7 +8,7 @@
logger = logging.getLogger(__name__)
-CWE_REGEX = r'cwe-\d+'
+CWE_REGEX = r"cwe-\d+"
class SarifParser(object):
@@ -31,18 +31,18 @@ def get_findings(self, filehandle, test):
tree = json.load(filehandle)
items = list()
# for each runs we just aggregate everything
- for run in tree.get('runs', list()):
+ for run in tree.get("runs", list()):
items.extend(self.__get_items_from_run(run))
return items
def get_tests(self, scan_type, handle):
tree = json.load(handle)
tests = list()
- for run in tree.get('runs', list()):
+ for run in tree.get("runs", list()):
test = ParserTest(
- name=run['tool']['driver']['name'],
- type=run['tool']['driver']['name'],
- version=run['tool']['driver'].get('version'),
+ name=run["tool"]["driver"]["name"],
+ type=run["tool"]["driver"]["name"],
+ version=run["tool"]["driver"].get("version"),
)
test.findings = self.__get_items_from_run(run)
tests.append(test)
@@ -55,18 +55,18 @@ def __get_items_from_run(self, run):
artifacts = get_artifacts(run)
# get the timestamp of the run if possible
run_date = self.__get_last_invocation_date(run)
- for result in run.get('results', list()):
+ for result in run.get("results", list()):
item = get_item(result, rules, artifacts, run_date)
if item is not None:
items.append(item)
return items
def __get_last_invocation_date(self, data):
- invocations = data.get('invocations', [])
+ invocations = data.get("invocations", [])
if len(invocations) == 0:
return None
# try to get the last 'endTimeUtc'
- raw_date = invocations[-1].get('endTimeUtc')
+ raw_date = invocations[-1].get("endTimeUtc")
if raw_date is None:
return None
# if the data is here we try to convert it to datetime
@@ -75,8 +75,8 @@ def __get_last_invocation_date(self, data):
def get_rules(run):
rules = {}
- for item in run['tool']['driver'].get('rules', []):
- rules[item['id']] = item
+ for item in run["tool"]["driver"].get("rules", []):
+ rules[item["id"]] = item
return rules
@@ -84,7 +84,7 @@ def get_rules(run):
def get_properties_tags(value):
if not value:
return []
- return value.get('properties', {}).get('tags', [])
+ return value.get("properties", {}).get("tags", [])
def search_cwe(value, cwes):
@@ -96,9 +96,9 @@ def search_cwe(value, cwes):
def get_rule_cwes(rule):
cwes = []
# data of the specification
- if 'relationships' in rule and type(rule['relationships']) == list:
- for relationship in rule['relationships']:
- value = relationship['target']['id']
+ if "relationships" in rule and isinstance(rule["relationships"], list):
+ for relationship in rule["relationships"]:
+ value = relationship["target"]["id"]
search_cwe(value, cwes)
return cwes
@@ -110,8 +110,8 @@ def get_rule_cwes(rule):
def get_result_cwes_properties(result):
"""Some tools like njsscan store the CWE in the properties of the result"""
cwes = []
- if 'properties' in result and 'cwe' in result['properties']:
- value = result['properties']['cwe']
+ if "properties" in result and "cwe" in result["properties"]:
+ value = result["properties"]["cwe"]
search_cwe(value, cwes)
return cwes
@@ -119,8 +119,8 @@ def get_result_cwes_properties(result):
def get_artifacts(run):
artifacts = {}
custom_index = 0 # hack because some tool doesn't generate this attribute
- for tree_artifact in run.get('artifacts', []):
- artifacts[tree_artifact.get('index', custom_index)] = tree_artifact
+ for tree_artifact in run.get("artifacts", []):
+ artifacts[tree_artifact.get("index", custom_index)] = tree_artifact
custom_index += 1
return artifacts
@@ -130,9 +130,9 @@ def get_message_from_multiformatMessageString(data, rule):
See here for the specification: https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317468
"""
- if rule is not None and 'id' in data:
- text = rule['messageStrings'][data['id']].get('text')
- arguments = data.get('arguments', [])
+ if rule is not None and "id" in data:
+ text = rule["messageStrings"][data["id"]].get("text")
+ arguments = data.get("arguments", [])
# argument substitution
for i in range(6): # the specification limit to 6
substitution_str = "{" + str(i) + "}"
@@ -142,7 +142,7 @@ def get_message_from_multiformatMessageString(data, rule):
return text
else:
# TODO manage markdown
- return data.get('text')
+ return data.get("text")
def cve_try(val):
@@ -156,90 +156,129 @@ def cve_try(val):
def get_title(result, rule):
title = None
- if 'message' in result:
- title = get_message_from_multiformatMessageString(result['message'], rule)
+ if "message" in result:
+ title = get_message_from_multiformatMessageString(
+ result["message"], rule
+ )
if title is None and rule is not None:
- if 'shortDescription' in rule:
- title = get_message_from_multiformatMessageString(rule['shortDescription'], rule)
- elif 'fullDescription' in rule:
- title = get_message_from_multiformatMessageString(rule['fullDescription'], rule)
- elif 'name' in rule:
- title = rule['name']
- elif 'id' in rule:
- title = rule['id']
+ if "shortDescription" in rule:
+ title = get_message_from_multiformatMessageString(
+ rule["shortDescription"], rule
+ )
+ elif "fullDescription" in rule:
+ title = get_message_from_multiformatMessageString(
+ rule["fullDescription"], rule
+ )
+ elif "name" in rule:
+ title = rule["name"]
+ elif "id" in rule:
+ title = rule["id"]
if title is None:
- raise ValueError('No information found to create a title')
+ raise ValueError("No information found to create a title")
return textwrap.shorten(title, 150)
def get_snippet(result):
snippet = None
- if 'locations' in result:
- location = result['locations'][0]
- if 'physicalLocation' in location:
- if 'region' in location['physicalLocation']:
- if 'snippet' in location['physicalLocation']['region']:
- if 'text' in location['physicalLocation']['region']['snippet']:
- snippet = location['physicalLocation']['region']['snippet']['text']
- if snippet is None and 'contextRegion' in location['physicalLocation']:
- if 'snippet' in location['physicalLocation']['contextRegion']:
- if 'text' in location['physicalLocation']['contextRegion']['snippet']:
- snippet = location['physicalLocation']['contextRegion']['snippet']['text']
+ if "locations" in result:
+ location = result["locations"][0]
+ if "physicalLocation" in location:
+ if "region" in location["physicalLocation"]:
+ if "snippet" in location["physicalLocation"]["region"]:
+ if (
+ "text"
+ in location["physicalLocation"]["region"]["snippet"]
+ ):
+ snippet = location["physicalLocation"]["region"][
+ "snippet"
+ ]["text"]
+ if (
+ snippet is None
+ and "contextRegion" in location["physicalLocation"]
+ ):
+ if "snippet" in location["physicalLocation"]["contextRegion"]:
+ if (
+ "text"
+ in location["physicalLocation"]["contextRegion"][
+ "snippet"
+ ]
+ ):
+ snippet = location["physicalLocation"][
+ "contextRegion"
+ ]["snippet"]["text"]
return snippet
def get_codeFlowsDescription(codeFlows):
- description = ''
+ description = ""
for codeFlow in codeFlows:
- if 'threadFlows' not in codeFlow:
+ if "threadFlows" not in codeFlow:
continue
- for threadFlow in codeFlow['threadFlows']:
- if 'locations' not in threadFlow:
+ for threadFlow in codeFlow["threadFlows"]:
+ if "locations" not in threadFlow:
continue
- description = '**Code flow:**\n'
- for location in threadFlow['locations']:
- physicalLocation = location['location']['physicalLocation']
- region = physicalLocation['region']
- description += '\t' + physicalLocation['artifactLocation'][
- 'uri'] if 'byteOffset' in region else '\t' + physicalLocation['artifactLocation']['uri'] + ':' + str(
- region['startLine'])
- if 'startColumn' in region:
- description += ':' + str(region['startColumn'])
- if 'snippet' in region:
- description += '\t-\t' + region['snippet']['text']
- description += '\n'
+ description = "**Code flow:**\n"
+ for location in threadFlow["locations"]:
+ physicalLocation = location["location"]["physicalLocation"]
+ region = physicalLocation["region"]
+ description += (
+ "\t" + physicalLocation["artifactLocation"]["uri"]
+ if "byteOffset" in region
+ else "\t"
+ + physicalLocation["artifactLocation"]["uri"]
+ + ":"
+ + str(region["startLine"])
+ )
+ if "startColumn" in region:
+ description += ":" + str(region["startColumn"])
+ if "snippet" in region:
+ description += "\t-\t" + region["snippet"]["text"]
+ description += "\n"
return description
def get_description(result, rule):
- description = ''
- message = ''
- if 'message' in result:
- message = get_message_from_multiformatMessageString(result['message'], rule)
- description += '**Result message:** {}\n'.format(message)
+ description = ""
+ message = ""
+ if "message" in result:
+ message = get_message_from_multiformatMessageString(
+ result["message"], rule
+ )
+ description += "**Result message:** {}\n".format(message)
if get_snippet(result) is not None:
- description += '**Snippet:**\n```{}```\n'.format(get_snippet(result))
+ description += "**Snippet:**\n```{}```\n".format(get_snippet(result))
if rule is not None:
- if 'name' in rule:
- description += '**Rule name:** {}\n'.format(rule.get('name'))
- shortDescription = ''
- if 'shortDescription' in rule:
- shortDescription = get_message_from_multiformatMessageString(rule['shortDescription'], rule)
+ if "name" in rule:
+ description += "**Rule name:** {}\n".format(rule.get("name"))
+ shortDescription = ""
+ if "shortDescription" in rule:
+ shortDescription = get_message_from_multiformatMessageString(
+ rule["shortDescription"], rule
+ )
if shortDescription != message:
- description += '**Rule short description:** {}\n'.format(shortDescription)
- if 'fullDescription' in rule:
- fullDescription = get_message_from_multiformatMessageString(rule['fullDescription'], rule)
- if fullDescription != message and fullDescription != shortDescription:
- description += '**Rule full description:** {}\n'.format(fullDescription)
-
- if len(result.get('codeFlows', [])) > 0:
- description += get_codeFlowsDescription(result['codeFlows'])
-
- if description.endswith('\n'):
+ description += "**Rule short description:** {}\n".format(
+ shortDescription
+ )
+ if "fullDescription" in rule:
+ fullDescription = get_message_from_multiformatMessageString(
+ rule["fullDescription"], rule
+ )
+ if (
+ fullDescription != message
+ and fullDescription != shortDescription
+ ):
+ description += "**Rule full description:** {}\n".format(
+ fullDescription
+ )
+
+ if len(result.get("codeFlows", [])) > 0:
+ description += get_codeFlowsDescription(result["codeFlows"])
+
+ if description.endswith("\n"):
description = description[:-1]
return description
@@ -248,11 +287,13 @@ def get_description(result, rule):
def get_references(rule):
reference = None
if rule is not None:
- if 'helpUri' in rule:
- reference = rule['helpUri']
- elif 'help' in rule:
- helpText = get_message_from_multiformatMessageString(rule['help'], rule)
- if helpText.startswith('http'):
+ if "helpUri" in rule:
+ reference = rule["helpUri"]
+ elif "help" in rule:
+ helpText = get_message_from_multiformatMessageString(
+ rule["help"], rule
+ )
+ if helpText.startswith("http"):
reference = helpText
return reference
@@ -260,11 +301,11 @@ def get_references(rule):
def cvss_to_severity(cvss):
severity_mapping = {
- 1: 'Info',
- 2: 'Low',
- 3: 'Medium',
- 4: 'High',
- 5: 'Critical'
+ 1: "Info",
+ 2: "Low",
+ 3: "Medium",
+ 4: "High",
+ 5: "Critical",
}
if cvss >= 9:
@@ -280,30 +321,33 @@ def cvss_to_severity(cvss):
def get_severity(result, rule):
- severity = result.get('level')
+ severity = result.get("level")
if severity is None and rule is not None:
# get the severity from the rule
- if 'defaultConfiguration' in rule:
- severity = rule['defaultConfiguration'].get('level')
-
- if 'note' == severity:
- return 'Info'
- elif 'warning' == severity:
- return 'Medium'
- elif 'error' == severity:
- return 'High'
+ if "defaultConfiguration" in rule:
+ severity = rule["defaultConfiguration"].get("level")
+
+ if "note" == severity:
+ return "Info"
+ elif "warning" == severity:
+ return "Medium"
+ elif "error" == severity:
+ return "High"
else:
- return 'Medium'
+ return "Medium"
def get_item(result, rules, artifacts, run_date):
- # see https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html / 3.27.9
- kind = result.get('kind', 'fail')
- if kind != 'fail':
+ # see
+ # https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html
+ # / 3.27.9
+ kind = result.get("kind", "fail")
+ if kind != "fail":
return None
# if finding is suppressed, mark it as False Positive
- # Note: see https://docs.oasis-open.org/sarif/sarif/v2.0/csprd02/sarif-v2.0-csprd02.html#_Toc10127852
+ # Note: see
+ # https://docs.oasis-open.org/sarif/sarif/v2.0/csprd02/sarif-v2.0-csprd02.html#_Toc10127852
suppressed = False
if result.get("suppressions"):
suppressed = True
@@ -312,21 +356,21 @@ def get_item(result, rules, artifacts, run_date):
file_path = None
line = None
if "locations" in result:
- location = result['locations'][0]
- if 'physicalLocation' in location:
- file_path = location['physicalLocation']['artifactLocation']['uri']
+ location = result["locations"][0]
+ if "physicalLocation" in location:
+ file_path = location["physicalLocation"]["artifactLocation"]["uri"]
# 'region' attribute is optionnal
- if 'region' in location['physicalLocation']:
+ if "region" in location["physicalLocation"]:
# https://docs.oasis-open.org/sarif/sarif/v2.0/csprd02/sarif-v2.0-csprd02.html / 3.30.1
# need to check whether it is byteOffset
- if 'byteOffset' in location['physicalLocation']['region']:
+ if "byteOffset" in location["physicalLocation"]["region"]:
pass
else:
- line = location['physicalLocation']['region']['startLine']
+ line = location["physicalLocation"]["region"]["startLine"]
# test rule link
- rule = rules.get(result.get('ruleId'))
+ rule = rules.get(result.get("ruleId"))
finding = Finding(
title=get_title(result, rule),
@@ -341,20 +385,21 @@ def get_item(result, rules, artifacts, run_date):
references=get_references(rule),
)
- if 'ruleId' in result:
- finding.vuln_id_from_tool = result['ruleId']
+ if "ruleId" in result:
+ finding.vuln_id_from_tool = result["ruleId"]
# for now we only support when the id of the rule is a CVE
- if cve_try(result['ruleId']):
- finding.unsaved_vulnerability_ids = [cve_try(result['ruleId'])]
+ if cve_try(result["ruleId"]):
+ finding.unsaved_vulnerability_ids = [cve_try(result["ruleId"])]
# some time the rule id is here but the tool doesn't define it
if rule is not None:
cwes_extracted = get_rule_cwes(rule)
if len(cwes_extracted) > 0:
finding.cwe = cwes_extracted[-1]
- # Some tools such as GitHub or Grype return the severity in properties instead
- if 'properties' in rule and 'security-severity' in rule['properties']:
- cvss = float(rule['properties']['security-severity'])
+ # Some tools such as GitHub or Grype return the severity in properties
+ # instead
+ if "properties" in rule and "security-severity" in rule["properties"]:
+ cvss = float(rule["properties"]["security-severity"])
severity = cvss_to_severity(cvss)
finding.cvssv3_score = cvss
finding.severity = severity
@@ -366,7 +411,9 @@ def get_item(result, rules, artifacts, run_date):
# manage fixes provided in the report
if "fixes" in result:
- finding.mitigation = "\n".join([fix.get('description', {}).get("text") for fix in result["fixes"]])
+ finding.mitigation = "\n".join(
+ [fix.get("description", {}).get("text") for fix in result["fixes"]]
+ )
if run_date:
finding.date = run_date
@@ -378,16 +425,19 @@ def get_item(result, rules, artifacts, run_date):
# manage fingerprints
# fingerprinting in SARIF is more complete than in current implementation
# SARIF standard make it possible to have multiple version in the same report
- # for now we just take the first one and keep the format to be able to compare it
+ # for now we just take the first one and keep the format to be able to
+ # compare it
if result.get("fingerprints"):
hashes = get_fingerprints_hashes(result["fingerprints"])
first_item = next(iter(hashes.items()))
- finding.unique_id_from_tool = first_item[1]['value']
+ finding.unique_id_from_tool = first_item[1]["value"]
elif result.get("partialFingerprints"):
# for this one we keep an order to have id that could be compared
hashes = get_fingerprints_hashes(result["partialFingerprints"])
sorted_hashes = sorted(hashes.keys())
- finding.unique_id_from_tool = "|".join([f'{key}:{hashes[key]["value"]}' for key in sorted_hashes])
+ finding.unique_id_from_tool = "|".join(
+ [f'{key}:{hashes[key]["value"]}' for key in sorted_hashes]
+ )
return finding
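As an aside on the SARIF hunks above: `get_message_from_multiformatMessageString` substitutes up to six positional placeholders (`{0}`..`{5}`) from the result's `arguments`, per the SARIF specification. A minimal sketch of that substitution, using hypothetical rule and message fragments rather than a full SARIF document:
```python
# Illustrative only: mirrors the placeholder substitution loop above.
rule = {"messageStrings": {"default": {"text": "Found {0} in file {1}"}}}
data = {"id": "default", "arguments": ["hardcoded secret", "settings.py"]}

text = rule["messageStrings"][data["id"]]["text"]
for i, arg in enumerate(data.get("arguments", [])[:6]):  # spec limit of 6
    text = text.replace("{" + str(i) + "}", arg)
print(text)  # Found hardcoded secret in file settings.py
```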
diff --git a/dojo/tools/scantist/parser.py b/dojo/tools/scantist/parser.py
index e84fa1c2e8..d4b1e6c076 100644
--- a/dojo/tools/scantist/parser.py
+++ b/dojo/tools/scantist/parser.py
@@ -37,6 +37,7 @@ def get_items(self, tree, test):
test:
: purpose: parses input rawto extract dojo
"""
+
def get_findings(vuln, test):
"""
vuln : input vulnerable node
@@ -49,7 +50,7 @@ def get_findings(vuln, test):
component_name = vuln.get("Library")
component_version = vuln.get("Library Version")
- title = vulnerability_id + '|' + component_name
+ title = vulnerability_id + "|" + component_name
description = vuln.get("Description")
file_path = vuln.get("File Path", "")
@@ -65,12 +66,12 @@ def get_findings(vuln, test):
severity=severity,
cwe=cwe,
mitigation=mitigation,
- references=vuln.get('references'),
+ references=vuln.get("references"),
file_path=file_path,
component_name=component_name,
component_version=component_version,
- severity_justification=vuln.get('severity_justification'),
- dynamic_finding=True
+ severity_justification=vuln.get("severity_justification"),
+ dynamic_finding=True,
)
if vulnerability_id:
finding.unsaved_vulnerability_ids = [vulnerability_id]
@@ -82,7 +83,9 @@ def get_findings(vuln, test):
if item:
hash_key = hashlib.md5(
- node.get('Public ID').encode('utf-8') + node.get('Library').encode('utf-8')).hexdigest()
+ node.get("Public ID").encode("utf-8")
+ + node.get("Library").encode("utf-8")
+ ).hexdigest()
items[hash_key] = get_findings(node, test)
diff --git a/dojo/tools/scout_suite/__init__.py b/dojo/tools/scout_suite/__init__.py
index e2b8f0a3a1..237e49125f 100644
--- a/dojo/tools/scout_suite/__init__.py
+++ b/dojo/tools/scout_suite/__init__.py
@@ -1 +1 @@
-__author__ = 'Hasan Tayyar Besik'
+__author__ = "Hasan Tayyar Besik"
diff --git a/dojo/tools/scout_suite/parser.py b/dojo/tools/scout_suite/parser.py
index d66aca4583..038efd5251 100644
--- a/dojo/tools/scout_suite/parser.py
+++ b/dojo/tools/scout_suite/parser.py
@@ -1,4 +1,3 @@
-
import json
import textwrap
from datetime import datetime
@@ -26,8 +25,8 @@ def get_description_for_scan_types(self, scan_type):
def get_tests(self, scan_type, handle):
content = handle.read()
- if type(content) is bytes:
- content = content.decode('utf-8')
+ if isinstance(content, bytes):
+ content = content.decode("utf-8")
raw_data = content.replace("scoutsuite_results =", "")
data = json.loads(raw_data)
@@ -35,30 +34,49 @@ def get_tests(self, scan_type, handle):
last_run = data["last_run"]
test_description = ""
- test_description = "%s**Account:** `%s`\n" % (test_description, account_id)
- test_description = "%s**Provider:** %s\n" % (test_description, data["provider_name"])
- test_description = "%s**Ruleset:** `%s`\n" % (test_description, last_run["ruleset_name"])
- test_description = "%s**Ruleset Description:** %s\n" % (test_description, last_run["ruleset_about"])
+ test_description = "%s**Account:** `%s`\n" % (
+ test_description,
+ account_id,
+ )
+ test_description = "%s**Provider:** %s\n" % (
+ test_description,
+ data["provider_name"],
+ )
+ test_description = "%s**Ruleset:** `%s`\n" % (
+ test_description,
+ last_run["ruleset_name"],
+ )
+ test_description = "%s**Ruleset Description:** %s\n" % (
+ test_description,
+ last_run["ruleset_about"],
+ )
# Summary of Services
- test_description = "%s\n\n Services | Checked Items | Flagged Items | Max Level | Resource Count | Rules Count" % (test_description)
- test_description = "%s\n:---|---:|---:|---:|---:|---:" % (test_description)
+ test_description = (
+ "%s\n\n Services | Checked Items | Flagged Items | Max Level | Resource Count | Rules Count"
+ % (test_description)
+ )
+ test_description = "%s\n:---|---:|---:|---:|---:|---:" % (
+ test_description
+ )
for service, items in list(last_run["summary"].items()):
test_description += "\n"
- test_description += "|".join([
- service,
- str(items["checked_items"]),
- str(items["flagged_items"]),
- str(items["max_level"]),
- str(items["resources_count"]),
- str(items["rules_count"])
- ])
+ test_description += "|".join(
+ [
+ service,
+ str(items["checked_items"]),
+ str(items["flagged_items"]),
+ str(items["max_level"]),
+ str(items["resources_count"]),
+ str(items["rules_count"]),
+ ]
+ )
tests = list()
test = ParserTest(
name=self.ID,
type=data["provider_name"],
- version=last_run.get('version'),
+ version=last_run.get("version"),
)
test.description = test_description
@@ -68,8 +86,8 @@ def get_tests(self, scan_type, handle):
def get_findings(self, filename, test):
content = filename.read()
- if type(content) is bytes:
- content = content.decode('utf-8')
+ if isinstance(content, bytes):
+ content = content.decode("utf-8")
raw_data = content.replace("scoutsuite_results =", "")
data = json.loads(raw_data)
return self.__get_items(data)
@@ -79,7 +97,9 @@ def __get_items(self, data):
# get the date of the run
last_run_date = None
if "time" in data.get("last_run", {}):
- last_run_date = datetime.strptime(data["last_run"]["time"][0:10], "%Y-%m-%d").date()
+ last_run_date = datetime.strptime(
+ data["last_run"]["time"][0:10], "%Y-%m-%d"
+ ).date()
# Configured Services
for service_name in data["services"]:
@@ -87,15 +107,23 @@ def __get_items(self, data):
for finding_name in service_item.get("findings", []):
finding = service_item["findings"][finding_name]
for name in finding["items"]:
- description_text = finding.get("rationale", "") + "\n**Location:** " + name + "\n\n---\n"
- key = name.split('.')
+ description_text = (
+ finding.get("rationale", "")
+ + "\n**Location:** "
+ + name
+ + "\n\n---\n"
+ )
+ key = name.split(".")
i = 1
lookup = service_item
while i < len(key):
if key[i] in lookup:
- if (type(lookup[key[i]]) is dict):
+ if isinstance(lookup[key[i]], dict):
lookup = lookup[key[i]]
- if (key[i - 1] == "security_groups" or key[i - 1] == "PolicyDocument"):
+ if (
+ key[i - 1] == "security_groups"
+ or key[i - 1] == "PolicyDocument"
+ ):
break
i = i + 1
@@ -104,16 +132,20 @@ def __get_items(self, data):
self.item_data = ""
find = Finding(
- title=textwrap.shorten(finding['description'], 150),
+ title=textwrap.shorten(finding["description"], 150),
date=last_run_date,
cwe=1032, # Security Configuration Weaknesses, would like to fine tune
description=description_text,
severity=self.getCriticalityRating(finding["level"]),
mitigation=finding.get("remediation"),
- file_path=name, # we use file_path as a hack as there is no notion of "service" in finding today
+ file_path=name,
+ # we use file_path as a hack as there is no notion of
+ # "service" in finding today
dynamic_finding=False,
static_finding=True,
- vuln_id_from_tool=":".join([data["provider_code"], finding_name]),
+ vuln_id_from_tool=":".join(
+ [data["provider_code"], finding_name]
+ ),
)
if finding.get("references"):
find.references = "\n".join(finding["references"])
@@ -127,8 +159,8 @@ def formatview(self, depth):
else:
return ""
- def recursive_print(self, src, depth=0, key=''):
- tabs = lambda n: ' ' * n * 2
+ def recursive_print(self, src, depth=0, key=""):
+ def tabs(n): return " " * n * 2
if isinstance(src, dict):
for key, value in src.items():
if isinstance(src, str):
@@ -141,9 +173,15 @@ def recursive_print(self, src, depth=0, key=''):
if self.pdepth != depth:
self.item_data = self.item_data + "\n"
if key:
- self.item_data = self.item_data + self.formatview(depth) + '**%s:** %s\n\n' % (key.title(), src)
+ self.item_data = (
+ self.item_data
+ + self.formatview(depth)
+ + "**%s:** %s\n\n" % (key.title(), src)
+ )
else:
- self.item_data = self.item_data + self.formatview(depth) + '%s\n' % src
+ self.item_data = (
+ self.item_data + self.formatview(depth) + "%s\n" % src
+ )
self.pdepth = depth
# Criticality rating
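
The recursive_print method above walks ScoutSuite's arbitrarily nested dict/list structures and renders leaves as indented "**Key:** value" lines. A minimal standalone sketch of that traversal, with invented names and sample data rather than the parser's actual API:

    def render(src, depth=0, key=""):
        # Recursively flatten nested dicts/lists into indented Markdown-style lines.
        pad = "  " * depth
        lines = []
        if isinstance(src, dict):
            for k, v in src.items():
                lines += render(v, depth + 1, k)
        elif isinstance(src, list):
            for item in src:
                lines += render(item, depth + 1)
        elif key:
            lines.append("%s**%s:** %s" % (pad, str(key).title(), src))
        else:
            lines.append("%s%s" % (pad, src))
        return lines

    print("\n".join(render({"policy": {"name": "admin", "actions": ["s3:*"]}})))

The isinstance checks mirror the type() to isinstance cleanups made throughout this patch: isinstance also accepts subclasses, which is almost always the intended semantics.
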
diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py
index e9a04336d4..12a95e0557 100644
--- a/dojo/tools/semgrep/parser.py
+++ b/dojo/tools/semgrep/parser.py
@@ -4,7 +4,6 @@
class SemgrepParser(object):
-
def get_scan_types(self):
return ["Semgrep JSON Report"]
@@ -25,7 +24,7 @@ def get_findings(self, filename, test):
title=item["check_id"],
severity=self.convert_severity(item["extra"]["severity"]),
description=self.get_description(item),
- file_path=item['path'],
+ file_path=item["path"],
line=item["start"]["line"],
static_finding=True,
dynamic_finding=False,
@@ -34,26 +33,40 @@ def get_findings(self, filename, test):
)
# manage CWE
- if 'cwe' in item["extra"]["metadata"]:
+ if "cwe" in item["extra"]["metadata"]:
if isinstance(item["extra"]["metadata"].get("cwe"), list):
- finding.cwe = int(item["extra"]["metadata"].get("cwe")[0].partition(':')[0].partition('-')[2])
+ finding.cwe = int(
+ item["extra"]["metadata"]
+ .get("cwe")[0]
+ .partition(":")[0]
+ .partition("-")[2]
+ )
else:
- finding.cwe = int(item["extra"]["metadata"].get("cwe").partition(':')[0].partition('-')[2])
+ finding.cwe = int(
+ item["extra"]["metadata"]
+ .get("cwe")
+ .partition(":")[0]
+ .partition("-")[2]
+ )
# manage references from metadata
- if 'references' in item["extra"]["metadata"]:
- finding.references = "\n".join(item["extra"]["metadata"]["references"])
+ if "references" in item["extra"]["metadata"]:
+ finding.references = "\n".join(
+ item["extra"]["metadata"]["references"]
+ )
# manage mitigation from metadata
- if 'fix' in item["extra"]:
+ if "fix" in item["extra"]:
finding.mitigation = item["extra"]["fix"]
- elif 'fix_regex' in item["extra"]:
- finding.mitigation = "\n".join([
- "**You can automaticaly apply this regex:**",
- "\n```\n",
- json.dumps(item["extra"]["fix_regex"]),
- "\n```\n",
- ])
+ elif "fix_regex" in item["extra"]:
+ finding.mitigation = "\n".join(
+ [
+ "**You can automatically apply this regex:**",
+ "\n```\n",
+ json.dumps(item["extra"]["fix_regex"]),
+ "\n```\n",
+ ]
+ )
dupe_key = finding.title + finding.file_path + str(finding.line)
@@ -76,13 +89,13 @@ def convert_severity(self, val):
raise ValueError(f"Unknown value for severity: {val}")
def get_description(self, item):
- description = ''
+ description = ""
message = item["extra"]["message"]
- description += '**Result message:** {}\n'.format(message)
+ description += "**Result message:** {}\n".format(message)
snippet = item["extra"].get("lines")
if snippet is not None:
- description += '**Snippet:**\n```{}```\n'.format(snippet)
+ description += "**Snippet:**\n```{}```\n".format(snippet)
return description
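
The CWE handling above extracts the numeric id from Semgrep metadata entries shaped like "CWE-327: Use of a Broken or Risky Cryptographic Algorithm" by chaining str.partition calls. The parsing step in isolation (sample strings invented for illustration):

    def cwe_id(raw):
        # 'CWE-327: ...' -> cut at ':' -> 'CWE-327' -> take what follows '-' -> 327
        return int(raw.partition(":")[0].partition("-")[2])

    assert cwe_id("CWE-327: Use of a Broken or Risky Cryptographic Algorithm") == 327
    assert cwe_id("CWE-89") == 89

partition never raises when its separator is absent, so a bare "CWE-89" still resolves; only a string lacking the "CWE-" prefix entirely would fail at the int() conversion.
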
diff --git a/dojo/tools/skf/__init__.py b/dojo/tools/skf/__init__.py
index 56a56d5116..ad180af05d 100644
--- a/dojo/tools/skf/__init__.py
+++ b/dojo/tools/skf/__init__.py
@@ -1 +1 @@
-__author__ = 'martin.marsicano'
+__author__ = "martin.marsicano"
diff --git a/dojo/tools/skf/parser.py b/dojo/tools/skf/parser.py
index c8d15250b1..8200075693 100644
--- a/dojo/tools/skf/parser.py
+++ b/dojo/tools/skf/parser.py
@@ -7,7 +7,6 @@
class ColumnMappingStrategy(object):
-
mapped_column = None
def __init__(self):
@@ -17,27 +16,29 @@ def map_column_value(self, finding, column_value):
pass
def process_column(self, column_name, column_value, finding):
-
- if column_name.lower() == self.mapped_column and column_value is not None:
+ if (
+ column_name.lower() == self.mapped_column
+ and column_value is not None
+ ):
self.map_column_value(finding, column_value)
elif self.successor is not None:
self.successor.process_column(column_name, column_value, finding)
class DateColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'date'
+ self.mapped_column = "date"
super(DateColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
- finding.date = datetime.strptime(column_value, '%Y-%m-%d %H:%M:%S').date()
+ finding.date = datetime.strptime(
+ column_value, "%Y-%m-%d %H:%M:%S"
+ ).date()
class TitleColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'title'
+ self.mapped_column = "title"
super(TitleColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -45,9 +46,8 @@ def map_column_value(self, finding, column_value):
class DescriptionColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'description'
+ self.mapped_column = "description"
super(DescriptionColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -55,9 +55,8 @@ def map_column_value(self, finding, column_value):
class MitigationColumnMappingStrategy(ColumnMappingStrategy):
-
def __init__(self):
- self.mapped_column = 'mitigation'
+ self.mapped_column = "mitigation"
super(MitigationColumnMappingStrategy, self).__init__()
def map_column_value(self, finding, column_value):
@@ -65,7 +64,6 @@ def map_column_value(self, finding, column_value):
class SKFParser(object):
-
def get_scan_types(self):
return ["SKF Scan"]
@@ -95,18 +93,20 @@ def read_column_names(self, column_names, row):
def get_findings(self, filename, test):
content = filename.read()
- if type(content) is bytes:
- content = content.decode('utf-8')
+ if isinstance(content, bytes):
+ content = content.decode("utf-8")
column_names = dict()
chain = self.create_chain()
row_number = 0
- reader = csv.reader(io.StringIO(content), delimiter=',', quotechar='"', escapechar='\\')
+ reader = csv.reader(
+ io.StringIO(content), delimiter=",", quotechar='"', escapechar="\\"
+ )
dupes = dict()
for row in reader:
finding = Finding(test=test)
- finding.severity = 'Info'
+ finding.severity = "Info"
if row_number == 0:
self.read_column_names(column_names, row)
@@ -115,11 +115,21 @@ def get_findings(self, filename, test):
column_number = 0
for column in row:
- chain.process_column(column_names[column_number], column, finding)
+ chain.process_column(
+ column_names[column_number], column, finding
+ )
column_number += 1
if finding is not None:
- key = hashlib.sha256(str(finding.severity + '|' + finding.title + '|' + finding.description).encode('utf-8')).hexdigest()
+ key = hashlib.sha256(
+ str(
+ finding.severity
+ + "|"
+ + finding.title
+ + "|"
+ + finding.description
+ ).encode("utf-8")
+ ).hexdigest()
if key not in dupes:
dupes[key] = finding
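
The de-duplication above keys each CSV row on a SHA-256 digest of severity, title, and description, so identical rows collapse into a single finding. The keying logic reduced to its essentials (field values invented):

    import hashlib

    def dedupe_key(severity, title, description):
        # Identical severity|title|description tuples hash to the same key.
        return hashlib.sha256(
            (severity + "|" + title + "|" + description).encode("utf-8")
        ).hexdigest()

    dupes = {}
    for sev, title, desc in [("Info", "XSS", "Reflected"), ("Info", "XSS", "Reflected")]:
        dupes.setdefault(dedupe_key(sev, title, desc), (sev, title, desc))
    print(len(dupes))  # 1: the repeated row was dropped

The "|" separator guards against ambiguous concatenations such as ("ab", "c") and ("a", "bc") hashing to the same key.
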
diff --git a/dojo/tools/snyk/parser.py b/dojo/tools/snyk/parser.py
index 304935ba20..0918fc7f11 100755
--- a/dojo/tools/snyk/parser.py
+++ b/dojo/tools/snyk/parser.py
@@ -5,7 +5,6 @@
class SnykParser(object):
-
def get_scan_types(self):
return ["Snyk Scan"]
@@ -16,7 +15,6 @@ def get_description_for_scan_types(self, scan_type):
return "Snyk output file (snyk test --json > snyk.json) can be imported in JSON format."
def get_findings(self, json_output, test):
-
reportTree = self.parse_json(json_output)
if isinstance(reportTree, list):
@@ -34,58 +32,72 @@ def parse_json(self, json_output):
try:
data = json_output.read()
try:
- tree = json.loads(str(data, 'utf-8'))
- except:
+ tree = json.loads(str(data, "utf-8"))
+ except Exception:
tree = json.loads(data)
- except:
+ except Exception:
raise ValueError("Invalid format")
return tree
def get_items(self, tree, test):
items = {}
- target_file = tree.get('displayTargetFile', None)
- upgrades = tree.get('remediation', {}).get('upgrade', None)
- if 'vulnerabilities' in tree:
- vulnerabilityTree = tree['vulnerabilities']
+ target_file = tree.get("displayTargetFile", None)
+ upgrades = tree.get("remediation", {}).get("upgrade", None)
+ if "vulnerabilities" in tree:
+ vulnerabilityTree = tree["vulnerabilities"]
for node in vulnerabilityTree:
- item = self.get_item(node, test, target_file=target_file, upgrades=upgrades)
- unique_key = node['title'] + str(node['packageName'] + str(
- node['version']) + str(node['from']) + str(node['id']))
+ item = self.get_item(
+ node, test, target_file=target_file, upgrades=upgrades
+ )
+ unique_key = node["title"] + str(
+ node["packageName"]
+ + str(node["version"])
+ + str(node["from"])
+ + str(node["id"])
+ )
items[unique_key] = item
return list(items.values())
def get_item(self, vulnerability, test, target_file=None, upgrades=None):
-
# vulnerable and unaffected versions can be in string format for a single vulnerable version,
# or an array for multiple versions depending on the language.
- if isinstance(vulnerability['semver']['vulnerable'], list):
- vulnerable_versions = ", ".join(vulnerability['semver']['vulnerable'])
+ if isinstance(vulnerability["semver"]["vulnerable"], list):
+ vulnerable_versions = ", ".join(
+ vulnerability["semver"]["vulnerable"]
+ )
else:
- vulnerable_versions = vulnerability['semver']['vulnerable']
+ vulnerable_versions = vulnerability["semver"]["vulnerable"]
# Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss
- if 'cvssScore' in vulnerability:
- if vulnerability['cvssScore'] is None:
- severity = vulnerability['severity'].title()
- # If we're dealing with a license finding, there will be no cvssScore
- elif vulnerability['cvssScore'] <= 3.9:
+ if "cvssScore" in vulnerability:
+ if vulnerability["cvssScore"] is None:
+ severity = vulnerability["severity"].title()
+ # If we're dealing with a license finding, there will be no
+ # cvssScore
+ elif vulnerability["cvssScore"] <= 3.9:
severity = "Low"
- elif vulnerability['cvssScore'] >= 4.0 and vulnerability['cvssScore'] <= 6.9:
+ elif (
+ vulnerability["cvssScore"] >= 4.0
+ and vulnerability["cvssScore"] <= 6.9
+ ):
severity = "Medium"
- elif vulnerability['cvssScore'] >= 7.0 and vulnerability['cvssScore'] <= 8.9:
+ elif (
+ vulnerability["cvssScore"] >= 7.0
+ and vulnerability["cvssScore"] <= 8.9
+ ):
severity = "High"
else:
severity = "Critical"
else:
# Re-assign 'severity' directly
- severity = vulnerability['severity'].title()
+ severity = vulnerability["severity"].title()
# Construct "file_path" removing versions
- vulnPath = ''
- for index, item in enumerate(vulnerability['from']):
+ vulnPath = ""
+ for index, item in enumerate(vulnerability["from"]):
if index == 0:
vulnPath += "@".join(item.split("@")[0:-1])
else:
@@ -93,19 +105,28 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
# create the finding object
finding = Finding(
- title=vulnerability['from'][0] + ": " + vulnerability['title'],
+ title=vulnerability["from"][0] + ": " + vulnerability["title"],
test=test,
severity=severity,
- severity_justification="Issue severity of: **" + severity + "** from a base " +
- "CVSS score of: **" + str(vulnerability.get('cvssScore')) + "**",
- description="## Component Details\n - **Vulnerable Package**: " +
- vulnerability['packageName'] + "\n- **Current Version**: " + str(
- vulnerability['version']) + "\n- **Vulnerable Version(s)**: " +
- vulnerable_versions + "\n- **Vulnerable Path**: " + " > ".join(
- vulnerability['from']) + "\n" + vulnerability['description'],
+ severity_justification="Issue severity of: **"
+ + severity
+ + "** from a base "
+ + "CVSS score of: **"
+ + str(vulnerability.get("cvssScore"))
+ + "**",
+ description="## Component Details\n - **Vulnerable Package**: "
+ + vulnerability["packageName"]
+ + "\n- **Current Version**: "
+ + str(vulnerability["version"])
+ + "\n- **Vulnerable Version(s)**: "
+ + vulnerable_versions
+ + "\n- **Vulnerable Path**: "
+ + " > ".join(vulnerability["from"])
+ + "\n"
+ + vulnerability["description"],
mitigation="A fix (if available) will be provided in the description.",
- component_name=vulnerability['packageName'],
- component_version=vulnerability['version'],
+ component_name=vulnerability["packageName"],
+ component_version=vulnerability["version"],
false_p=False,
duplicate=False,
out_of_scope=False,
@@ -113,42 +134,47 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
static_finding=True,
dynamic_finding=False,
file_path=vulnPath,
- vuln_id_from_tool=vulnerability['id'],
+ vuln_id_from_tool=vulnerability["id"],
)
finding.unsaved_tags = []
# CVSSv3 vector
- if vulnerability.get('CVSSv3'):
- finding.cvssv3 = CVSS3(vulnerability['CVSSv3']).clean_vector()
+ if vulnerability.get("CVSSv3"):
+ finding.cvssv3 = CVSS3(vulnerability["CVSSv3"]).clean_vector()
# manage CVE and CWE with identifiers
- cwe_references = ''
- if 'identifiers' in vulnerability:
- if 'CVE' in vulnerability['identifiers']:
- vulnerability_ids = vulnerability['identifiers']['CVE']
+ cwe_references = ""
+ if "identifiers" in vulnerability:
+ if "CVE" in vulnerability["identifiers"]:
+ vulnerability_ids = vulnerability["identifiers"]["CVE"]
if vulnerability_ids:
finding.unsaved_vulnerability_ids = vulnerability_ids
- if 'CWE' in vulnerability['identifiers']:
- cwes = vulnerability['identifiers']['CWE']
+ if "CWE" in vulnerability["identifiers"]:
+ cwes = vulnerability["identifiers"]["CWE"]
if cwes:
- # Per the current json format, if several CWEs, take the first one.
+ # Per the current json format, if several CWEs, take the
+ # first one.
finding.cwe = int(cwes[0].split("-")[1])
- if len(vulnerability['identifiers']['CVE']) > 1:
- cwe_references = ', '.join(cwes)
+ if len(vulnerability["identifiers"]["CVE"]) > 1:
+ cwe_references = ", ".join(cwes)
else:
finding.cwe = 1035
- references = ''
- if 'id' in vulnerability:
- references = "**SNYK ID**: https://app.snyk.io/vuln/{}\n\n".format(vulnerability['id'])
+ references = ""
+ if "id" in vulnerability:
+ references = "**SNYK ID**: https://app.snyk.io/vuln/{}\n\n".format(
+ vulnerability["id"]
+ )
if cwe_references:
- references += "Several CWEs were reported: \n\n{}\n".format(cwe_references)
+ references += "Several CWEs were reported: \n\n{}\n".format(
+ cwe_references
+ )
# Append vuln references to references section
- for item in vulnerability.get('references', []):
- references += "**" + item['title'] + "**: " + item['url'] + "\n"
+ for item in vulnerability.get("references", []):
+ references += "**" + item["title"] + "**: " + item["url"] + "\n"
finding.references = references
@@ -160,21 +186,30 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
# Add the remediation substring to mitigation section
if (remediation_index != -1) and (references_index != -1):
- finding.mitigation = finding.description[remediation_index:references_index]
+ finding.mitigation = finding.description[
+ remediation_index:references_index
+ ]
# Add Target file if supplied
if target_file:
- finding.unsaved_tags.append('target_file:{}'.format(target_file))
- finding.mitigation += '\nUpgrade Location: {}'.format(target_file)
+ finding.unsaved_tags.append("target_file:{}".format(target_file))
+ finding.mitigation += "\nUpgrade Location: {}".format(target_file)
# Add the upgrade libs list to the mitigation section
if upgrades:
for current_pack_version, meta_dict in upgrades.items():
- upgraded_pack = meta_dict['upgradeTo']
- tertiary_upgrade_list = meta_dict['upgrades']
- if any(lib.split('@')[0] in finding.mitigation for lib in tertiary_upgrade_list):
- finding.unsaved_tags.append('upgrade_to:{}'.format(upgraded_pack))
- finding.mitigation += '\nUpgrade from {} to {} to fix this issue, as well as updating the following:\n - '.format(current_pack_version, upgraded_pack)
- finding.mitigation += '\n - '.join(tertiary_upgrade_list)
+ upgraded_pack = meta_dict["upgradeTo"]
+ tertiary_upgrade_list = meta_dict["upgrades"]
+ if any(
+ lib.split("@")[0] in finding.mitigation
+ for lib in tertiary_upgrade_list
+ ):
+ finding.unsaved_tags.append(
+ "upgrade_to:{}".format(upgraded_pack)
+ )
+ finding.mitigation += "\nUpgrade from {} to {} to fix this issue, as well as updating the following:\n - ".format(
+ current_pack_version, upgraded_pack
+ )
+ finding.mitigation += "\n - ".join(tertiary_upgrade_list)
return finding
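
The severity assignment above follows the NVD CVSS ranges cited in the comment (<= 3.9 Low, 4.0-6.9 Medium, 7.0-8.9 High, above that Critical) and falls back to Snyk's own severity string when no score is present, as with license findings. Condensed into one function over a hypothetical input dict:

    def snyk_severity(vulnerability):
        # Bucket cvssScore per https://nvd.nist.gov/vuln-metrics/cvss;
        # license findings carry no score, so reuse Snyk's severity label.
        score = vulnerability.get("cvssScore")
        if score is None:
            return vulnerability["severity"].title()
        if score <= 3.9:
            return "Low"
        if score <= 6.9:
            return "Medium"
        if score <= 8.9:
            return "High"
        return "Critical"

    assert snyk_severity({"cvssScore": 5.4, "severity": "medium"}) == "Medium"
    assert snyk_severity({"severity": "high"}) == "High"

Chained upper-bound checks also make the patch's two-sided range tests (>= 4.0 and <= 6.9) redundant, since each branch already excludes everything below it.
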
diff --git a/dojo/tools/solar_appscreener/parser.py b/dojo/tools/solar_appscreener/parser.py
index cc21ee81cf..093d476fd2 100644
--- a/dojo/tools/solar_appscreener/parser.py
+++ b/dojo/tools/solar_appscreener/parser.py
@@ -18,15 +18,15 @@ def get_description_for_scan_types(self, scan_type):
return "Solar Appscreener report file can be imported in CSV format from Detailed_Results.csv."
def get_findings(self, filename, test):
-
if filename is None:
return ()
content = filename.read()
- if type(content) is bytes:
- content = content.decode('utf-8')
- reader = csv.DictReader(io.StringIO(
- content), delimiter=',', quotechar='"')
+ if isinstance(content, bytes):
+ content = content.decode("utf-8")
+ reader = csv.DictReader(
+ io.StringIO(content), delimiter=",", quotechar='"'
+ )
csvarray = []
for row in reader:
@@ -35,14 +35,14 @@ def get_findings(self, filename, test):
items = list()
for row in csvarray:
finding = Finding(test=test)
- finding.title = row.get('Vulnerability', '')
- finding.description = row.get('Description', '')
- finding.mitigation = row.get('Recommendations')
- finding.references = row.get('Links')
- finding.severity = row.get('Severity Level', 'Info')
- finding.file_path = row.get('File')
- finding.sast_source_file_path = row.get('File')
- finding.line = row.get('Line')
+ finding.title = row.get("Vulnerability", "")
+ finding.description = row.get("Description", "")
+ finding.mitigation = row.get("Recommendations")
+ finding.references = row.get("Links")
+ finding.severity = row.get("Severity Level", "Info")
+ finding.file_path = row.get("File")
+ finding.sast_source_file_path = row.get("File")
+ finding.line = row.get("Line")
if finding.line:
if not finding.line.isdigit():
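
The CSV ingestion above decodes byte input when needed and relies on csv.DictReader to key each row by its header names, with row.get() supplying defaults for absent columns. In miniature, with an invented sample row but the parser's real column names:

    import csv
    import io

    sample = b'Vulnerability,Description,Severity Level,File,Line\r\nSQLi,Raw query,High,app.py,42\r\n'
    content = sample.decode("utf-8") if isinstance(sample, bytes) else sample
    reader = csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"')
    for row in reader:
        # .get() returns the default when a column is missing entirely
        print(row.get("Vulnerability", ""), row.get("Severity Level", "Info"), row.get("Line"))
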
diff --git a/dojo/tools/sonarqube/parser.py b/dojo/tools/sonarqube/parser.py
index 24151d401e..d05c70d040 100644
--- a/dojo/tools/sonarqube/parser.py
+++ b/dojo/tools/sonarqube/parser.py
@@ -10,7 +10,6 @@
class SonarQubeParser(object):
-
mode = None
def set_mode(self, mode):
@@ -31,16 +30,23 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
parser = etree.HTMLParser()
tree = etree.parse(filename, parser)
- if self.mode not in [None, 'detailed']:
- raise ValueError("Internal error: Invalid mode " + self.mode + ". Expected: one of None, 'detailed'")
+ if self.mode not in [None, "detailed"]:
+ raise ValueError(
+ "Internal error: Invalid mode "
+ + self.mode
+ + ". Expected: one of None, 'detailed'"
+ )
return self.get_items(tree, test, self.mode)
def get_items(self, tree, test, mode):
- # Check that there is at least one vulnerability (the vulnerabilities table is absent when no vuln are found)
- detailTbody = tree.xpath("/html/body/div[contains(@class,'detail')]/table/tbody")
+ # Check that there is at least one vulnerability (the vulnerabilities
+ # table is absent when no vuln are found)
+ detailTbody = tree.xpath(
+ "/html/body/div[contains(@class,'detail')]/table/tbody"
+ )
dupes = dict()
- if (len(detailTbody) == 2):
+ if len(detailTbody) == 2:
# First is "Detail of the Detected Vulnerabilities" (not present if no vuln)
# Second is "Known Security Rules"
vulnerabilities_table = list(detailTbody[0].iter("tr"))
@@ -57,18 +63,25 @@ def get_items(self, tree, test, mode):
for vuln in vulnerabilities_table:
vuln_properties = list(vuln.iter("td"))
vuln_rule_name = list(vuln_properties[0].iter("a"))[0].text
- vuln_severity = self.convert_sonar_severity(vuln_properties[1].text)
+ vuln_severity = self.convert_sonar_severity(
+ vuln_properties[1].text
+ )
vuln_file_path = vuln_properties[2].text
vuln_line = vuln_properties[3].text
vuln_title = vuln_properties[4].text
vuln_mitigation = vuln_properties[5].text
vuln_key = vuln_properties[6].text
if vuln_title is None or vuln_mitigation is None:
- raise ValueError("Parser ValueError: can't find a title or a mitigation for vulnerability of name " + vuln_rule_name)
+ raise ValueError(
+ "Parser ValueError: can't find a title or a mitigation for vulnerability of name "
+ + vuln_rule_name
+ )
try:
vuln_details = rulesDic[vuln_rule_name]
vuln_description = self.get_description(vuln_details)
- vuln_references = self.get_references(vuln_rule_name, vuln_details)
+ vuln_references = self.get_references(
+ vuln_rule_name, vuln_details
+ )
vuln_cwe = self.get_cwe(vuln_references)
except KeyError:
vuln_description = "No description provided"
@@ -76,34 +89,73 @@ def get_items(self, tree, test, mode):
vuln_cwe = 0
if mode is None:
self.process_result_file_name_aggregated(
- test, dupes, vuln_title, vuln_cwe, vuln_description, vuln_file_path, vuln_line, vuln_severity, vuln_mitigation, vuln_references)
+ test,
+ dupes,
+ vuln_title,
+ vuln_cwe,
+ vuln_description,
+ vuln_file_path,
+ vuln_line,
+ vuln_severity,
+ vuln_mitigation,
+ vuln_references,
+ )
else:
self.process_result_detailed(
- test, dupes, vuln_title, vuln_cwe, vuln_description, vuln_file_path, vuln_line, vuln_severity, vuln_mitigation, vuln_references, vuln_key)
+ test,
+ dupes,
+ vuln_title,
+ vuln_cwe,
+ vuln_description,
+ vuln_file_path,
+ vuln_line,
+ vuln_severity,
+ vuln_mitigation,
+ vuln_references,
+ vuln_key,
+ )
return list(dupes.values())
# Process one vuln from the report for "SonarQube Scan detailed"
# Create the finding and add it into the dupes list
- def process_result_detailed(self, test, dupes, vuln_title, vuln_cwe, vuln_description, vuln_file_path, vuln_line, vuln_severity, vuln_mitigation, vuln_references, vuln_key):
- # vuln_key is the unique id from tool which means that there is basically no aggregation except real duplicates
- aggregateKeys = "{}{}{}{}{}".format(vuln_cwe, vuln_title, vuln_description, vuln_file_path, vuln_key)
- find = Finding(title=vuln_title,
- cwe=int(vuln_cwe),
- description=vuln_description,
- file_path=vuln_file_path,
- line=vuln_line,
- test=test,
- severity=vuln_severity,
- mitigation=vuln_mitigation,
- references=vuln_references,
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- mitigated=None,
- impact="No impact provided",
- static_finding=True,
- dynamic_finding=False,
- unique_id_from_tool=vuln_key)
+ def process_result_detailed(
+ self,
+ test,
+ dupes,
+ vuln_title,
+ vuln_cwe,
+ vuln_description,
+ vuln_file_path,
+ vuln_line,
+ vuln_severity,
+ vuln_mitigation,
+ vuln_references,
+ vuln_key,
+ ):
+ # vuln_key is the unique id from tool which means that there is
+ # basically no aggregation except real duplicates
+ aggregateKeys = "{}{}{}{}{}".format(
+ vuln_cwe, vuln_title, vuln_description, vuln_file_path, vuln_key
+ )
+ find = Finding(
+ title=vuln_title,
+ cwe=int(vuln_cwe),
+ description=vuln_description,
+ file_path=vuln_file_path,
+ line=vuln_line,
+ test=test,
+ severity=vuln_severity,
+ mitigation=vuln_mitigation,
+ references=vuln_references,
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ mitigated=None,
+ impact="No impact provided",
+ static_finding=True,
+ dynamic_finding=False,
+ unique_id_from_tool=vuln_key,
+ )
dupes[aggregateKeys] = find
# Process one vuln from the report for "SonarQube Scan"
@@ -111,33 +163,58 @@ def process_result_detailed(self, test, dupes, vuln_title, vuln_cwe, vuln_descri
# For aggregated findings:
# - the description is enriched with each finding line number
# - the mitigation (message) is concatenated with each finding's mitigation value
- def process_result_file_name_aggregated(self, test, dupes, vuln_title, vuln_cwe, vuln_description, vuln_file_path, vuln_line, vuln_severity, vuln_mitigation, vuln_references):
- aggregateKeys = "{}{}{}{}".format(vuln_cwe, vuln_title, vuln_description, vuln_file_path)
+ def process_result_file_name_aggregated(
+ self,
+ test,
+ dupes,
+ vuln_title,
+ vuln_cwe,
+ vuln_description,
+ vuln_file_path,
+ vuln_line,
+ vuln_severity,
+ vuln_mitigation,
+ vuln_references,
+ ):
+ aggregateKeys = "{}{}{}{}".format(
+ vuln_cwe, vuln_title, vuln_description, vuln_file_path
+ )
descriptionOneOccurence = "Line: {}".format(vuln_line)
if aggregateKeys not in dupes:
- find = Finding(title=vuln_title,
- cwe=int(vuln_cwe),
- description=vuln_description + '\n\n-----\nOccurences:\n' + descriptionOneOccurence,
- file_path=vuln_file_path,
- # No line number because we have aggregated different vulnerabilities that may have different line numbers
- test=test,
- severity=vuln_severity,
- mitigation=vuln_mitigation,
- references=vuln_references,
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- mitigated=None,
- impact="No impact provided",
- static_finding=True,
- dynamic_finding=False,
- nb_occurences=1)
+ find = Finding(
+ title=vuln_title,
+ cwe=int(vuln_cwe),
+ description=vuln_description
+ + "\n\n-----\nOccurrences:\n"
+ + descriptionOneOccurence,
+ file_path=vuln_file_path,
+ # No line number because we have aggregated different
+ # vulnerabilities that may have different line numbers
+ test=test,
+ severity=vuln_severity,
+ mitigation=vuln_mitigation,
+ references=vuln_references,
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ mitigated=None,
+ impact="No impact provided",
+ static_finding=True,
+ dynamic_finding=False,
+ nb_occurences=1,
+ )
dupes[aggregateKeys] = find
else:
- # We have already created a finding for this aggregate: updates the description, nb_occurences and mitigation (message field in the report which may vary for each vuln)
+ # We have already created a finding for this aggregate: updates the
+ # description, nb_occurences and mitigation (message field in the
+ # report which may vary for each vuln)
find = dupes[aggregateKeys]
- find.description = "{}\n{}".format(find.description, descriptionOneOccurence)
- find.mitigation = "{}\n______\n{}".format(find.mitigation, vuln_mitigation)
+ find.description = "{}\n{}".format(
+ find.description, descriptionOneOccurence
+ )
+ find.mitigation = "{}\n______\n{}".format(
+ find.mitigation, vuln_mitigation
+ )
find.nb_occurences = find.nb_occurences + 1
def convert_sonar_severity(self, sonar_severity):
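
The aggregated mode above folds repeated (cwe, title, description, file path) tuples into a single finding: the first occurrence creates it, later ones append their line number to the description, concatenate the mitigation, and bump nb_occurences. The merge step with simplified dict records standing in for the real Finding model:

    def merge(dupes, key, line, mitigation):
        # First sighting creates the record; later sightings append and count.
        if key not in dupes:
            dupes[key] = {"description": "Line: %s" % line,
                          "mitigation": mitigation,
                          "nb_occurences": 1}
        else:
            found = dupes[key]
            found["description"] += "\nLine: %s" % line
            found["mitigation"] += "\n______\n%s" % mitigation
            found["nb_occurences"] += 1

    dupes = {}
    merge(dupes, "79|XSS|desc|app.py", 10, "Escape output")
    merge(dupes, "79|XSS|desc|app.py", 25, "Escape output")
    print(dupes["79|XSS|desc|app.py"]["nb_occurences"])  # 2
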
@@ -154,7 +231,9 @@ def convert_sonar_severity(self, sonar_severity):
return "Info"
def get_description(self, vuln_details):
- rule_description = etree.tostring(vuln_details, pretty_print=True).decode('utf-8', errors='replace')
+ rule_description = etree.tostring(
+ vuln_details, pretty_print=True
+ ).decode("utf-8", errors="replace")
rule_description = rule_description.split("