From 5914a8d0721ec804661fd13a03551c9fe8f614a8 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 3 Jul 2023 16:10:02 +0000 Subject: [PATCH 01/85] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 13cc2a40a3..4e8aea8365 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.24.0", + "version": "2.25.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 425c77274e..4c1f6f5856 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa -__version__ = '2.24.0' +__version__ = '2.25.0-dev' __url__ = 'https://github.com/DefectDojo/django-DefectDojo' __docs__ = 'https://documentation.defectdojo.com' diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 4781a3fd42..40ab2609ac 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.24.0" +appVersion: "2.25.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.74 +version: 1.6.75-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 7d714d4fc0f4a791070ef1ad28db86c2a1a2e7af Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 13:32:54 -0500 Subject: [PATCH 02/85] Update rabbitmq Docker tag from 3.12.0 to v3.12.1 (docker-compose.yml) (#8331) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 24edcdf973..53ef096186 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -154,7 +154,7 @@ services: volumes: - defectdojo_postgres:/var/lib/postgresql/data rabbitmq: - image: rabbitmq:3.12.0-alpine@sha256:670efbfec7e9501ff877eca67f0653f421803269e113b4e8cf17cb5965ea415d + image: rabbitmq:3.12.1-alpine@sha256:8ec30b33b1bb517145a3c43cd0d377367db0fa4903650e1a026c541f15bfc9a8 profiles: - mysql-rabbitmq - postgres-rabbitmq From b58043674f07b91e05fdc6bbe3bd1faa5288b58e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 13:36:35 -0500 Subject: [PATCH 03/85] Bump jira from 3.5.1 to 3.5.2 (#8329) Bumps [jira](https://github.com/pycontribs/jira) from 3.5.1 to 3.5.2. - [Release notes](https://github.com/pycontribs/jira/releases) - [Commits](https://github.com/pycontribs/jira/compare/3.5.1...3.5.2) --- updated-dependencies: - dependency-name: jira dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f569e00ea2..deeb48ccb7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ djangorestframework==3.14.0 gunicorn==20.1.0 html2text==2020.1.16 humanize==4.6.0 -jira==3.5.1 +jira==3.5.2 PyGithub==1.58.2 lxml==4.9.2 Markdown==3.4.3 From 81507a48f020573d9a6051d3b1426e3c009b9ce2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 13:37:40 -0500 Subject: [PATCH 04/85] Update release-drafter/release-drafter action from v5.23.0 to v5.24.0 (.github/workflows/release-drafter.yml) (#8322) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/release-drafter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index 060891fac2..786d895516 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -19,7 +19,7 @@ jobs: update_release_draft: runs-on: ubuntu-latest steps: - - uses: release-drafter/release-drafter@v5.23.0 + - uses: release-drafter/release-drafter@v5.24.0 with: version: ${{github.event.inputs.version}} env: From c382877e9f39811c5a8efd459b4792b28c87c6df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 13:38:49 -0500 Subject: [PATCH 05/85] Bump vcrpy from 4.3.1 to 5.0.0 (#8316) Bumps [vcrpy](https://github.com/kevin1024/vcrpy) from 4.3.1 to 5.0.0. - [Release notes](https://github.com/kevin1024/vcrpy/releases) - [Changelog](https://github.com/kevin1024/vcrpy/blob/master/docs/changelog.rst) - [Commits](https://github.com/kevin1024/vcrpy/compare/v4.3.1...v5.0.0) --- updated-dependencies: - dependency-name: vcrpy dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index deeb48ccb7..a9a0e98fe0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -64,7 +64,7 @@ JSON-log-formatter==0.5.2 django-split-settings==1.2.0 django-debug-toolbar==4.1.0 django-debug-toolbar-request-history==0.1.4 -vcrpy==4.3.1 +vcrpy==5.0.0 vcrpy-unittest==0.1.7 django-tagulous==1.3.3 PyJWT==2.7.0 From 40581b38d34cf95650f234536f5181a6e6868e5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 14:04:44 -0500 Subject: [PATCH 06/85] Bump humanize from 4.6.0 to 4.7.0 (#8324) Bumps [humanize](https://github.com/python-humanize/humanize) from 4.6.0 to 4.7.0. - [Release notes](https://github.com/python-humanize/humanize/releases) - [Commits](https://github.com/python-humanize/humanize/compare/4.6.0...4.7.0) --- updated-dependencies: - dependency-name: humanize dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9a0e98fe0..b4893a8e66 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,7 +28,7 @@ Django==4.1.9 djangorestframework==3.14.0 gunicorn==20.1.0 html2text==2020.1.16 -humanize==4.6.0 +humanize==4.7.0 jira==3.5.2 PyGithub==1.58.2 lxml==4.9.2 From 3abcb71f289dab8cefdab8e4272d2dffd60b786d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 14:51:27 -0500 Subject: [PATCH 07/85] Bump boto3 from 1.26.159 to 1.26.165 (#8336) Bumps [boto3](https://github.com/boto/boto3) from 1.26.159 to 1.26.165. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.26.159...1.26.165) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b4893a8e66..48ef7948f2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.26.159 # Required for Celery Broker AWS (SQS) support +boto3==1.26.165 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From 94097d6519b7d33ed0e3800b499cf622d6a102df Mon Sep 17 00:00:00 2001 From: kiblik Date: Mon, 3 Jul 2023 20:12:43 +0000 Subject: [PATCH 08/85] Fix: STATICFILES_DIRS warning (#8252) --- Dockerfile.django-alpine | 5 ++++- Dockerfile.django-debian | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/Dockerfile.django-alpine b/Dockerfile.django-alpine index 88c90df9c9..f777e41722 100644 --- a/Dockerfile.django-alpine +++ b/Dockerfile.django-alpine @@ -106,7 +106,10 @@ RUN \ chown ${appuser} /var/run/${appuser} && \ chmod g=u /var/run/${appuser} && \ chmod 775 /*.sh && \ - mkdir -p media/threat && chown -R ${uid} media + mkdir -p media/threat && chown -R ${uid} media && \ + # To avoid warning: (staticfiles.W004) The directory '/app/components/node_modules' in the STATICFILES_DIRS setting does not exist. + mkdir -p components/node_modules && \ + chown ${appuser} components/node_modules USER ${uid} ENV \ # Only variables that are not defined in settings.dist.py diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index e03ea67e86..cac385b199 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -111,7 +111,10 @@ RUN \ chown ${appuser} /var/run/${appuser} && \ chmod g=u /var/run/${appuser} && \ chmod 775 /*.sh && \ - mkdir -p media/threat && chown -R ${uid} media + mkdir -p media/threat && chown -R ${uid} media && \ + # To avoid warning: (staticfiles.W004) The directory '/app/components/node_modules' in the STATICFILES_DIRS setting does not exist. 
+ mkdir -p components/node_modules && \
+ chown ${appuser} components/node_modules
 USER ${uid}
 ENV \
 # Only variables that are not defined in settings.dist.py

From 140e006c95927f8a41611064a034d0e383b0c32f Mon Sep 17 00:00:00 2001
From: Alejandro Tortolero
Date: Mon, 3 Jul 2023 15:14:00 -0500
Subject: [PATCH 09/85] Update files with PEP8 standards in folder dojo/tools #004 (#8304)

* Update files in folder dojo/tools/hydra with PEP8 standards.
* Update files in folder dojo/tools/huskyci with PEP8 standards.
* Update files in folder dojo/tools/ibm_app with PEP8 standards.
* Update files in folder dojo/tools/immuniweb with PEP8 standards.
* Update files in folder dojo/tools/intsights with PEP8 standards.
* Update files in folder dojo/tools/jfrog_xray_api_summary_artifact with PEP8 standards.
* Update files in folder dojo/tools/jfrog_xray_unified with PEP8 standards.
* Update files in folder dojo/tools/jfrogxray with PEP8 standards.
* Update files in folder dojo/tools/kics with PEP8 standards.
* Update files in folder dojo/tools/kiuwan with PEP8 standards.
* Update files in folder dojo/tools/meterian with PEP8 standards.
* Update files in folder dojo/tools/microfocus_webinspect with PEP8 standards.
* Update files in folder dojo/tools/mobsfscan with PEP8 standards.
* Change BaseException to Exception
---
 dojo/tools/huskyci/parser.py | 37 ++--
 dojo/tools/hydra/parser.py | 67 ++++--
 dojo/tools/ibm_app/parser.py | 123 ++++++-----
 dojo/tools/immuniweb/parser.py | 49 +++--
 dojo/tools/intsights/parser.py | 194 +++++++++++-------
 .../jfrog_xray_api_summary_artifact/parser.py | 105 ++++++----
 dojo/tools/jfrog_xray_unified/parser.py | 94 +++++----
 dojo/tools/jfrogxray/parser.py | 119 +++++++----
 dojo/tools/kics/parser.py | 44 ++--
 dojo/tools/kiuwan/parser.py | 106 ++++++----
 dojo/tools/kubebench/parser.py | 111 +++++-----
 dojo/tools/meterian/parser.py | 70 +++++--
 dojo/tools/microfocus_webinspect/parser.py | 69 ++++---
 dojo/tools/mobsfscan/parser.py | 44 ++--
 14 files changed, 768 insertions(+), 464 deletions(-)

diff --git a/dojo/tools/huskyci/parser.py b/dojo/tools/huskyci/parser.py
index 24d5639640..455204bd52 100644
--- a/dojo/tools/huskyci/parser.py
+++ b/dojo/tools/huskyci/parser.py
@@ -19,7 +19,6 @@ def get_description_for_scan_types(self, scan_type):
 return "Import HuskyCI Report vulnerabilities in JSON format."
def get_findings(self, json_output, test): - if json_output is None: return @@ -31,10 +30,10 @@ def parse_json(self, json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise ValueError("Invalid format") return tree @@ -42,18 +41,19 @@ def parse_json(self, json_output): def get_items(self, tree, test): items = {} - for language in tree.get('huskyciresults', {}): - tools_results = tree['huskyciresults'][language] + for language in tree.get("huskyciresults", {}): + tools_results = tree["huskyciresults"][language] for tool in tools_results: severity_results = tools_results[tool] for severity in severity_results: vulns = severity_results[severity] for vuln in vulns: - vuln['severity'] = severity[0:-5].lower().capitalize() - if vuln['severity'] not in ('High', 'Medium', 'Low'): + vuln["severity"] = severity[0:-5].lower().capitalize() + if vuln["severity"] not in ("High", "Medium", "Low"): continue unique_key = hashlib.md5( - str(vuln).encode('utf-8')).hexdigest() + str(vuln).encode("utf-8") + ).hexdigest() item = get_item(vuln, test) items[unique_key] = item @@ -62,21 +62,21 @@ def get_items(self, tree, test): def get_item(item_node, test): # description - description = item_node.get('details', '') - if 'code' in item_node: + description = item_node.get("details", "") + if "code" in item_node: description += "\nCode: " + item_node.get("code") - if 'confidence' in item_node: + if "confidence" in item_node: description += "\nConfidence: " + item_node.get("confidence") - if 'securitytool' in item_node: + if "securitytool" in item_node: description += "\nSecurity Tool: " + item_node.get("securitytool") finding = Finding( - title=item_node.get('title'), + title=item_node.get("title"), test=test, - severity=item_node.get('severity'), + severity=item_node.get("severity"), description=description, - mitigation='N/A', - references='', + mitigation="N/A", + references="", false_p=False, duplicate=False, out_of_scope=False, @@ -85,6 +85,7 @@ def get_item(item_node, test): line=item_node.get("line"), static_finding=True, dynamic_finding=False, - impact="No impact provided") + impact="No impact provided" + ) return finding diff --git a/dojo/tools/hydra/parser.py b/dojo/tools/hydra/parser.py index cd4767f96f..f24160ac7f 100644 --- a/dojo/tools/hydra/parser.py +++ b/dojo/tools/hydra/parser.py @@ -10,12 +10,14 @@ class HydraScanMetadata: def __init__(self, generator): - self.date = generator.get('built', ) - self.command = generator.get('commandline') - self.schema_version = generator.get('jsonoutputversion') - self.service_type = generator.get('service') - self.tool_version = generator.get('version') - self.server = generator.get('server') + self.date = generator.get( + "built", + ) + self.command = generator.get("commandline") + self.schema_version = generator.get("jsonoutputversion") + self.service_type = generator.get("service") + self.tool_version = generator.get("version") + self.server = generator.get("server") class HydraParser(object): @@ -40,7 +42,9 @@ def get_findings(self, json_output, test): return findings - def __extract_findings(self, raw_findings, metadata: HydraScanMetadata, test): + def __extract_findings( + self, raw_findings, metadata: HydraScanMetadata, test + ): findings = [] for raw_finding in raw_findings: @@ -48,28 +52,47 @@ def __extract_findings(self, raw_findings, metadata: HydraScanMetadata, test): finding = 
self.__extract_finding(raw_finding, metadata, test) findings.append(finding) except ValueError: - logger.warning('Error when digesting a finding from hydra! Please revise supplied report, vital information was missing (e.g. host)!') + logger.warning( + "Error when digesting a finding from hydra! Please revise supplied report, vital information was missing (e.g. host)!" + ) return findings - def __extract_finding(self, raw_finding, metadata: HydraScanMetadata, test) -> Finding: - host = raw_finding.get('host') - port = raw_finding.get('port') - username = raw_finding.get('login') - password = raw_finding.get('password') - - if (host is None) or (port is None) or (username is None) or (password is None): - raise ValueError("Vital information is missing for this finding! Skipping this finding!") + def __extract_finding( + self, raw_finding, metadata: HydraScanMetadata, test + ) -> Finding: + host = raw_finding.get("host") + port = raw_finding.get("port") + username = raw_finding.get("login") + password = raw_finding.get("password") + + if ( + (host is None) + or (port is None) + or (username is None) + or (password is None) + ): + raise ValueError( + "Vital information is missing for this finding! Skipping this finding!" + ) finding = Finding( test=test, title="Weak username / password combination found for " + host, - date=parse_datetime(metadata.date) if metadata.date else date.today(), + date=parse_datetime(metadata.date) + if metadata.date + else date.today(), severity="High", - description=host + " on port " + str(port) + " is allowing logins with easy to guess username " + username + " and password " + password, + description=host + + " on port " + + str(port) + + " is allowing logins with easy to guess username " + + username + + " and password " + + password, static_finding=False, dynamic_finding=True, - service=metadata.service_type, + service=metadata.service_type ) finding.unsaved_endpoints = [Endpoint(host=host, port=port)] @@ -79,7 +102,9 @@ def __extract_finding(self, raw_finding, metadata: HydraScanMetadata, test) -> F def __parse_json(json_output): report = json.load(json_output) - if 'generator' not in report or 'results' not in report: - raise ValueError("Unexpected JSON format provided. That doesn't look like a Hydra scan!") + if "generator" not in report or "results" not in report: + raise ValueError( + "Unexpected JSON format provided. That doesn't look like a Hydra scan!" + ) return report diff --git a/dojo/tools/ibm_app/parser.py b/dojo/tools/ibm_app/parser.py index 8a1c52fc16..8e4147a228 100644 --- a/dojo/tools/ibm_app/parser.py +++ b/dojo/tools/ibm_app/parser.py @@ -9,7 +9,6 @@ class IbmAppParser(object): - def get_scan_types(self): return ["IBM AppScan DAST"] @@ -20,19 +19,20 @@ def get_description_for_scan_types(self, scan_type): return "XML file from IBM App Scanner." def get_findings(self, file, test): - ibm_scan_tree = ElementTree.parse(file) root = ibm_scan_tree.getroot() # validate XML file - if 'xml-report' not in root.tag: - raise ValueError("This does not look like a valid expected Ibm AppScan DAST XML file.") + if "xml-report" not in root.tag: + raise ValueError( + "This does not look like a valid expected Ibm AppScan DAST XML file." 
+ ) - issue_list = [] # self.hosts = self.fetch_host_details() issue_types = self.fetch_issue_types(root) dupes = dict() - # Now time to loop through individual issues and perform necessary actions + # Now time to loop through individual issues and perform necessary + # actions for issue in root.iter("issue-group"): for item in issue.iter("item"): ref_link = "" @@ -40,34 +40,52 @@ def get_findings(self, file, test): recommendation_data = "" issue_data = issue_types[item.find("issue-type/ref").text] - name = issue_data['name'] + name = issue_data["name"] # advisory = issue_data['advisory'] - vulnerability_id = issue_data.get('cve') + vulnerability_id = issue_data.get("cve") - cwe = issue_data.get('cwe') + cwe = issue_data.get("cwe") if cwe: cwe = int(cwe) - url = self.get_url(root, item.find('url/ref').text) - - severity = item.find('severity').text.capitalize() - if severity == 'Informational': - severity = 'Info' - issue_description = self.fetch_advisory_group(root, issue_data['advisory']) - - for fix_recommendation_group in root.iter("fix-recommendation-group"): - for recommendation in fix_recommendation_group.iter("item"): - if recommendation.attrib['id'] == issue_data["fix-recommendation"]: - data = recommendation.find("general/fixRecommendation") + url = self.get_url(root, item.find("url/ref").text) + + severity = item.find("severity").text.capitalize() + if severity == "Informational": + severity = "Info" + issue_description = self.fetch_advisory_group( + root, issue_data["advisory"] + ) + + for fix_recommendation_group in root.iter( + "fix-recommendation-group" + ): + for recommendation in fix_recommendation_group.iter( + "item" + ): + if ( + recommendation.attrib["id"] + == issue_data["fix-recommendation"] + ): + data = recommendation.find( + "general/fixRecommendation" + ) for data_text in data.iter("text"): - recommendation_data += data_text.text + "\n" # some texts are being repeated + recommendation_data += ( + data_text.text + "\n" + ) # some texts are being repeated - for link in data.iter('link'): + for link in data.iter("link"): if link is not None: ref_link += link.text + "\n" - # Now time to start assigning issues to findings and endpoints - dupe_key = hashlib.md5(str(issue_description + name + severity).encode('utf-8')).hexdigest() + # Now time to start assigning issues to findings and + # endpoints + dupe_key = hashlib.md5( + str(issue_description + name + severity).encode( + "utf-8" + ) + ).hexdigest() # check if finding is a duplicate if dupe_key in dupes: finding = dupes[dupe_key] # fetch finding @@ -75,24 +93,31 @@ def get_findings(self, file, test): finding.description += issue_description else: # finding is not a duplicate # create finding - finding = Finding(title=name, - test=test, - cwe=cwe, - description=issue_description, - severity=severity, - mitigation=recommendation_data, - references=ref_link, - dynamic_finding=True) + finding = Finding( + title=name, + test=test, + cwe=cwe, + description=issue_description, + severity=severity, + mitigation=recommendation_data, + references=ref_link, + dynamic_finding=True + ) if vulnerability_id: - finding.unsaved_vulnerability_ids = [vulnerability_id] + finding.unsaved_vulnerability_ids = [ + vulnerability_id + ] finding.unsaved_endpoints = list() dupes[dupe_key] = finding # in case empty string is returned as url # this condition is very rare to occur - # As most of the actions of any vuln scanner depends on urls + # As most of the actions of any vuln scanner depends on + # urls if url: - 
finding.unsaved_endpoints.append(Endpoint.from_uri(url)) + finding.unsaved_endpoints.append( + Endpoint.from_uri(url) + ) return list(dupes.values()) @@ -101,22 +126,24 @@ def fetch_issue_types(self, root): issues = {} for issue_type in root.iter("issue-type-group"): for item in issue_type.iter("item"): - issues[item.attrib['id']] = { - 'name': item.find("name").text, - 'advisory': item.find("advisory/ref").text, - 'fix-recommendation': item.find("fix-recommendation/ref").text + issues[item.attrib["id"]] = { + "name": item.find("name").text, + "advisory": item.find("advisory/ref").text, + "fix-recommendation": item.find( + "fix-recommendation/ref" + ).text, } cve = item.find("cve").text if cve is not None: - issues[item.attrib['id']]['cve'] = cve + issues[item.attrib["id"]]["cve"] = cve # cwe can be a link cwe = item.find("cwe/link") if cwe is None: cwe = item.find("cwe") if cwe.text is not None: - issues[item.attrib['id']]['cwe'] = int(cwe.text) + issues[item.attrib["id"]]["cwe"] = int(cwe.text) return issues @@ -127,14 +154,16 @@ def fetch_advisory_group(self, root, advisory): """ for advisory_group in root.iter("advisory-group"): for item in advisory_group.iter("item"): - if item.attrib['id'] == advisory: - return item.find('advisory/testTechnicalDescription/text').text + if item.attrib["id"] == advisory: + return item.find( + "advisory/testTechnicalDescription/text" + ).text return "N/A" def get_url(self, root, ref): - for url_group in root.iter('url-group'): - for item in url_group.iter('item'): - if item.attrib['id'] == ref: - return item.find('name').text + for url_group in root.iter("url-group"): + for item in url_group.iter("item"): + if item.attrib["id"] == ref: + return item.find("name").text return None # This case is very rare to occur diff --git a/dojo/tools/immuniweb/parser.py b/dojo/tools/immuniweb/parser.py index 2c03bace4a..6265d1f620 100644 --- a/dojo/tools/immuniweb/parser.py +++ b/dojo/tools/immuniweb/parser.py @@ -4,11 +4,10 @@ from dojo.models import Endpoint, Finding -__author__ = 'properam' +__author__ = "properam" class ImmuniwebParser(object): - def get_scan_types(self): return ["Immuniweb Scan"] @@ -19,43 +18,49 @@ def get_description_for_scan_types(self, scan_type): return "XML Scan Result File from Imuniweb Scan." def get_findings(self, file, test): - ImmuniScanTree = ElementTree.parse(file) root = ImmuniScanTree.getroot() # validate XML file - if 'Vulnerabilities' not in root.tag: - raise ValueError("This does not look like a valid expected Immuniweb XML file.") + if "Vulnerabilities" not in root.tag: + raise ValueError( + "This does not look like a valid expected Immuniweb XML file." 
+ ) dupes = dict() for vulnerability in root.iter("Vulnerability"): """ - The Tags available in XML File are: - ID, Name, Date, Status, - Type, CWE_ID, CVE_ID, CVSSv3, - Risk, URL, Description, PoC + The Tags available in XML File are: + ID, Name, Date, Status, + Type, CWE_ID, CVE_ID, CVSSv3, + Risk, URL, Description, PoC """ mitigation = "N/A" impact = "N/A" - title = vulnerability.find('Name').text - reference = vulnerability.find('ID').text - cwe = ''.join(i for i in vulnerability.find('CWE-ID').text if i.isdigit()) + title = vulnerability.find("Name").text + reference = vulnerability.find("ID").text + cwe = "".join( + i for i in vulnerability.find("CWE-ID").text if i.isdigit() + ) if cwe: cwe = cwe else: cwe = None - vulnerability_id = vulnerability.find('CVE-ID').text - steps_to_reproduce = vulnerability.find('PoC').text - # just to make sure severity is in the recognised sentence casing form - severity = vulnerability.find('Risk').text.capitalize() + vulnerability_id = vulnerability.find("CVE-ID").text + steps_to_reproduce = vulnerability.find("PoC").text + # just to make sure severity is in the recognised sentence casing + # form + severity = vulnerability.find("Risk").text.capitalize() # Set 'Warning' severity === 'Informational' - if severity == 'Warning': + if severity == "Warning": severity = "Informational" - description = (vulnerability.find('Description').text) + description = vulnerability.find("Description").text url = vulnerability.find("URL").text - dupe_key = hashlib.md5(str(description + title + severity).encode('utf-8')).hexdigest() + dupe_key = hashlib.md5( + str(description + title + severity).encode("utf-8") + ).hexdigest() # check if finding is a duplicate if dupe_key in dupes: @@ -64,7 +69,8 @@ def get_findings(self, file, test): finding.description += description else: # finding is not a duplicate # create finding - finding = Finding(title=title, + finding = Finding( + title=title, test=test, description=description, severity=severity, @@ -73,7 +79,8 @@ def get_findings(self, file, test): mitigation=mitigation, impact=impact, references=reference, - dynamic_finding=True) + dynamic_finding=True + ) if vulnerability_id: finding.unsaved_vulnerability_ids = [vulnerability_id] finding.unsaved_endpoints = list() diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py index 323a9036df..2c97225fae 100644 --- a/dojo/tools/intsights/parser.py +++ b/dojo/tools/intsights/parser.py @@ -34,23 +34,38 @@ def _parse_json(self, json_file) -> [dict]: alerts = [] original_alerts = json.load(json_file) - for original_alert in original_alerts.get('Alerts', []): + for original_alert in original_alerts.get("Alerts", []): alert = dict() - alert['alert_id'] = original_alert['_id'] - alert['title'] = original_alert['Details']['Title'] - alert['description'] = original_alert['Details']['Description'] - alert['severity'] = original_alert['Details']['Severity'] - alert['type'] = original_alert['Details']['Type'] - alert['source_date'] = original_alert['Details']['Source'].get("Date", "None provided") - alert['report_date'] = original_alert.get("FoundDate", "None provided") - alert['network_type'] = original_alert['Details']['Source'].get('NetworkType') - alert['source_url'] = original_alert['Details']['Source'].get('URL') - alert['assets'] = ','.join([item.get('Value') for item in original_alert['Assets']]) - alert['tags'] = original_alert['Details'].get('Tags') - alert['status'] = 'Closed' if original_alert['Closed'].get('IsClosed') else 'Open' - alert[ - 'alert_link'] = 
f'https://dashboard.intsights.com/#/threat-command/alerts?search=' \ - f'{original_alert["_id"]}' + alert["alert_id"] = original_alert["_id"] + alert["title"] = original_alert["Details"]["Title"] + alert["description"] = original_alert["Details"]["Description"] + alert["severity"] = original_alert["Details"]["Severity"] + alert["type"] = original_alert["Details"]["Type"] + alert["source_date"] = original_alert["Details"]["Source"].get( + "Date", "None provided" + ) + alert["report_date"] = original_alert.get( + "FoundDate", "None provided" + ) + alert["network_type"] = original_alert["Details"]["Source"].get( + "NetworkType" + ) + alert["source_url"] = original_alert["Details"]["Source"].get( + "URL" + ) + alert["assets"] = ",".join( + [item.get("Value") for item in original_alert["Assets"]] + ) + alert["tags"] = original_alert["Details"].get("Tags") + alert["status"] = ( + "Closed" + if original_alert["Closed"].get("IsClosed") + else "Open" + ) + alert["alert_link"] = ( + f"https://dashboard.intsights.com/#/threat-command/alerts?search=" + f'{original_alert["_id"]}' + ) alerts.append(alert) @@ -66,44 +81,73 @@ def _parse_csv(self, csv_file) -> [dict]: A list of alerts [dict()] """ - default_keys = ['Alert ID', 'Title', 'Description', 'Severity', 'Type', 'Source Date (UTC)', - 'Report Date (UTC)', 'Network Type', 'Source URL', 'Source Name', 'Assets', 'Tags', - 'Assignees', 'Remediation', 'Status', 'Closed Reason', 'Additional Info', 'Rating', - 'Alert Link'] + default_keys = [ + "Alert ID", + "Title", + "Description", + "Severity", + "Type", + "Source Date (UTC)", + "Report Date (UTC)", + "Network Type", + "Source URL", + "Source Name", + "Assets", + "Tags", + "Assignees", + "Remediation", + "Status", + "Closed Reason", + "Additional Info", + "Rating", + "Alert Link" + ] # These keys require a value. If one ore more of the values is null or empty, the entire Alert is ignored. # This is to avoid attempting to import incomplete Findings. 
- required_keys = ['alert_id', 'title', 'severity', 'status'] + required_keys = ["alert_id", "title", "severity", "status"] alerts = [] invalid_alerts = [] content = csv_file.read() - if type(content) is bytes: - content = content.decode('utf-8') - csv_reader = csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"') + if isinstance(content, bytes): + content = content.decode("utf-8") + csv_reader = csv.DictReader( + io.StringIO(content), delimiter=",", quotechar='"' + ) # Don't bother parsing if the keys don't match exactly what's expected - if collections.Counter(default_keys) == collections.Counter(csv_reader.fieldnames): - default_valud = 'None provided' + if collections.Counter(default_keys) == collections.Counter( + csv_reader.fieldnames + ): + default_valud = "None provided" for alert in csv_reader: - alert['alert_id'] = alert.pop('Alert ID') - alert['title'] = alert.pop('Title') - alert['description'] = alert.pop('Description') - alert['severity'] = alert.pop('Severity') - alert['type'] = alert.pop('Type', ) - alert['source_date'] = alert.pop('Source Date (UTC)', default_valud) - alert['report_date'] = alert.pop('Report Date (UTC)', default_valud) - alert['network_type'] = alert.pop('Network Type', default_valud) - alert['source_url'] = alert.pop('Source URL', default_valud) - alert['assets'] = alert.pop('Assets', default_valud) - alert['tags'] = alert.pop('Tags', default_valud) - alert['status'] = alert.pop('Status', default_valud) - alert['alert_link'] = alert.pop('Alert Link') - alert.pop('Assignees') - alert.pop('Remediation') - alert.pop('Closed Reason') - alert.pop('Rating') + alert["alert_id"] = alert.pop("Alert ID") + alert["title"] = alert.pop("Title") + alert["description"] = alert.pop("Description") + alert["severity"] = alert.pop("Severity") + alert["type"] = alert.pop( + "Type", + ) + alert["source_date"] = alert.pop( + "Source Date (UTC)", default_valud + ) + alert["report_date"] = alert.pop( + "Report Date (UTC)", default_valud + ) + alert["network_type"] = alert.pop( + "Network Type", default_valud + ) + alert["source_url"] = alert.pop("Source URL", default_valud) + alert["assets"] = alert.pop("Assets", default_valud) + alert["tags"] = alert.pop("Tags", default_valud) + alert["status"] = alert.pop("Status", default_valud) + alert["alert_link"] = alert.pop("Alert Link") + alert.pop("Assignees") + alert.pop("Remediation") + alert.pop("Closed Reason") + alert.pop("Rating") for key in required_keys: if not alert[key]: invalid_alerts.append(alert) @@ -111,7 +155,9 @@ def _parse_csv(self, csv_file) -> [dict]: if alert not in invalid_alerts: alerts.append(alert) else: - self._LOGGER.error('The CSV file has one or more missing or unexpected header values') + self._LOGGER.error( + "The CSV file has one or more missing or unexpected header values" + ) return alerts @@ -123,41 +169,49 @@ def _build_finding_description(self, alert: dict) -> str: Returns: A markdown formatted description """ - description = "\n".join([ - alert["description"], - f'**Date Found**: `{alert.get("report_date", "None provided")} `', - f'**Type**: `{alert.get("type", "None provided")} `', - f'**Source**: `{alert.get("source_url", "None provided")} `', - f'**Source Date**: ` {alert.get("source_date", "None provided")} `', - f'**Source Network Type**: `{alert.get("network_type", "None provided")} `', - f'**Assets Affected**: `{alert.get("assets", "None provided")} `', - f'**Alert Link**: {alert.get("alert_link", "None provided")}' - ]) + description = "\n".join( + [ + alert["description"], + 
f'**Date Found**: `{alert.get("report_date", "None provided")} `', + f'**Type**: `{alert.get("type", "None provided")} `', + f'**Source**: `{alert.get("source_url", "None provided")} `', + f'**Source Date**: ` {alert.get("source_date", "None provided")} `', + f'**Source Network Type**: `{alert.get("network_type", "None provided")} `', + f'**Assets Affected**: `{alert.get("assets", "None provided")} `', + f'**Alert Link**: {alert.get("alert_link", "None provided")}' + ] + ) return description def get_findings(self, file, test): duplicates = dict() - if file.name.lower().endswith('.json'): - alerts = self._parse_json(file, ) - elif file.name.lower().endswith('.csv'): + if file.name.lower().endswith(".json"): + alerts = self._parse_json( + file, + ) + elif file.name.lower().endswith(".csv"): alerts = self._parse_csv(file) else: - raise ValueError('Filename extension not recognized. Use .json or .csv') + raise ValueError( + "Filename extension not recognized. Use .json or .csv" + ) for alert in alerts: - dupe_key = alert['alert_id'] - - alert = Finding(title=alert['title'], - test=test, - active=False if alert['status'] == 'Closed' else True, - verified=True, - description=self._build_finding_description(alert), - severity=alert['severity'], - references=alert["alert_link"], - static_finding=False, - dynamic_finding=True, - unique_id_from_tool=alert['alert_id']) + dupe_key = alert["alert_id"] + + alert = Finding( + title=alert["title"], + test=test, + active=False if alert["status"] == "Closed" else True, + verified=True, + description=self._build_finding_description(alert), + severity=alert["severity"], + references=alert["alert_link"], + static_finding=False, + dynamic_finding=True, + unique_id_from_tool=alert["alert_id"] + ) duplicates[dupe_key] = alert diff --git a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py index a980f31fe0..d8bea2acd9 100644 --- a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py +++ b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py @@ -8,16 +8,17 @@ class JFrogXrayApiSummaryArtifactParser(object): - # This function return a list of all the scan_type supported by your parser def get_scan_types(self): return ["JFrog Xray API Summary Artifact Scan"] - # This function return a string used to provide some text in the UI (short label) + # This function return a string used to provide some text in the UI (short + # label) def get_label_for_scan_types(self, scan_type): return scan_type - # This function return a string used to provide some text in the UI (long description) + # This function return a string used to provide some text in the UI (long + # description) def get_description_for_scan_types(self, scan_type): return "Import Xray findings in JSON format from the JFrog Xray API Summary/Artifact JSON response" @@ -28,56 +29,83 @@ def get_findings(self, json_output, test): def get_items(self, tree, test): items = [] - if 'artifacts' in tree: - artifact_tree = tree['artifacts'] + if "artifacts" in tree: + artifact_tree = tree["artifacts"] for artifactNode in artifact_tree: - artifact_general = artifactNode['general'] - artifact_issues = artifactNode['issues'] + artifact_general = artifactNode["general"] + artifact_issues = artifactNode["issues"] artifact = decode_artifact(artifact_general) for node in artifact_issues: - service = decode_service(artifact_general['name']) - item = get_item(node, str(service), test, artifact.name, artifact.version, artifact.sha256) + service = 
decode_service(artifact_general["name"]) + item = get_item( + node, + str(service), + test, + artifact.name, + artifact.version, + artifact.sha256, + ) items.append(item) return items # Retrieve the findings of the affected 1st level component (Artifact) -def get_item(vulnerability, service, test, artifact_name, artifact_version, artifact_sha256): +def get_item( + vulnerability, + service, + test, + artifact_name, + artifact_version, + artifact_sha256, +): cve = None cwe = None cvssv3 = None impact_path = ImpactPath("", "", "") - if 'severity' in vulnerability: - if vulnerability['severity'] == 'Unknown': + if "severity" in vulnerability: + if vulnerability["severity"] == "Unknown": severity = "Informational" else: - severity = vulnerability['severity'].title() + severity = vulnerability["severity"].title() else: severity = "Informational" - # Some entries have no CVE entries, despite they exist. Example CVE-2017-1000502. - cves = vulnerability.get('cves', []) + # Some entries have no CVE entries, despite they exist. Example + # CVE-2017-1000502. + cves = vulnerability.get("cves", []) vulnerability_ids = list() if cves: - if len(cves[0].get('cwe', [])) > 0: - cwe = decode_cwe_number(cves[0].get('cwe', [])[0]) - if 'cvss_v3' in cves[0]: - cvss_v3 = cves[0]['cvss_v3'] + if len(cves[0].get("cwe", [])) > 0: + cwe = decode_cwe_number(cves[0].get("cwe", [])[0]) + if "cvss_v3" in cves[0]: + cvss_v3 = cves[0]["cvss_v3"] cvssv3 = CVSS3.from_rh_vector(cvss_v3).clean_vector() - impact_paths = vulnerability.get('impact_path', []) + impact_paths = vulnerability.get("impact_path", []) if len(impact_paths) > 0: impact_path = decode_impact_path(impact_paths[0]) result = hashlib.sha256() - if 'issue_id' in vulnerability: - unique_id = str(artifact_sha256 + impact_path.name + impact_path.version + vulnerability['issue_id']) - vuln_id_from_tool = vulnerability['issue_id'] + if "issue_id" in vulnerability: + unique_id = str( + artifact_sha256 + + impact_path.name + + impact_path.version + + vulnerability["issue_id"] + ) + vuln_id_from_tool = vulnerability["issue_id"] elif cve: - unique_id = str(artifact_sha256 + impact_path.name + impact_path.version + cve) + unique_id = str( + artifact_sha256 + impact_path.name + impact_path.version + cve + ) else: - unique_id = str(artifact_sha256 + impact_path.name + impact_path.version + vulnerability['summary']) + unique_id = str( + artifact_sha256 + + impact_path.name + + impact_path.version + + vulnerability["summary"] + ) vuln_id_from_tool = "" result.update(unique_id.encode()) unique_id_from_tool = result.hexdigest() @@ -85,28 +113,32 @@ def get_item(vulnerability, service, test, artifact_name, artifact_version, arti finding = Finding( vuln_id_from_tool=vuln_id_from_tool, service=service, - title=vulnerability['summary'], + title=vulnerability["summary"], cwe=cwe, cvssv3=cvssv3, severity=severity, - description=impact_path.name + ":" + impact_path.version + " -> " + vulnerability['description'], + description=impact_path.name + + ":" + + impact_path.version + + " -> " + + vulnerability["description"], test=test, file_path=impact_paths[0], component_name=artifact_name, component_version=artifact_version, static_finding=True, dynamic_finding=False, - unique_id_from_tool=unique_id_from_tool + unique_id_from_tool=unique_id_from_tool, ) if vulnerability_ids: finding.unsaved_vulnerability_ids = vulnerability_ids # Add vulnerability ids vulnerability_ids = list() - if 'cve' in cves[0]: - vulnerability_ids.append(cves[0]['cve']) - if 'issue_id' in vulnerability: - 
vulnerability_ids.append(vulnerability['issue_id']) + if "cve" in cves[0]: + vulnerability_ids.append(cves[0]["cve"]) + if "issue_id" in vulnerability: + vulnerability_ids.append(vulnerability["issue_id"]) if vulnerability_ids: finding.unsaved_vulnerability_ids = vulnerability_ids @@ -115,10 +147,11 @@ def get_item(vulnerability, service, test, artifact_name, artifact_version, arti # Regex helpers + def decode_service(name): match = re.match(r".*/(.*):", name, re.IGNORECASE) if match is None: - return '' + return "" return match[1] @@ -126,13 +159,13 @@ def decode_cwe_number(value): match = re.match(r"CWE-\d+", value, re.IGNORECASE) if match is None: return 0 - return int(match[0].rsplit('-')[1]) + return int(match[0].rsplit("-")[1]) def decode_artifact(artifact_general): artifact = Artifact("", "", "") - artifact.sha256 = artifact_general['sha256'] - match = re.match(r"(.*):(.*)", artifact_general['name'], re.IGNORECASE) + artifact.sha256 = artifact_general["sha256"] + match = re.match(r"(.*):(.*)", artifact_general["name"], re.IGNORECASE) if match: artifact.name = match[1] artifact.version = match[2] diff --git a/dojo/tools/jfrog_xray_unified/parser.py b/dojo/tools/jfrog_xray_unified/parser.py index bb48fe815f..23e739101c 100644 --- a/dojo/tools/jfrog_xray_unified/parser.py +++ b/dojo/tools/jfrog_xray_unified/parser.py @@ -22,8 +22,8 @@ def get_findings(self, json_output, test): def get_items(self, tree, test): items = [] - if 'rows' in tree: - vulnerabilityTree = tree['rows'] + if "rows" in tree: + vulnerabilityTree = tree["rows"] for node in vulnerabilityTree: item = get_item(node, test) @@ -35,24 +35,28 @@ def get_items(self, tree, test): def get_item(vulnerability, test): # Some items have multiple CVEs for some reason, so get the CVE with the highest CVSSv3 score. - # Note: the xray v2 importer just took the first CVE in the list, that doesn't seem ideal though + # Note: the xray v2 importer just took the first CVE in the list, that + # doesn't seem ideal though highestCvssV3Index = 0 highestCvssV3Score = 0 - for thisCveIndex in range(0, len(vulnerability['cves']) - 1): - # not all cves have cvssv3 scores, so skip these. If no v3 scores, we'll default to index 0 - if 'cvss_v3_score' in vulnerability['cves'][thisCveIndex]: - thisCvssV3Score = vulnerability['cves'][thisCveIndex]['cvss_v3_score'] + for thisCveIndex in range(0, len(vulnerability["cves"]) - 1): + # not all cves have cvssv3 scores, so skip these. If no v3 scores, + # we'll default to index 0 + if "cvss_v3_score" in vulnerability["cves"][thisCveIndex]: + thisCvssV3Score = vulnerability["cves"][thisCveIndex][ + "cvss_v3_score" + ] if thisCvssV3Score > highestCvssV3Score: highestCvssV3Index = thisCveIndex highestCvssV3Score = thisCvssV3Score # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss - if 'severity' in vulnerability: - if vulnerability['severity'] == 'Unknown': + if "severity" in vulnerability: + if vulnerability["severity"] == "Unknown": severity = "Info" else: - severity = vulnerability['severity'].title() + severity = vulnerability["severity"].title() # TODO: Needs UNKNOWN new status in the model. 
else: severity = "Info" @@ -66,61 +70,79 @@ def get_item(vulnerability, test): mitigation = None extra_desc = "" - cves = vulnerability.get('cves', []) + cves = vulnerability.get("cves", []) if len(cves) > 0: worstCve = cves[cveIndex] - if 'cve' in cves[cveIndex]: - vulnerability_id = worstCve['cve'] - if 'cvss_v3_vector' in worstCve: - cvss_v3 = worstCve['cvss_v3_vector'] + if "cve" in cves[cveIndex]: + vulnerability_id = worstCve["cve"] + if "cvss_v3_vector" in worstCve: + cvss_v3 = worstCve["cvss_v3_vector"] cvssv3 = cvss_v3 - if 'cvss_v2_vector' in worstCve: - cvss_v2 = worstCve['cvss_v2_vector'] + if "cvss_v2_vector" in worstCve: + cvss_v2 = worstCve["cvss_v2_vector"] - if 'fixed_versions' in vulnerability and len(vulnerability['fixed_versions']) > 0: + if ( + "fixed_versions" in vulnerability + and len(vulnerability["fixed_versions"]) > 0 + ): mitigation = "Versions containing a fix:\n" - mitigation = mitigation + "\n".join(vulnerability['fixed_versions']) - - if 'external_advisory_source' in vulnerability and 'external_advisory_severity' in vulnerability: - extra_desc = vulnerability['external_advisory_source'] + ": " + vulnerability['external_advisory_severity'] - - if vulnerability['issue_id']: - title = vulnerability['issue_id'] + " - " + vulnerability['summary'] + mitigation = mitigation + "\n".join(vulnerability["fixed_versions"]) + + if ( + "external_advisory_source" in vulnerability + and "external_advisory_severity" in vulnerability + ): + extra_desc = ( + vulnerability["external_advisory_source"] + + ": " + + vulnerability["external_advisory_severity"] + ) + + if vulnerability["issue_id"]: + title = vulnerability["issue_id"] + " - " + vulnerability["summary"] else: - title = vulnerability['summary'] + title = vulnerability["summary"] - references = "\n".join(vulnerability['references']) + references = "\n".join(vulnerability["references"]) - scan_time = datetime.strptime(vulnerability['artifact_scan_time'], "%Y-%m-%dT%H:%M:%S%z") + scan_time = datetime.strptime( + vulnerability["artifact_scan_time"], "%Y-%m-%dT%H:%M:%S%z" + ) - # component has several parts separated by colons. Last part is the version, everything else is the name - splitComponent = vulnerability['vulnerable_component'].split(':') + # component has several parts separated by colons. 
Last part is the + # version, everything else is the name + splitComponent = vulnerability["vulnerable_component"].split(":") component_name = ":".join(splitComponent[:-1]) component_version = splitComponent[-1:][0] # remove package type from component name component_name = component_name.split("://", 1)[1] - tags = ["packagetype_" + vulnerability['package_type']] + tags = ["packagetype_" + vulnerability["package_type"]] # create the finding object finding = Finding( title=title, test=test, severity=severity, - description=(vulnerability['description'] + "\n\n" + extra_desc).strip(), + description=( + vulnerability["description"] + "\n\n" + extra_desc + ).strip(), mitigation=mitigation, component_name=component_name, component_version=component_version, - file_path=vulnerability['path'], - severity_justification="CVSS v3 base score: {}\nCVSS v2 base score: {}".format(cvss_v3, cvss_v2), + file_path=vulnerability["path"], + severity_justification="CVSS v3 base score: {}\nCVSS v2 base score: {}".format( + cvss_v3, cvss_v2 + ), static_finding=True, dynamic_finding=False, references=references, impact=severity, cvssv3=cvssv3, date=scan_time, - unique_id_from_tool=vulnerability['issue_id'], - tags=tags) + unique_id_from_tool=vulnerability["issue_id"], + tags=tags, + ) if vulnerability_id: finding.unsaved_vulnerability_ids = [vulnerability_id] diff --git a/dojo/tools/jfrogxray/parser.py b/dojo/tools/jfrogxray/parser.py index d2a34a6514..9f45abd6be 100644 --- a/dojo/tools/jfrogxray/parser.py +++ b/dojo/tools/jfrogxray/parser.py @@ -24,21 +24,32 @@ def get_findings(self, json_output, test): def get_items(self, tree, test): items = {} - if 'data' in tree: - vulnerabilityTree = tree['data'] + if "data" in tree: + vulnerabilityTree = tree["data"] for node in vulnerabilityTree: - item = get_item(node, test) title_cve = "No CVE" - more_details = node.get('component_versions').get('more_details') - if 'cves' in more_details: - if 'cve' in more_details.get('cves')[0]: - title_cve = node.get('component_versions').get('more_details').get('cves')[0].get('cve') - - unique_key = node.get('id') + node.get('summary') + node.get('provider') + node.get('source_comp_id') + \ - title_cve + more_details = node.get("component_versions").get( + "more_details" + ) + if "cves" in more_details: + if "cve" in more_details.get("cves")[0]: + title_cve = ( + node.get("component_versions") + .get("more_details") + .get("cves")[0] + .get("cve") + ) + + unique_key = ( + node.get("id") + + node.get("summary") + + node.get("provider") + + node.get("source_comp_id") + + title_cve + ) items[unique_key] = item return list(items.values()) @@ -48,16 +59,16 @@ def decode_cwe_number(value): match = re.match(r"CWE-\d+", value, re.IGNORECASE) if match is None: return 0 - return int(match[0].rsplit('-')[1]) + return int(match[0].rsplit("-")[1]) def get_item(vulnerability, test): # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss - if 'severity' in vulnerability: - if vulnerability['severity'] == 'Unknown': + if "severity" in vulnerability: + if vulnerability["severity"] == "Unknown": severity = "Info" else: - severity = vulnerability['severity'].title() + severity = vulnerability["severity"].title() # TODO: Needs UNKNOWN new status in the model. else: severity = "Info" @@ -68,44 +79,75 @@ def get_item(vulnerability, test): cvss_v3 = "No CVSS v3 score." mitigation = None extra_desc = "" - # Some entries have no CVE entries, despite they exist. Example CVE-2017-1000502. 
- cves = vulnerability['component_versions']['more_details'].get('cves', []) + # Some entries have no CVE entries, despite they exist. Example + # CVE-2017-1000502. + cves = vulnerability["component_versions"]["more_details"].get("cves", []) if len(cves) > 0: for item in cves: - if item.get('cve'): - vulnerability_ids.append(item.get('cve')) + if item.get("cve"): + vulnerability_ids.append(item.get("cve")) # take only the first one for now, limitation of DD model. - if len(cves[0].get('cwe', [])) > 0: - cwe = decode_cwe_number(cves[0].get('cwe', [])[0]) - if 'cvss_v3' in cves[0]: - cvss_v3 = cves[0]['cvss_v3'] + if len(cves[0].get("cwe", [])) > 0: + cwe = decode_cwe_number(cves[0].get("cwe", [])[0]) + if "cvss_v3" in cves[0]: + cvss_v3 = cves[0]["cvss_v3"] # this dedicated package will clean the vector cvssv3 = CVSS3.from_rh_vector(cvss_v3).clean_vector() - if 'fixed_versions' in vulnerability['component_versions']: + if "fixed_versions" in vulnerability["component_versions"]: mitigation = "**Versions containing a fix:**\n" - mitigation = mitigation + "\n".join(vulnerability['component_versions']['fixed_versions']) + mitigation = mitigation + "\n".join( + vulnerability["component_versions"]["fixed_versions"] + ) - if 'vulnerable_versions' in vulnerability['component_versions']: + if "vulnerable_versions" in vulnerability["component_versions"]: extra_desc = "\n**Versions that are vulnerable:**\n" - extra_desc += "\n".join(vulnerability['component_versions']['vulnerable_versions']) - - provider = vulnerability.get('component_versions').get('more_details').get('provider') + extra_desc += "\n".join( + vulnerability["component_versions"]["vulnerable_versions"] + ) + + provider = ( + vulnerability.get("component_versions") + .get("more_details") + .get("provider") + ) if provider: extra_desc += f"\n**Provider:** {provider}" - component_name = vulnerability.get('component') - component_version = vulnerability.get('source_comp_id')[len(vulnerability.get('source_id', '')) + 1:] + component_name = vulnerability.get("component") + component_version = vulnerability.get("source_comp_id")[ + len(vulnerability.get("source_id", "")) + 1: + ] # The 'id' field is empty? 
(at least in my sample file) if vulnerability_ids: - if vulnerability['id']: - title = vulnerability['id'] + " - " + str(vulnerability_ids[0]) + " - " + component_name + ":" + component_version + if vulnerability["id"]: + title = ( + vulnerability["id"] + + " - " + + str(vulnerability_ids[0]) + + " - " + + component_name + + ":" + + component_version + ) else: - title = str(vulnerability_ids[0]) + " - " + component_name + ":" + component_version + title = ( + str(vulnerability_ids[0]) + + " - " + + component_name + + ":" + + component_version + ) else: - if vulnerability['id']: - title = vulnerability['id'] + " - " + component_name + ":" + component_version + if vulnerability["id"]: + title = ( + vulnerability["id"] + + " - " + + component_name + + ":" + + component_version + ) else: title = "No CVE - " + component_name + ":" + component_version @@ -115,14 +157,15 @@ def get_item(vulnerability, test): cwe=cwe, test=test, severity=severity, - description=(vulnerability['summary'] + extra_desc).strip(), + description=(vulnerability["summary"] + extra_desc).strip(), mitigation=mitigation, component_name=component_name, component_version=component_version, - file_path=vulnerability.get('source_comp_id'), + file_path=vulnerability.get("source_comp_id"), static_finding=True, dynamic_finding=False, - cvssv3=cvssv3) + cvssv3=cvssv3, + ) if vulnerability_ids: finding.unsaved_vulnerability_ids = vulnerability_ids return finding diff --git a/dojo/tools/kics/parser.py b/dojo/tools/kics/parser.py index a3942cca7e..365a508cb9 100644 --- a/dojo/tools/kics/parser.py +++ b/dojo/tools/kics/parser.py @@ -28,36 +28,42 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): data = json.load(filename) dupes = {} - for query in data['queries']: - name = query.get('query_name') - query_url = query.get('query_url') - if query.get('severity') in self.SEVERITY: - severity = self.SEVERITY[query.get('severity')] + for query in data["queries"]: + name = query.get("query_name") + query_url = query.get("query_url") + if query.get("severity") in self.SEVERITY: + severity = self.SEVERITY[query.get("severity")] else: severity = "Medium" - platform = query.get('platform') - category = query.get('category') - for item in query.get('files'): - file_name = item.get('file_name') - line_number = item.get('line') - issue_type = item.get('issue_type') - expected_value = item.get('expected_value') - actual_value = item.get('actual_value') + platform = query.get("platform") + category = query.get("category") + for item in query.get("files"): + file_name = item.get("file_name") + line_number = item.get("line") + issue_type = item.get("issue_type") + expected_value = item.get("expected_value") + actual_value = item.get("actual_value") description = f"{query.get('description','')}\n" if platform: - description += f'**Platform:** {platform}\n' + description += f"**Platform:** {platform}\n" if category: - description += f'**Category:** {category}\n' + description += f"**Category:** {category}\n" if issue_type: - description += f'**Issue type:** {issue_type}\n' + description += f"**Issue type:** {issue_type}\n" if actual_value: - description += f'**Actual value:** {actual_value}\n' - if description.endswith('\n'): + description += f"**Actual value:** {actual_value}\n" + if description.endswith("\n"): description = description[:-1] dupe_key = hashlib.sha256( - (platform + category + issue_type + file_name + str(line_number)).encode("utf-8") + ( + platform + + category + + issue_type + + file_name + + 
str(line_number) + ).encode("utf-8") ).hexdigest() if dupe_key in dupes: diff --git a/dojo/tools/kiuwan/parser.py b/dojo/tools/kiuwan/parser.py index 70cbcdec58..14bbc85f53 100644 --- a/dojo/tools/kiuwan/parser.py +++ b/dojo/tools/kiuwan/parser.py @@ -4,28 +4,28 @@ from dojo.models import Finding -__author__ = 'dr3dd589' +__author__ = "dr3dd589" -class Severityfilter(): +class Severityfilter: def __init__(self): - self.severity_mapping = {'Very Low': 'Info', - 'Low': 'Low', - 'Normal': 'Medium', - 'High': 'High', - 'Very High': 'Critical' - } + self.severity_mapping = { + "Very Low": "Info", + "Low": "Low", + "Normal": "Medium", + "High": "High", + "Very High": "Critical", + } self.severity = None def eval_column(self, column_value): if column_value in list(self.severity_mapping.keys()): self.severity = self.severity_mapping[column_value] else: - self.severity = 'Info' + self.severity = "Info" class KiuwanParser(object): - def get_scan_types(self): return ["Kiuwan Scan"] @@ -37,9 +37,11 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): content = filename.read() - if type(content) is bytes: - content = content.decode('utf-8') - reader = csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"') + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = csv.DictReader( + io.StringIO(content), delimiter=",", quotechar='"' + ) csvarray = [] for row in reader: @@ -50,33 +52,55 @@ def get_findings(self, filename, test): finding = Finding(test=test) findingdict = {} severityfilter = Severityfilter() - severityfilter.eval_column(row['Priority']) - findingdict['severity'] = severityfilter.severity - findingdict['title'] = row['Rule'] - findingdict['file'] = row['File'] - findingdict['line_number'] = row['Line number'] - findingdict['description'] = "**Vulnerability type** : " + row['Vulnerability type'] + "\n\n" + \ - "**CWE Scope** : " + row['CWE Scope'] + "\n\n" + \ - "**Line number** : " + row['Line number'] + "\n\n" + \ - "**Code at line number** : " + row['Line text'] + "\n\n" + \ - "**Normative** : " + row['Normative'] + "\n\n" + \ - "**Rule code** : " + row['Rule code'] + "\n\n" + \ - "**Status** : " + row['Status'] + "\n\n" + \ - "**Source file** : " + row['Source file'] + "\n\n" + \ - "**Source line number** : " + row['Source line number'] + "\n\n" + \ - "**Code at sorce line number** : " + row['Source line text'] + "\n" - - finding.title = findingdict['title'] - finding.file_path = findingdict['file'] - finding.line = findingdict['line_number'] - finding.description = findingdict['description'] + severityfilter.eval_column(row["Priority"]) + findingdict["severity"] = severityfilter.severity + findingdict["title"] = row["Rule"] + findingdict["file"] = row["File"] + findingdict["line_number"] = row["Line number"] + findingdict["description"] = ( + "**Vulnerability type** : " + + row["Vulnerability type"] + + "\n\n" + + "**CWE Scope** : " + + row["CWE Scope"] + + "\n\n" + + "**Line number** : " + + row["Line number"] + + "\n\n" + + "**Code at line number** : " + + row["Line text"] + + "\n\n" + + "**Normative** : " + + row["Normative"] + + "\n\n" + + "**Rule code** : " + + row["Rule code"] + + "\n\n" + + "**Status** : " + + row["Status"] + + "\n\n" + + "**Source file** : " + + row["Source file"] + + "\n\n" + + "**Source line number** : " + + row["Source line number"] + + "\n\n" + + "**Code at sorce line number** : " + + row["Source line text"] + + "\n" + ) + + finding.title = findingdict["title"] + finding.file_path = 
findingdict["file"] + finding.line = findingdict["line_number"] + finding.description = findingdict["description"] finding.references = "Not provided!" finding.mitigation = "Not provided!" - finding.severity = findingdict['severity'] + finding.severity = findingdict["severity"] finding.static_finding = True try: - finding.cwe = int(row['CWE']) - except: + finding.cwe = int(row["CWE"]) + except Exception: pass if finding is not None: @@ -85,7 +109,15 @@ def get_findings(self, filename, test): if finding.description is None: finding.description = "" - key = hashlib.md5((finding.severity + '|' + finding.title + '|' + finding.description).encode("utf-8")).hexdigest() + key = hashlib.md5( + ( + finding.severity + + "|" + + finding.title + + "|" + + finding.description + ).encode("utf-8") + ).hexdigest() if key not in dupes: dupes[key] = finding diff --git a/dojo/tools/kubebench/parser.py b/dojo/tools/kubebench/parser.py index 6bff6bd088..a54bcaf480 100644 --- a/dojo/tools/kubebench/parser.py +++ b/dojo/tools/kubebench/parser.py @@ -4,7 +4,6 @@ class KubeBenchParser(object): - def get_scan_types(self): return ["kube-bench Scan"] @@ -16,8 +15,8 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, json_output, test): tree = json.load(json_output) - if 'Controls' in tree: - return self.get_chapters(tree['Controls'], test) + if "Controls" in tree: + return self.get_chapters(tree["Controls"], test) else: return self.get_chapters(tree, test) @@ -34,14 +33,14 @@ def get_chapters(self, tree, test): def get_tests(tree, test): items_from_tests = [] - description = '' - if 'id' in tree: - description += tree['id'] + " " - if 'text' in tree: - description += tree['text'] - description += '\n' + description = "" + if "id" in tree: + description += tree["id"] + " " + if "text" in tree: + description += tree["text"] + description += "\n" - for node in tree['tests']: + for node in tree["tests"]: items_from_results = get_results(node, test, description) items_from_tests += items_from_results @@ -51,13 +50,13 @@ def get_tests(tree, test): def get_results(tree, test, description): items_from_results = [] - if 'section' in tree: - description += tree['section'] + ' ' - if 'desc' in tree: - description += tree['desc'] - description += '\n' + if "section" in tree: + description += tree["section"] + " " + if "desc" in tree: + description += tree["desc"] + description += "\n" - for node in tree['results']: + for node in tree["results"]: item = get_item(node, test, description) if item: items_from_results.append(item) @@ -66,53 +65,55 @@ def get_results(tree, test, description): def get_item(vuln, test, description): - - status = vuln.get('status', None) - reason = vuln.get('reason', None) + status = vuln.get("status", None) + reason = vuln.get("reason", None) if status is None: return None - # kube-bench doesn't define severities. So we use the status to define the severity - if status.upper() == 'FAIL': - severity = 'Medium' - elif status.upper() == 'WARN' and reason != 'Test marked as a manual test': - severity = 'Info' + # kube-bench doesn't define severities. 
So we use the status to define the + # severity + if status.upper() == "FAIL": + severity = "Medium" + elif status.upper() == "WARN" and reason != "Test marked as a manual test": + severity = "Info" else: return None - test_number = vuln.get('test_number', 'Test number not found') - test_description = vuln.get('test_desc', 'Description not found') - - title = test_number + ' - ' + test_description - - if 'test_number' in vuln: - description += vuln['test_number'] + ' ' - if 'test_desc' in vuln: - description += vuln['test_desc'] - if 'audit' in vuln: - description += '\n' - description += 'Audit: {}\n'.format(vuln['audit']) - if 'reason' in vuln and vuln['reason'] != '': - description += '\n' - description += 'Reason: {}\n'.format(vuln['reason']) - if 'expected_result' in vuln and vuln['expected_result'] != '': - description += '\n' - description += 'Expected result: {}\n'.format(vuln['expected_result']) - if 'actual_value' in vuln and vuln['actual_value'] != '': - description += '\n' - description += 'Actual value: {}\n'.format(vuln['actual_value']) - - mitigation = vuln.get('remediation', None) + test_number = vuln.get("test_number", "Test number not found") + test_description = vuln.get("test_desc", "Description not found") + + title = test_number + " - " + test_description + + if "test_number" in vuln: + description += vuln["test_number"] + " " + if "test_desc" in vuln: + description += vuln["test_desc"] + if "audit" in vuln: + description += "\n" + description += "Audit: {}\n".format(vuln["audit"]) + if "reason" in vuln and vuln["reason"] != "": + description += "\n" + description += "Reason: {}\n".format(vuln["reason"]) + if "expected_result" in vuln and vuln["expected_result"] != "": + description += "\n" + description += "Expected result: {}\n".format(vuln["expected_result"]) + if "actual_value" in vuln and vuln["actual_value"] != "": + description += "\n" + description += "Actual value: {}\n".format(vuln["actual_value"]) + + mitigation = vuln.get("remediation", None) vuln_id_from_tool = test_number - finding = Finding(title=title, - test=test, - description=description, - severity=severity, - mitigation=mitigation, - vuln_id_from_tool=vuln_id_from_tool, - static_finding=True, - dynamic_finding=False) + finding = Finding( + title=title, + test=test, + description=description, + severity=severity, + mitigation=mitigation, + vuln_id_from_tool=vuln_id_from_tool, + static_finding=True, + dynamic_finding=False, + ) return finding diff --git a/dojo/tools/meterian/parser.py b/dojo/tools/meterian/parser.py index 223b56785a..e47cb46901 100644 --- a/dojo/tools/meterian/parser.py +++ b/dojo/tools/meterian/parser.py @@ -5,7 +5,6 @@ class MeterianParser(object): - def get_scan_types(self): return ["Meterian Scan"] @@ -20,9 +19,13 @@ def get_findings(self, report, test): report_json = json.load(report) security_reports = self.get_security_reports(report_json) - scan_date = str(datetime.fromisoformat(report_json["timestamp"]).date()) + scan_date = str( + datetime.fromisoformat(report_json["timestamp"]).date() + ) for single_security_report in security_reports: - findings += self.do_get_findings(single_security_report, scan_date, test) + findings += self.do_get_findings( + single_security_report, scan_date, test + ) return findings @@ -38,21 +41,23 @@ def do_get_findings(self, single_security_report, scan_date, test): findings = [] language = single_security_report["language"] for dependency_report in single_security_report["reports"]: - lib_name = dependency_report["dependency"]["name"] lib_ver = 
dependency_report["dependency"]["version"] finding_title = lib_name + ":" + lib_ver for advisory in dependency_report["advices"]: - severity = self.get_severity(advisory) finding = Finding( title=finding_title, date=scan_date, test=test, severity=severity, - severity_justification="Issue severity of: **" + severity + "** from a base " + - "CVSS score of: **" + str(advisory.get('cvss')) + "**", - description=advisory['description'], + severity_justification="Issue severity of: **" + + severity + + "** from a base " + + "CVSS score of: **" + + str(advisory.get("cvss")) + + "**", + description=advisory["description"], component_name=lib_name, component_version=lib_ver, false_p=False, @@ -62,11 +67,11 @@ def do_get_findings(self, single_security_report, scan_date, test): static_finding=True, dynamic_finding=False, file_path="Manifest file", - unique_id_from_tool=advisory['id'], - tags=[language] + unique_id_from_tool=advisory["id"], + tags=[language], ) - if 'cve' in advisory: + if "cve" in advisory: if "N/A" != advisory["cve"]: finding.unsaved_vulnerability_ids = [advisory["cve"]] @@ -76,11 +81,29 @@ def do_get_findings(self, single_security_report, scan_date, test): mitigation_msg = "## Remediation\n" safe_versions = dependency_report["safeVersions"] if "latestPatch" in safe_versions: - mitigation_msg += "Upgrade " + lib_name + " to version " + safe_versions["latestPatch"] + " or higher." + mitigation_msg += ( + "Upgrade " + + lib_name + + " to version " + + safe_versions["latestPatch"] + + " or higher." + ) elif "latestMinor" in safe_versions: - mitigation_msg += "Upgrade " + lib_name + " to version " + safe_versions["latestMinor"] + " or higher." + mitigation_msg += ( + "Upgrade " + + lib_name + + " to version " + + safe_versions["latestMinor"] + + " or higher." + ) elif "latestMajor" in safe_versions: - mitigation_msg += "Upgrade " + lib_name + " to version " + safe_versions["latestMajor"] + "." + mitigation_msg += ( + "Upgrade " + + lib_name + + " to version " + + safe_versions["latestMajor"] + + "." + ) else: mitigation_msg = "We were not able to provide a safe version for this library.\nYou should consider replacing this component as it could be an issue for the safety of your application." 
finding.mitigation = mitigation_msg @@ -99,17 +122,21 @@ def do_get_findings(self, single_security_report, scan_date, test): def get_severity(self, advisory): # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss - if 'cvss' in advisory: - if advisory['cvss'] <= 3.9: + if "cvss" in advisory: + if advisory["cvss"] <= 3.9: severity = "Low" - elif advisory['cvss'] >= 4.0 and advisory['cvss'] <= 6.9: + elif advisory["cvss"] >= 4.0 and advisory["cvss"] <= 6.9: severity = "Medium" - elif advisory['cvss'] >= 7.0 and advisory['cvss'] <= 8.9: + elif advisory["cvss"] >= 7.0 and advisory["cvss"] <= 8.9: severity = "High" else: severity = "Critical" else: - if advisory["severity"] == "SUGGEST" or advisory["severity"] == "NA" or advisory["severity"] == "NONE": + if ( + advisory["severity"] == "SUGGEST" + or advisory["severity"] == "NA" + or advisory["severity"] == "NONE" + ): severity = "Info" else: severity = advisory["severity"].title() @@ -119,7 +146,10 @@ def get_severity(self, advisory): def get_reference_url(self, link_obj): url = link_obj["url"] if link_obj["type"] == "CVE": - url = "https://cve.mitre.org/cgi-bin/cvename.cgi?name=" + link_obj["url"] + url = ( + "https://cve.mitre.org/cgi-bin/cvename.cgi?name=" + + link_obj["url"] + ) elif link_obj["type"] == "NVD": url = "https://nvd.nist.gov/vuln/detail/" + link_obj["url"] diff --git a/dojo/tools/microfocus_webinspect/parser.py b/dojo/tools/microfocus_webinspect/parser.py index fcec9c5897..114e11d59c 100644 --- a/dojo/tools/microfocus_webinspect/parser.py +++ b/dojo/tools/microfocus_webinspect/parser.py @@ -23,41 +23,52 @@ def get_findings(self, file, test): tree = parse(file) # get root of tree. root = tree.getroot() - if 'Sessions' not in root.tag: - raise ValueError("This doesn't seem to be a valid Webinspect xml file.") + if "Sessions" not in root.tag: + raise ValueError( + "This doesn't seem to be a valid Webinspect xml file." 
+ ) dupes = dict() for session in root: - url = session.find('URL').text + url = session.find("URL").text endpoint = Endpoint.from_uri(url) - issues = session.find('Issues') - for issue in issues.findall('Issue'): + issues = session.find("Issues") + for issue in issues.findall("Issue"): mitigation = None reference = None - severity = MicrofocusWebinspectParser.convert_severity(issue.find('Severity').text) - for content in issue.findall('ReportSection'): - name = content.find('Name').text - if 'Summary' in name: - if content.find('SectionText').text: - description = content.find('SectionText').text - if 'Fix' in name: - if content.find('SectionText').text: - mitigation = content.find('SectionText').text - if 'Reference' in name: - if name and content.find('SectionText').text: - reference = html2text.html2text(content.find('SectionText').text) + severity = MicrofocusWebinspectParser.convert_severity( + issue.find("Severity").text + ) + for content in issue.findall("ReportSection"): + name = content.find("Name").text + if "Summary" in name: + if content.find("SectionText").text: + description = content.find("SectionText").text + if "Fix" in name: + if content.find("SectionText").text: + mitigation = content.find("SectionText").text + if "Reference" in name: + if name and content.find("SectionText").text: + reference = html2text.html2text( + content.find("SectionText").text + ) cwe = 0 description = "" - classifications = issue.find('Classifications') - for content in classifications.findall('Classification'): + classifications = issue.find("Classifications") + for content in classifications.findall("Classification"): # detect CWE number # TODO support more than one CWE number - if "kind" in content.attrib and "CWE" == content.attrib["kind"]: - cwe = MicrofocusWebinspectParser.get_cwe(content.attrib['identifier']) + if ( + "kind" in content.attrib + and "CWE" == content.attrib["kind"] + ): + cwe = MicrofocusWebinspectParser.get_cwe( + content.attrib["identifier"] + ) description += "\n\n" + content.text + "\n" finding = Finding( - title=issue.findtext('Name'), + title=issue.findtext("Name"), test=test, cwe=cwe, description=description, @@ -74,11 +85,15 @@ def get_findings(self, file, test): finding.unsaved_endpoints = [endpoint] # make dupe hash key - dupe_key = hashlib.sha256("|".join([ - finding.description, - finding.title, - finding.severity, - ]).encode('utf-8')).hexdigest() + dupe_key = hashlib.sha256( + "|".join( + [ + finding.description, + finding.title, + finding.severity, + ] + ).encode("utf-8") + ).hexdigest() # check if dupes are present. 
if dupe_key in dupes: find = dupes[dupe_key] diff --git a/dojo/tools/mobsfscan/parser.py b/dojo/tools/mobsfscan/parser.py index 9b5dc57de1..58514eaea8 100644 --- a/dojo/tools/mobsfscan/parser.py +++ b/dojo/tools/mobsfscan/parser.py @@ -26,23 +26,29 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): data = json.load(filename) - if len(data.get('results')) == 0: + if len(data.get("results")) == 0: return [] else: dupes = {} - for key, item in data.get('results').items(): - metadata = item.get('metadata') - cwe = int(re.match(r'(cwe|CWE)-([0-9]+)', metadata.get('cwe')).group(2)) - masvs = metadata.get('masvs') - owasp_mobile = metadata.get('owasp-mobile') - description = "\n".join([ - f"**Description:** `{metadata.get('description')}`", - f"**OWASP MASVS:** `{masvs}`", - f"**OWASP Mobile:** `{owasp_mobile}`", - ]) - references = metadata.get('reference') - if metadata.get('severity') in self.SEVERITY: - severity = self.SEVERITY[metadata.get('severity')] + for key, item in data.get("results").items(): + metadata = item.get("metadata") + cwe = int( + re.match(r"(cwe|CWE)-([0-9]+)", metadata.get("cwe")).group( + 2 + ) + ) + masvs = metadata.get("masvs") + owasp_mobile = metadata.get("owasp-mobile") + description = "\n".join( + [ + f"**Description:** `{metadata.get('description')}`", + f"**OWASP MASVS:** `{masvs}`", + f"**OWASP Mobile:** `{owasp_mobile}`", + ] + ) + references = metadata.get("reference") + if metadata.get("severity") in self.SEVERITY: + severity = self.SEVERITY[metadata.get("severity")] else: severity = "Info" @@ -55,15 +61,15 @@ def get_findings(self, filename, test): description=description, references=references, ) - if item.get('files'): - for file in item.get('files'): - file_path = file.get('file_path') - line = file.get('match_lines')[0] + if item.get("files"): + for file in item.get("files"): + file_path = file.get("file_path") + line = file.get("match_lines")[0] finding.file_path = file_path finding.line = line dupe_key = hashlib.sha256( - (key + str(cwe) + masvs + owasp_mobile).encode('utf-8') + (key + str(cwe) + masvs + owasp_mobile).encode("utf-8") ).hexdigest() if dupe_key in dupes: From c78499722dc78d57f03327a13dd58a274f4ec8bd Mon Sep 17 00:00:00 2001 From: Alejandro Tortolero Date: Mon, 3 Jul 2023 15:14:40 -0500 Subject: [PATCH 10/85] Update files with PEP8 standards in folder dojo/tools #005 (#8305) * Update files in folder dojo/tools/mozilla_observatory with PEP8 standars. * Update files in folder dojo/tools/netsparker with PEP8 standars. * Update files in folder dojo/tools/neuvector with PEP8 standars. * Update files in folder dojo/tools/neuvector_compliance with PEP8 standars. * Update files in folder dojo/tools/nexpose with PEP8 standars. * Update files in folder dojo/tools/nikto with PEP8 standars. * Update files in folder dojo/tools/nmap with PEP8 standars. * Update files in folder dojo/tools/npm_audit with PEP8 standars. * Update files in folder dojo/tools/nsp with PEP8 standars. * Update files in folder dojo/tools/nuclei with PEP8 standars. * Update files in folder dojo/tools/openscap with PEP8 standars. * Update files in folder dojo/tools/openvas_csv with PEP8 standars. * Update files in folder dojo/tools/ort with PEP8 standars. * Update files in folder dojo/tools/ossindex_devaudit with PEP8 standars. * Update files in folder dojo/tools/outpost24 with PEP8 standars. * Update files in folder dojo/tools/php_security_audit_v2 with PEP8 standars. 
* Update files in folder dojo/tools/php_symfony_security_check with PEP8 standars. * Update files in folder dojo/tools/pip_audit with PEP8 standars. * Update files in folder dojo/tools/pmd with PEP8 standars. * Update files in folder dojo/tools/popeye with PEP8 standars. * Update files in folder dojo/tools/pwn_sast with PEP8 standars. * Update files in folder dojo/tools/qualys with PEP8 standars. * Update files in folder dojo/tools/qualys_infrascan_webgui with PEP8 standars. * Update files in folder dojo/tools/qualys_webapp with PEP8 standars. * Update files in folder dojo/tools/retirejs with PEP8 standars. * Update files in folder dojo/tools/risk_recon with PEP8 standars. * Update files in folder dojo/tools/rubocop with PEP8 standars. * Update files in folder dojo/tools/rusty_hog with PEP8 standars. * Change BaseException to Exception --- dojo/tools/mozilla_observatory/parser.py | 22 +- dojo/tools/netsparker/parser.py | 50 +-- dojo/tools/neuvector/parser.py | 100 ++++-- dojo/tools/neuvector_compliance/parser.py | 114 ++++--- dojo/tools/nexpose/__init__.py | 2 +- dojo/tools/nexpose/parser.py | 311 ++++++++++-------- dojo/tools/nikto/__init__.py | 2 +- dojo/tools/nikto/parser.py | 73 ++-- dojo/tools/nmap/__init__.py | 2 +- dojo/tools/nmap/parser.py | 143 +++++--- dojo/tools/npm_audit/parser.py | 140 ++++---- dojo/tools/nsp/parser.py | 66 ++-- dojo/tools/nuclei/parser.py | 144 ++++---- dojo/tools/openscap/parser.py | 90 +++-- dojo/tools/openvas_csv/parser.py | 92 +++--- dojo/tools/ort/parser.py | 121 ++++--- dojo/tools/ossindex_devaudit/parser.py | 90 ++--- dojo/tools/outpost24/parser.py | 74 +++-- dojo/tools/php_security_audit_v2/parser.py | 26 +- .../php_symfony_security_check/parser.py | 74 +++-- dojo/tools/pip_audit/parser.py | 28 +- dojo/tools/pmd/parser.py | 29 +- dojo/tools/popeye/parser.py | 47 ++- dojo/tools/pwn_sast/parser.py | 65 ++-- dojo/tools/qualys/csv_parser.py | 87 +++-- dojo/tools/qualys/parser.py | 296 +++++++++-------- dojo/tools/qualys_infrascan_webgui/parser.py | 124 +++---- dojo/tools/qualys_webapp/parser.py | 304 +++++++++++------ dojo/tools/retirejs/parser.py | 57 ++-- dojo/tools/risk_recon/api.py | 55 ++-- dojo/tools/risk_recon/parser.py | 84 +++-- dojo/tools/rubocop/parser.py | 1 - dojo/tools/rusty_hog/parser.py | 180 ++++++---- 33 files changed, 1840 insertions(+), 1253 deletions(-) diff --git a/dojo/tools/mozilla_observatory/parser.py b/dojo/tools/mozilla_observatory/parser.py index 0a268e5e52..72e6a6d623 100644 --- a/dojo/tools/mozilla_observatory/parser.py +++ b/dojo/tools/mozilla_observatory/parser.py @@ -34,21 +34,25 @@ def get_findings(self, file, test): for key in nodes: node = nodes[key] - description = "\n".join([ - "**Score Description** : `" + node['score_description'] + "`", - "**Result** : `" + node['result'] + "`" - "**expectation** : " + str(node.get('expectation')) + "`", - ]) + description = "\n".join( + [ + "**Score Description** : `" + + node["score_description"] + + "`", + "**Result** : `" + node["result"] + "`" + "**expectation** : " + str(node.get("expectation")) + "`", + ] + ) finding = Finding( - title=node['score_description'], + title=node["score_description"], test=test, - active=not node['pass'], + active=not node["pass"], description=description, - severity=self.get_severity(int(node['score_modifier'])), + severity=self.get_severity(int(node["score_modifier"])), static_finding=False, dynamic_finding=True, - vuln_id_from_tool=node.get('name', key) + vuln_id_from_tool=node.get("name", key), ) findings.append(finding) diff --git 
a/dojo/tools/netsparker/parser.py b/dojo/tools/netsparker/parser.py index efc382ce37..9b4b2d3113 100644 --- a/dojo/tools/netsparker/parser.py +++ b/dojo/tools/netsparker/parser.py @@ -7,7 +7,6 @@ class NetsparkerParser(object): - def get_scan_types(self): return ["Netsparker Scan"] @@ -20,26 +19,27 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): tree = filename.read() try: - data = json.loads(str(tree, 'utf-8-sig')) - except: + data = json.loads(str(tree, "utf-8-sig")) + except Exception: data = json.loads(tree) dupes = dict() - scan_date = datetime.datetime.strptime(data["Generated"], "%d/%m/%Y %H:%M %p").date() + scan_date = datetime.datetime.strptime( + data["Generated"], "%d/%m/%Y %H:%M %p" + ).date() for item in data["Vulnerabilities"]: - title = item["Name"] findingdetail = html2text.html2text(item.get("Description", "")) if "Cwe" in item["Classification"]: try: - cwe = int(item["Classification"]["Cwe"].split(',')[0]) - except: + cwe = int(item["Classification"]["Cwe"].split(",")[0]) + except Exception: cwe = None else: cwe = None sev = item["Severity"] - if sev not in ['Info', 'Low', 'Medium', 'High', 'Critical']: - sev = 'Info' + if sev not in ["Info", "Low", "Medium", "High", "Critical"]: + sev = "Info" mitigation = html2text.html2text(item.get("RemedialProcedure", "")) references = html2text.html2text(item.get("RemedyReferences", "")) url = item["Url"] @@ -48,16 +48,18 @@ def get_findings(self, filename, test): request = item["HttpRequest"]["Content"] response = item["HttpResponse"]["Content"] - finding = Finding(title=title, - test=test, - description=findingdetail, - severity=sev.title(), - mitigation=mitigation, - impact=impact, - date=scan_date, - references=references, - cwe=cwe, - static_finding=True) + finding = Finding( + title=title, + test=test, + description=findingdetail, + severity=sev.title(), + mitigation=mitigation, + impact=impact, + date=scan_date, + references=references, + cwe=cwe, + static_finding=True, + ) if item["State"].find("FalsePositive") != -1: finding.active = False @@ -69,8 +71,14 @@ def get_findings(self, filename, test): if item["State"].find("AcceptedRisk") != -1: finding.risk_accepted = True - if (item["Classification"] is not None) and (item["Classification"]["Cvss"] is not None) and (item["Classification"]["Cvss"]["Vector"] is not None): - cvss_objects = cvss_parser.parse_cvss_from_text(item["Classification"]["Cvss"]["Vector"]) + if ( + (item["Classification"] is not None) + and (item["Classification"]["Cvss"] is not None) + and (item["Classification"]["Cvss"]["Vector"] is not None) + ): + cvss_objects = cvss_parser.parse_cvss_from_text( + item["Classification"]["Cvss"]["Vector"] + ) if len(cvss_objects) > 0: finding.cvssv3 = cvss_objects[0].clean_vector() diff --git a/dojo/tools/neuvector/parser.py b/dojo/tools/neuvector/parser.py index 2607cfc1ef..17be763568 100644 --- a/dojo/tools/neuvector/parser.py +++ b/dojo/tools/neuvector/parser.py @@ -5,9 +5,9 @@ logger = logging.getLogger(__name__) -NEUVECTOR_SCAN_NAME = 'NeuVector (REST)' -NEUVECTOR_IMAGE_SCAN_ENGAGEMENT_NAME = 'NV image scan' -NEUVECTOR_CONTAINER_SCAN_ENGAGEMENT_NAME = 'NV container scan' +NEUVECTOR_SCAN_NAME = "NeuVector (REST)" +NEUVECTOR_IMAGE_SCAN_ENGAGEMENT_NAME = "NV image scan" +NEUVECTOR_CONTAINER_SCAN_ENGAGEMENT_NAME = "NV container scan" class NeuVectorJsonParser(object): @@ -22,59 +22,92 @@ def parse_json(self, json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = 
json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise ValueError("Invalid format") return tree def get_items(self, tree, test): items = {} - if 'report' in tree: - vulnerabilityTree = tree.get('report').get('vulnerabilities', []) + if "report" in tree: + vulnerabilityTree = tree.get("report").get("vulnerabilities", []) for node in vulnerabilityTree: item = get_item(node, test) - package_name = node.get('package_name') + package_name = node.get("package_name") if len(package_name) > 64: package_name = package_name[-64:] - unique_key = node.get('name') + str(package_name + str( - node.get('package_version')) + str(node.get('severity'))) + unique_key = node.get("name") + str( + package_name + + str(node.get("package_version")) + + str(node.get("severity")) + ) items[unique_key] = item return list(items.values()) def get_item(vulnerability, test): - severity = convert_severity(vulnerability.get('severity')) if 'severity' in vulnerability else "Info" - vector = vulnerability.get('vectors_v3') if 'vectors_v3' in vulnerability else "CVSSv3 vector not provided. " - fixed_version = vulnerability.get('fixed_version') if 'fixed_version' in vulnerability else "There seems to be no fix yet. Please check description field." - score_v3 = vulnerability.get('score_v3') if 'score_v3' in vulnerability else "No CVSSv3 score yet." - package_name = vulnerability.get('package_name') + severity = ( + convert_severity(vulnerability.get("severity")) + if "severity" in vulnerability + else "Info" + ) + vector = ( + vulnerability.get("vectors_v3") + if "vectors_v3" in vulnerability + else "CVSSv3 vector not provided. " + ) + fixed_version = ( + vulnerability.get("fixed_version") + if "fixed_version" in vulnerability + else "There seems to be no fix yet. Please check description field." + ) + score_v3 = ( + vulnerability.get("score_v3") + if "score_v3" in vulnerability + else "No CVSSv3 score yet." + ) + package_name = vulnerability.get("package_name") if len(package_name) > 64: package_name = package_name[-64:] - description = vulnerability.get('description') if 'description' in vulnerability else "" - link = vulnerability.get('link') if 'link' in vulnerability else "" + description = ( + vulnerability.get("description") + if "description" in vulnerability + else "" + ) + link = vulnerability.get("link") if "link" in vulnerability else "" # create the finding object finding = Finding( - title=vulnerability.get('name') + ": " + package_name + " - " + vulnerability.get('package_version'), + title=vulnerability.get("name") + + ": " + + package_name + + " - " + + vulnerability.get("package_version"), test=test, severity=severity, - description=description + "

Vulnerable Package: " + - package_name + "

Current Version: " + str( - vulnerability['package_version']) + "

", + description=description + + "

Vulnerable Package: " + + package_name + + "

Current Version: " + + str(vulnerability["package_version"]) + + "

", mitigation=fixed_version.title(), references=link, component_name=package_name, - component_version=vulnerability.get('package_version'), + component_version=vulnerability.get("package_version"), false_p=False, duplicate=False, out_of_scope=False, mitigated=None, - severity_justification="{} (CVSS v3 base score: {})\n".format(vector, score_v3), - impact=severity) - finding.unsaved_vulnerability_ids = [vulnerability.get('name')] + severity_justification="{} (CVSS v3 base score: {})\n".format( + vector, score_v3 + ), + impact=severity, + ) + finding.unsaved_vulnerability_ids = [vulnerability.get("name")] finding.description = finding.description.strip() return finding @@ -82,22 +115,21 @@ def get_item(vulnerability, test): # see neuvector/share/types.go def convert_severity(severity): - if severity.lower() == 'critical': + if severity.lower() == "critical": return "Critical" - elif severity.lower() == 'high': + elif severity.lower() == "high": return "High" - elif severity.lower() == 'medium': + elif severity.lower() == "medium": return "Medium" - elif severity.lower() == 'low': + elif severity.lower() == "low": return "Low" - elif severity == '': + elif severity == "": return "Info" else: return severity.title() class NeuVectorParser(object): - def get_scan_types(self): return [NEUVECTOR_SCAN_NAME] @@ -111,7 +143,7 @@ def get_findings(self, filename, test): if filename is None: return list() - if filename.name.lower().endswith('.json'): + if filename.name.lower().endswith(".json"): return NeuVectorJsonParser().parse(filename, test) else: - raise ValueError('Unknown File Format') + raise ValueError("Unknown File Format") diff --git a/dojo/tools/neuvector_compliance/parser.py b/dojo/tools/neuvector_compliance/parser.py index 16570caf3a..74e5e515fd 100644 --- a/dojo/tools/neuvector_compliance/parser.py +++ b/dojo/tools/neuvector_compliance/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -NEUVECTOR_SCAN_NAME = 'NeuVector (compliance)' +NEUVECTOR_SCAN_NAME = "NeuVector (compliance)" def parse(json_output, test): @@ -19,10 +19,10 @@ def parse_json(json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise ValueError("Invalid format") return tree @@ -36,98 +36,106 @@ def get_items(tree, test): # /v1/host/{id}/compliance or similar. thus, we need to support items in a # bit different leafs. 
testsTree = None - if 'report' in tree: - testsTree = tree.get('report').get('checks', []) + if "report" in tree: + testsTree = tree.get("report").get("checks", []) else: - testsTree = tree.get('items', []) + testsTree = tree.get("items", []) for node in testsTree: item = get_item(node, test) - unique_key = node.get('type') + node.get('category') + node.get('test_number') + node.get('description') - unique_key = hashlib.md5(unique_key.encode('utf-8')).hexdigest() + unique_key = ( + node.get("type") + + node.get("category") + + node.get("test_number") + + node.get("description") + ) + unique_key = hashlib.md5(unique_key.encode("utf-8")).hexdigest() items[unique_key] = item return list(items.values()) def get_item(node, test): - if 'test_number' not in node: + if "test_number" not in node: return None - if 'category' not in node: + if "category" not in node: return None - if 'description' not in node: + if "description" not in node: return None - if 'level' not in node: + if "level" not in node: return None - test_number = node.get('test_number') - test_description = node.get('description').rstrip() + test_number = node.get("test_number") + test_description = node.get("description").rstrip() - title = test_number + ' - ' + test_description + title = test_number + " - " + test_description - test_severity = node.get('level') + test_severity = node.get("level") severity = convert_severity(test_severity) - mitigation = node.get('remediation', '').rstrip() + mitigation = node.get("remediation", "").rstrip() - category = node.get('category') + category = node.get("category") - vuln_id_from_tool = category + '_' + test_number + vuln_id_from_tool = category + "_" + test_number - test_profile = node.get('profile', 'profile unknown') + test_profile = node.get("profile", "profile unknown") - full_description = '{} ({}), {}:\n'.format(test_number, category, test_profile) - full_description += '{}\n'.format(test_description) - full_description += 'Audit: {}\n'.format(test_severity) - if 'evidence' in node: - full_description += 'Evidence:\n{}\n'.format(node.get('evidence')) - if 'location' in node: - full_description += 'Location:\n{}\n'.format(node.get('location')) - full_description += 'Mitigation:\n{}\n'.format(mitigation) + full_description = "{} ({}), {}:\n".format( + test_number, category, test_profile + ) + full_description += "{}\n".format(test_description) + full_description += "Audit: {}\n".format(test_severity) + if "evidence" in node: + full_description += "Evidence:\n{}\n".format(node.get("evidence")) + if "location" in node: + full_description += "Location:\n{}\n".format(node.get("location")) + full_description += "Mitigation:\n{}\n".format(mitigation) - tags = node.get('tags', []) + tags = node.get("tags", []) if len(tags) > 0: - full_description += 'Tags:\n' + full_description += "Tags:\n" for t in tags: - full_description += '{}\n'.format(str(t).rstrip()) + full_description += "{}\n".format(str(t).rstrip()) - messages = node.get('message', []) + messages = node.get("message", []) if len(messages) > 0: - full_description += 'Messages:\n' + full_description += "Messages:\n" for m in messages: - full_description += '{}\n'.format(str(m).rstrip()) - - finding = Finding(title=title, - test=test, - description=full_description, - severity=severity, - mitigation=mitigation, - vuln_id_from_tool=vuln_id_from_tool, - static_finding=True, - dynamic_finding=False) + full_description += "{}\n".format(str(m).rstrip()) + + finding = Finding( + title=title, + test=test, + description=full_description, + 
severity=severity, + mitigation=mitigation, + vuln_id_from_tool=vuln_id_from_tool, + static_finding=True, + dynamic_finding=False, + ) return finding # see neuvector/share/clus_apis.go def convert_severity(severity): - if severity.lower() == 'high': + if severity.lower() == "high": return "High" - elif severity.lower() == 'warn': + elif severity.lower() == "warn": return "Medium" - elif severity.lower() == 'info': + elif severity.lower() == "info": return "Low" - elif severity.lower() == 'pass': + elif severity.lower() == "pass": return "Info" - elif severity.lower() == 'note': + elif severity.lower() == "note": return "Info" - elif severity.lower() == 'error': + elif severity.lower() == "error": return "Info" else: return severity.title() class NeuVectorComplianceParser(object): - def get_scan_types(self): return [NEUVECTOR_SCAN_NAME] @@ -141,7 +149,7 @@ def get_findings(self, filename, test): if filename is None: return list() - if filename.name.lower().endswith('.json'): + if filename.name.lower().endswith(".json"): return parse(filename, test) else: - raise ValueError('Unknown File Format') + raise ValueError("Unknown File Format") diff --git a/dojo/tools/nexpose/__init__.py b/dojo/tools/nexpose/__init__.py index 369f2551a3..69e743a006 100644 --- a/dojo/tools/nexpose/__init__.py +++ b/dojo/tools/nexpose/__init__.py @@ -1 +1 @@ -__author__ = 'jay7958' +__author__ = "jay7958" diff --git a/dojo/tools/nexpose/parser.py b/dojo/tools/nexpose/parser.py index cee5bb4ae9..fc7a434440 100644 --- a/dojo/tools/nexpose/parser.py +++ b/dojo/tools/nexpose/parser.py @@ -40,8 +40,7 @@ def parse_html_type(self, node): ret = "" tag = node.tag.lower() - if tag == 'containerblockelement': - + if tag == "containerblockelement": if len(list(node)) > 0: for child in list(node): ret += self.parse_html_type(child) @@ -52,19 +51,25 @@ def parse_html_type(self, node): ret += str(node.tail).strip() + "" else: ret += "" - if tag == 'listitem': + if tag == "listitem": if len(list(node)) > 0: for child in list(node): ret += self.parse_html_type(child) else: if node.text: ret += "
  • " + str(node.text).strip() + "
  • " - if tag == 'orderedlist': + if tag == "orderedlist": i = 1 for item in list(node): - ret += "
      " + str(i) + " " + self.parse_html_type(item) + "
    " + ret += ( + "
      " + + str(i) + + " " + + self.parse_html_type(item) + + "
    " + ) i += 1 - if tag == 'paragraph': + if tag == "paragraph": if len(list(node)) > 0: for child in list(node): ret += self.parse_html_type(child) @@ -75,12 +80,12 @@ def parse_html_type(self, node): ret += str(node.tail).strip() + "

    " else: ret += "

    " - if tag == 'unorderedlist': + if tag == "unorderedlist": for item in list(node): unorderedlist = self.parse_html_type(item) if unorderedlist not in ret: ret += "* " + unorderedlist - if tag == 'urllink': + if tag == "urllink": if node.text: ret += str(node.text).strip() + " " last = "" @@ -101,17 +106,24 @@ def parse_tests_type(self, node, vulnsDefinitions): """ vulns = list() - for tests in node.findall('tests'): - for test in tests.findall('test'): - if test.get('id') in vulnsDefinitions and ( - test.get('status') in ['vulnerable-exploited', 'vulnerable-version', 'vulnerable-potential']): - vuln = vulnsDefinitions[test.get('id').lower()] + for tests in node.findall("tests"): + for test in tests.findall("test"): + if test.get("id") in vulnsDefinitions and ( + test.get("status") + in [ + "vulnerable-exploited", + "vulnerable-version", + "vulnerable-potential", + ] + ): + vuln = vulnsDefinitions[test.get("id").lower()] for desc in list(test): - if 'pluginOutput' in vuln: - vuln['pluginOutput'] += "\n\n" + \ - self.parse_html_type(desc) + if "pluginOutput" in vuln: + vuln[ + "pluginOutput" + ] += "\n\n" + self.parse_html_type(desc) else: - vuln['pluginOutput'] = self.parse_html_type(desc) + vuln["pluginOutput"] = self.parse_html_type(desc) vulns.append(vuln) return vulns @@ -122,109 +134,137 @@ def get_vuln_definitions(self, tree): """ vulns = dict() url_index = 0 - for vulnsDef in tree.findall('VulnerabilityDefinitions'): - for vulnDef in vulnsDef.findall('vulnerability'): - vid = vulnDef.get('id').lower() - severity_chk = int(vulnDef.get('severity')) + for vulnsDef in tree.findall("VulnerabilityDefinitions"): + for vulnDef in vulnsDef.findall("vulnerability"): + vid = vulnDef.get("id").lower() + severity_chk = int(vulnDef.get("severity")) if severity_chk >= 9: - sev = 'Critical' + sev = "Critical" elif severity_chk >= 7: - sev = 'High' + sev = "High" elif severity_chk >= 4: - sev = 'Medium' + sev = "Medium" elif 0 < severity_chk < 4: - sev = 'Low' + sev = "Low" else: - sev = 'Info' + sev = "Info" vuln = { - 'desc': "", - 'name': vulnDef.get('title'), - 'vector': vulnDef.get('cvssVector'), # this is CVSS v2 - 'refs': dict(), - 'resolution': "", - 'severity': sev, - 'tags': list() + "desc": "", + "name": vulnDef.get("title"), + "vector": vulnDef.get("cvssVector"), # this is CVSS v2 + "refs": dict(), + "resolution": "", + "severity": sev, + "tags": list(), } for item in list(vulnDef): - if item.tag == 'description': + if item.tag == "description": for htmlType in list(item): - vuln['desc'] += self.parse_html_type(htmlType) + vuln["desc"] += self.parse_html_type(htmlType) - elif item.tag == 'exploits': + elif item.tag == "exploits": for exploit in list(item): - vuln['refs'][exploit.get('title')] = str(exploit.get('title')).strip() + ' ' + \ - str(exploit.get('link')).strip() + vuln["refs"][exploit.get("title")] = ( + str(exploit.get("title")).strip() + + " " + + str(exploit.get("link")).strip() + ) - elif item.tag == 'references': + elif item.tag == "references": for ref in list(item): - if 'URL' in ref.get('source'): - vuln['refs'][ref.get('source') + str(url_index)] = str(ref.text).strip() + if "URL" in ref.get("source"): + vuln["refs"][ + ref.get("source") + str(url_index) + ] = str(ref.text).strip() url_index += 1 else: - vuln['refs'][ref.get('source')] = str(ref.text).strip() + vuln["refs"][ref.get("source")] = str( + ref.text + ).strip() - elif item.tag == 'solution': + elif item.tag == "solution": for htmlType in list(item): - vuln['resolution'] += self.parse_html_type(htmlType) 
+ vuln["resolution"] += self.parse_html_type( + htmlType + ) # there is currently no method to register tags in vulns - elif item.tag == 'tags': + elif item.tag == "tags": for tag in list(item): - vuln['tags'].append(tag.text.lower()) + vuln["tags"].append(tag.text.lower()) vulns[vid] = vuln return vulns def get_items(self, tree, vulns, test): hosts = list() - for nodes in tree.findall('nodes'): - for node in nodes.findall('node'): + for nodes in tree.findall("nodes"): + for node in nodes.findall("node"): host = dict() - host['name'] = node.get('address') - host['hostnames'] = set() - host['os'] = "" - host['services'] = list() - host['vulns'] = self.parse_tests_type(node, vulns) - - host['vulns'].append({ - 'name': 'Host Up', - 'desc': 'Host is up because it replied on ICMP request or some TCP/UDP port is up', - 'severity': 'Info', - }) - - for names in node.findall('names'): - for name in names.findall('name'): - host['hostnames'].add(name.text) - - for endpoints in node.findall('endpoints'): - for endpoint in endpoints.findall('endpoint'): + host["name"] = node.get("address") + host["hostnames"] = set() + host["os"] = "" + host["services"] = list() + host["vulns"] = self.parse_tests_type(node, vulns) + + host["vulns"].append( + { + "name": "Host Up", + "desc": "Host is up because it replied on ICMP request or some TCP/UDP port is up", + "severity": "Info", + } + ) + + for names in node.findall("names"): + for name in names.findall("name"): + host["hostnames"].add(name.text) + + for endpoints in node.findall("endpoints"): + for endpoint in endpoints.findall("endpoint"): svc = { - 'protocol': endpoint.get('protocol'), - 'port': int(endpoint.get('port')), - 'status': endpoint.get('status'), + "protocol": endpoint.get("protocol"), + "port": int(endpoint.get("port")), + "status": endpoint.get("status"), } - for services in endpoint.findall('services'): - for service in services.findall('service'): - svc['name'] = service.get('name', '').lower() - svc['vulns'] = self.parse_tests_type(service, vulns) - - for configs in service.findall('configurations'): - for config in configs.findall('config'): - if "banner" in config.get('name'): - svc['version'] = config.get('name') - - svc['vulns'].append({ - 'name': 'Open port {}/{}'.format(svc['protocol'].upper(), svc['port']), - 'desc': '{}/{} port is open with "{}" service'.format(svc['protocol'], - svc['port'], - service.get('name')), - 'severity': 'Info', - 'tags': [ - re.sub("[^A-Za-z0-9]+", "-", service.get('name').lower()).rstrip('-') - ] if service.get('name') != "" else [] - }) - - host['services'].append(svc) + for services in endpoint.findall("services"): + for service in services.findall("service"): + svc["name"] = service.get("name", "").lower() + svc["vulns"] = self.parse_tests_type( + service, vulns + ) + + for configs in service.findall( + "configurations" + ): + for config in configs.findall("config"): + if "banner" in config.get("name"): + svc["version"] = config.get("name") + + svc["vulns"].append( + { + "name": "Open port {}/{}".format( + svc["protocol"].upper(), + svc["port"], + ), + "desc": '{}/{} port is open with "{}" service'.format( + svc["protocol"], + svc["port"], + service.get("name"), + ), + "severity": "Info", + "tags": [ + re.sub( + "[^A-Za-z0-9]+", + "-", + service.get("name").lower(), + ).rstrip("-") + ] + if service.get("name") != "" + else [], + } + ) + + host["services"].append(svc) hosts.append(host) @@ -232,82 +272,89 @@ def get_items(self, tree, vulns, test): for host in hosts: # manage findings by node only - for 
vuln in host['vulns']: - dupe_key = vuln['severity'] + vuln['name'] + for vuln in host["vulns"]: + dupe_key = vuln["severity"] + vuln["name"] find = self.findings(dupe_key, dupes, test, vuln) - endpoint = Endpoint(host=host['name']) + endpoint = Endpoint(host=host["name"]) find.unsaved_endpoints.append(endpoint) - find.unsaved_tags = vuln.get('tags', []) + find.unsaved_tags = vuln.get("tags", []) # manage findings by service - for service in host['services']: - for vuln in service['vulns']: - dupe_key = vuln['severity'] + vuln['name'] + for service in host["services"]: + for vuln in service["vulns"]: + dupe_key = vuln["severity"] + vuln["name"] find = self.findings(dupe_key, dupes, test, vuln) endpoint = Endpoint( - host=host['name'], - port=service['port'], - protocol=service['name'] if service['name'] in SCHEME_PORT_MAP else service['protocol'], - fragment=service['protocol'].lower() if service['name'] == "dns" else None - # A little dirty hack but in case of DNS it is important to know if vulnerability is on TCP or UDP + host=host["name"], + port=service["port"], + protocol=service["name"] + if service["name"] in SCHEME_PORT_MAP + else service["protocol"], + fragment=service["protocol"].lower() + if service["name"] == "dns" + else None + # A little dirty hack but in case of DNS it is + # important to know if vulnerability is on TCP or UDP ) find.unsaved_endpoints.append(endpoint) - find.unsaved_tags = vuln.get('tags', []) + find.unsaved_tags = vuln.get("tags", []) return list(dupes.values()) @staticmethod def findings(dupe_key, dupes, test, vuln): - """ - - - """ + """ """ if dupe_key in dupes: find = dupes[dupe_key] - dupe_text = html2text.html2text(vuln.get('pluginOutput', '')) + dupe_text = html2text.html2text(vuln.get("pluginOutput", "")) if dupe_text not in find.description: find.description += "\n\n" + dupe_text else: - find = Finding(title=vuln['name'], - description=html2text.html2text( - vuln['desc'].strip()) + "\n\n" + html2text.html2text(vuln.get('pluginOutput', '').strip()), - severity=vuln['severity'], - mitigation=html2text.html2text(vuln.get('resolution')) if vuln.get('resolution') else None, - impact=vuln.get('vector') if vuln.get('vector') else None, - test=test, - false_p=False, - duplicate=False, - out_of_scope=False, - mitigated=None, - dynamic_finding=True) + find = Finding( + title=vuln["name"], + description=html2text.html2text(vuln["desc"].strip()) + + "\n\n" + + html2text.html2text(vuln.get("pluginOutput", "").strip()), + severity=vuln["severity"], + mitigation=html2text.html2text(vuln.get("resolution")) + if vuln.get("resolution") + else None, + impact=vuln.get("vector") if vuln.get("vector") else None, + test=test, + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated=None, + dynamic_finding=True, + ) # build references - refs = '' - for ref in vuln.get('refs', {}): - if ref.startswith('BID'): + refs = "" + for ref in vuln.get("refs", {}): + if ref.startswith("BID"): refs += f" * [{vuln['refs'][ref]}](https://www.securityfocus.com/bid/{vuln['refs'][ref]})" - elif ref.startswith('CA'): + elif ref.startswith("CA"): refs += f" * [{vuln['refs'][ref]}](https://www.cert.org/advisories/{vuln['refs'][ref]}.html)" - elif ref.startswith('CERT-VN'): + elif ref.startswith("CERT-VN"): refs += f" * [{vuln['refs'][ref]}](https://www.kb.cert.org/vuls/id/{vuln['refs'][ref]}.html)" - elif ref.startswith('CVE'): + elif ref.startswith("CVE"): refs += f" * [{vuln['refs'][ref]}](https://cve.mitre.org/cgi-bin/cvename.cgi?name={vuln['refs'][ref]})" - elif 
ref.startswith('DEBIAN'): + elif ref.startswith("DEBIAN"): refs += f" * [{vuln['refs'][ref]}](https://security-tracker.debian.org/tracker/{vuln['refs'][ref]})" - elif ref.startswith('XF'): + elif ref.startswith("XF"): refs += f" * [{vuln['refs'][ref]}](https://exchange.xforce.ibmcloud.com/vulnerabilities/{vuln['refs'][ref]})" - elif ref.startswith('URL'): + elif ref.startswith("URL"): refs += f" * URL: {vuln['refs'][ref]}" else: refs += f" * {ref}: {vuln['refs'][ref]}" refs += "\n" find.references = refs # update CVE - if "CVE" in vuln.get('refs', {}): - find.unsaved_vulnerability_ids = [vuln['refs']['CVE']] + if "CVE" in vuln.get("refs", {}): + find.unsaved_vulnerability_ids = [vuln["refs"]["CVE"]] find.unsaved_endpoints = list() dupes[dupe_key] = find return find diff --git a/dojo/tools/nikto/__init__.py b/dojo/tools/nikto/__init__.py index 369f2551a3..69e743a006 100644 --- a/dojo/tools/nikto/__init__.py +++ b/dojo/tools/nikto/__init__.py @@ -1 +1 @@ -__author__ = 'jay7958' +__author__ = "jay7958" diff --git a/dojo/tools/nikto/parser.py b/dojo/tools/nikto/parser.py index 37d969ab86..5092ba44d3 100644 --- a/dojo/tools/nikto/parser.py +++ b/dojo/tools/nikto/parser.py @@ -1,4 +1,3 @@ - import hashlib import logging import re @@ -30,47 +29,53 @@ def get_label_for_scan_types(self, scan_type): return scan_type # no custom label for now def get_description_for_scan_types(self, scan_type): - return "XML output (old and new nxvmlversion=\"1.2\" type) or JSON output" + return ( + 'XML output (old and new nxvmlversion="1.2" type) or JSON output' + ) def get_findings(self, filename, test): - if filename.name.lower().endswith('.xml'): + if filename.name.lower().endswith(".xml"): return self.process_xml(filename, test) - elif filename.name.lower().endswith('.json'): + elif filename.name.lower().endswith(".json"): return self.process_json(filename, test) else: - raise ValueError('Unknown File Format') + raise ValueError("Unknown File Format") def process_json(self, file, test): data = json.load(file) dupes = dict() - host = data.get('host') - port = data.get('port') + host = data.get("host") + port = data.get("port") if port is not None: port = int(port) - for vulnerability in data.get('vulnerabilities', []): + for vulnerability in data.get("vulnerabilities", []): finding = Finding( - title=vulnerability.get('msg'), + title=vulnerability.get("msg"), severity="Info", # Nikto doesn't assign severity, default to Info - description="\n".join([ - f"**id:** `{vulnerability.get('id')}`", - f"**msg:** `{vulnerability.get('msg')}`", - f"**HTTP Method:** `{vulnerability.get('method')}`", - f"**OSVDB:** `{vulnerability.get('OSVDB')}`", - ]), - vuln_id_from_tool=vulnerability.get('id'), + description="\n".join( + [ + f"**id:** `{vulnerability.get('id')}`", + f"**msg:** `{vulnerability.get('msg')}`", + f"**HTTP Method:** `{vulnerability.get('method')}`", + f"**OSVDB:** `{vulnerability.get('OSVDB')}`", + ] + ), + vuln_id_from_tool=vulnerability.get("id"), nb_occurences=1, ) # manage if we have an ID from OSVDB - if "OSVDB" in vulnerability and "0" != vulnerability.get('OSVDB'): - finding.unique_id_from_tool = "OSVDB-" + vulnerability.get('OSVDB') + if "OSVDB" in vulnerability and "0" != vulnerability.get("OSVDB"): + finding.unique_id_from_tool = "OSVDB-" + vulnerability.get( + "OSVDB" + ) finding.description += "\n*This finding is marked as medium as there is a link to OSVDB*" finding.severity = "Medium" # build the endpoint endpoint = Endpoint( host=host, port=port, - path=vulnerability.get('url'), + 
path=vulnerability.get("url"), ) finding.unsaved_endpoints = [endpoint] @@ -80,8 +85,12 @@ def process_json(self, file, test): find = dupes[dupe_key] find.description += "\n-----\n" + finding.description find.unsaved_endpoints.append(endpoint) - find.unique_id_from_tool = None # as it is an aggregated finding we erase ids - find.vuln_id_from_tool = None # as it is an aggregated finding we erase ids + find.unique_id_from_tool = ( + None # as it is an aggregated finding we erase ids + ) + find.vuln_id_from_tool = ( + None # as it is an aggregated finding we erase ids + ) find.nb_occurences += 1 else: dupes[dupe_key] = finding @@ -93,41 +102,45 @@ def process_xml(self, file, test): tree = ET.parse(file) root = tree.getroot() - scan = root.find('scandetails') + scan = root.find("scandetails") if scan is not None: self.process_scandetail(scan, test, dupes) else: # New versions of Nikto have a new file type (nxvmlversion="1.2") which adds an additional niktoscan tag - # This find statement below is to support new file format while not breaking older Nikto scan files versions. - for scan in root.findall('./niktoscan/scandetails'): + # This find statement below is to support new file format while not + # breaking older Nikto scan files versions. + for scan in root.findall("./niktoscan/scandetails"): self.process_scandetail(scan, test, dupes) return list(dupes.values()) def process_scandetail(self, scan, test, dupes): - for item in scan.findall('item'): + for item in scan.findall("item"): # Title titleText = None description = item.findtext("description") # Cut the title down to the first sentence sentences = re.split( - r'(? 0: titleText = sentences[0][:900] else: titleText = description[:900] # Description - description = "\n".join([ + description = "\n".join( + [ f"**Host:** `{item.findtext('iplink')}`", f"**Description:** `{item.findtext('description')}`", f"**HTTP Method:** `{item.attrib.get('method')}`", - ]) + ] + ) # Manage severity the same way with JSON severity = "Info" # Nikto doesn't assign severity, default to Info - if item.get('osvdbid') is not None and "0" != item.get('osvdbid'): + if item.get("osvdbid") is not None and "0" != item.get("osvdbid"): severity = "Medium" finding = Finding( @@ -137,7 +150,7 @@ def process_scandetail(self, scan, test, dupes): severity=severity, dynamic_finding=True, static_finding=False, - vuln_id_from_tool=item.attrib.get('id'), + vuln_id_from_tool=item.attrib.get("id"), nb_occurences=1, ) diff --git a/dojo/tools/nmap/__init__.py b/dojo/tools/nmap/__init__.py index 43f000e0f3..a7849c0c39 100644 --- a/dojo/tools/nmap/__init__.py +++ b/dojo/tools/nmap/__init__.py @@ -1 +1 @@ -__author__ = 'patriknordlen' +__author__ = "patriknordlen" diff --git a/dojo/tools/nmap/parser.py b/dojo/tools/nmap/parser.py index 5ac1f42290..171795126c 100755 --- a/dojo/tools/nmap/parser.py +++ b/dojo/tools/nmap/parser.py @@ -6,7 +6,6 @@ class NmapParser(object): - def get_scan_types(self): return ["Nmap Scan"] @@ -20,23 +19,29 @@ def get_findings(self, file, test): tree = parse(file) root = tree.getroot() dupes = dict() - if 'nmaprun' not in root.tag: + if "nmaprun" not in root.tag: raise ValueError("This doesn't seem to be a valid Nmap xml file.") report_date = None try: - report_date = datetime.datetime.fromtimestamp(int(root.attrib['start'])) + report_date = datetime.datetime.fromtimestamp( + int(root.attrib["start"]) + ) except ValueError: pass for host in root.findall("host"): host_info = "### Host\n\n" - ip = host.find("address[@addrtype='ipv4']").attrib['addr'] + ip = 
host.find("address[@addrtype='ipv4']").attrib["addr"] if ip is not None: host_info += "**IP Address:** %s\n" % ip - fqdn = host.find("hostnames/hostname[@type='PTR']").attrib['name'] if host.find("hostnames/hostname[@type='PTR']") is not None else None + fqdn = ( + host.find("hostnames/hostname[@type='PTR']").attrib["name"] + if host.find("hostnames/hostname[@type='PTR']") is not None + else None + ) if fqdn is not None: host_info += "**FQDN:** %s\n" % fqdn @@ -44,44 +49,70 @@ def get_findings(self, file, test): for os in host.iter("os"): for os_match in os.iter("osmatch"): - if 'name' in os_match.attrib: - host_info += "**Host OS:** %s\n" % os_match.attrib['name'] - if 'accuracy' in os_match.attrib: - host_info += "**Accuracy:** {0}%\n".format(os_match.attrib['accuracy']) + if "name" in os_match.attrib: + host_info += ( + "**Host OS:** %s\n" % os_match.attrib["name"] + ) + if "accuracy" in os_match.attrib: + host_info += "**Accuracy:** {0}%\n".format( + os_match.attrib["accuracy"] + ) host_info += "\n\n" for port_element in host.findall("ports/port"): - protocol = port_element.attrib['protocol'] - endpoint = Endpoint(host=fqdn if fqdn else ip, protocol=protocol) - if 'portid' in port_element.attrib and port_element.attrib['portid'].isdigit(): - endpoint.port = int(port_element.attrib['portid']) + protocol = port_element.attrib["protocol"] + endpoint = Endpoint( + host=fqdn if fqdn else ip, protocol=protocol + ) + if ( + "portid" in port_element.attrib + and port_element.attrib["portid"].isdigit() + ): + endpoint.port = int(port_element.attrib["portid"]) # filter on open ports - if 'open' != port_element.find("state").attrib.get('state'): + if "open" != port_element.find("state").attrib.get("state"): continue title = "Open port: %s/%s" % (endpoint.port, endpoint.protocol) description = host_info - description += "**Port/Protocol:** %s/%s\n" % (endpoint.port, endpoint.protocol) + description += "**Port/Protocol:** %s/%s\n" % ( + endpoint.port, + endpoint.protocol, + ) service_info = "\n\n" - if port_element.find('service') is not None: - if 'product' in port_element.find('service').attrib: - service_info += "**Product:** %s\n" % port_element.find('service').attrib['product'] - - if 'version' in port_element.find('service').attrib: - service_info += "**Version:** %s\n" % port_element.find('service').attrib['version'] - - if 'extrainfo' in port_element.find('service').attrib: - service_info += "**Extra Info:** %s\n" % port_element.find('service').attrib['extrainfo'] + if port_element.find("service") is not None: + if "product" in port_element.find("service").attrib: + service_info += ( + "**Product:** %s\n" + % port_element.find("service").attrib["product"] + ) + + if "version" in port_element.find("service").attrib: + service_info += ( + "**Version:** %s\n" + % port_element.find("service").attrib["version"] + ) + + if "extrainfo" in port_element.find("service").attrib: + service_info += ( + "**Extra Info:** %s\n" + % port_element.find("service").attrib["extrainfo"] + ) description += service_info description += "\n\n" - # manage some script like https://github.com/vulnersCom/nmap-vulners - for script_element in port_element.findall('script[@id="vulners"]'): - self.manage_vulner_script(test, dupes, script_element, endpoint, report_date) + # manage some script like + # https://github.com/vulnersCom/nmap-vulners + for script_element in port_element.findall( + 'script[@id="vulners"]' + ): + self.manage_vulner_script( + test, dupes, script_element, endpoint, report_date + ) severity = "Info" 
dupe_key = "nmap:" + str(endpoint.port) @@ -90,13 +121,14 @@ def get_findings(self, file, test): if description is not None: find.description += description else: - find = Finding(title=title, - test=test, - description=description, - severity=severity, - mitigation="N/A", - impact="No impact provided", - ) + find = Finding( + title=title, + test=test, + description=description, + severity=severity, + mitigation="N/A", + impact="No impact provided", + ) find.unsaved_endpoints = list() dupes[dupe_key] = find if report_date: @@ -124,37 +156,52 @@ def convert_cvss_score(self, raw_value): else: return "Critical" - def manage_vulner_script(self, test, dupes, script_element, endpoint, report_date=None): - for component_element in script_element.findall('table'): - component_cpe = CPE(component_element.attrib['key']) - for vuln in component_element.findall('table'): + def manage_vulner_script( + self, test, dupes, script_element, endpoint, report_date=None + ): + for component_element in script_element.findall("table"): + component_cpe = CPE(component_element.attrib["key"]) + for vuln in component_element.findall("table"): # convert elements in dict vuln_attributes = dict() - for elem in vuln.findall('elem'): - vuln_attributes[elem.attrib['key'].lower()] = elem.text + for elem in vuln.findall("elem"): + vuln_attributes[elem.attrib["key"].lower()] = elem.text - vuln_id = vuln_attributes['id'] + vuln_id = vuln_attributes["id"] description = "### Vulnerability\n\n" description += "**ID**: `" + str(vuln_id) + "`\n" description += "**CPE**: " + str(component_cpe) + "\n" for attribute in vuln_attributes: - description += "**" + attribute + "**: `" + vuln_attributes[attribute] + "`\n" - severity = self.convert_cvss_score(vuln_attributes['cvss']) + description += ( + "**" + + attribute + + "**: `" + + vuln_attributes[attribute] + + "`\n" + ) + severity = self.convert_cvss_score(vuln_attributes["cvss"]) finding = Finding( title=vuln_id, test=test, description=description, severity=severity, - component_name=component_cpe.get_product()[0] if len(component_cpe.get_product()) > 0 else '', - component_version=component_cpe.get_version()[0] if len(component_cpe.get_version()) > 0 else '', + component_name=component_cpe.get_product()[0] + if len(component_cpe.get_product()) > 0 + else "", + component_version=component_cpe.get_version()[0] + if len(component_cpe.get_version()) > 0 + else "", vuln_id_from_tool=vuln_id, nb_occurences=1, ) finding.unsaved_endpoints = [endpoint] # manage if CVE is in metadata - if "type" in vuln_attributes and "cve" == vuln_attributes["type"]: + if ( + "type" in vuln_attributes + and "cve" == vuln_attributes["type"] + ): finding.unsaved_vulnerability_ids = [vuln_attributes["id"]] if report_date: @@ -164,7 +211,9 @@ def manage_vulner_script(self, test, dupes, script_element, endpoint, report_dat if dupe_key in dupes: find = dupes[dupe_key] if description is not None: - find.description += "\n-----\n\n" + finding.description # fives '-' produces an horizontal line + find.description += ( + "\n-----\n\n" + finding.description + ) # fives '-' produces an horizontal line find.unsaved_endpoints.extend(finding.unsaved_endpoints) find.nb_occurences += finding.nb_occurences else: diff --git a/dojo/tools/npm_audit/parser.py b/dojo/tools/npm_audit/parser.py index 94aa5fae93..968d00e0c9 100644 --- a/dojo/tools/npm_audit/parser.py +++ b/dojo/tools/npm_audit/parser.py @@ -9,7 +9,6 @@ class NpmAuditParser(object): - def get_scan_types(self): return ["NPM Audit Scan"] @@ -29,22 +28,26 @@ def 
parse_json(self, json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise ValueError("Invalid format, unable to parse json.") - if tree.get('auditReportVersion'): - raise ValueError('npm7 with auditReportVersion 2 or higher not yet supported as it lacks the most important fields in the reports') + if tree.get("auditReportVersion"): + raise ValueError( + "npm7 with auditReportVersion 2 or higher not yet supported as it lacks the most important fields in the reports" + ) - if tree.get('error'): - error = tree.get('error') - code = error['code'] - summary = error['summary'] - raise ValueError('npm audit report contains errors: %s, %s', code, summary) + if tree.get("error"): + error = tree.get("error") + code = error["code"] + summary = error["summary"] + raise ValueError( + "npm audit report contains errors: %s, %s", code, summary + ) - subtree = tree.get('advisories') + subtree = tree.get("advisories") return subtree @@ -53,74 +56,97 @@ def get_items(self, tree, test): for key, node in tree.items(): item = get_item(node, test) - unique_key = str(node['id']) + str(node['module_name']) + unique_key = str(node["id"]) + str(node["module_name"]) items[unique_key] = item return list(items.values()) def censor_path_hashes(path): - """ https://github.com/npm/npm/issues/20739 for dependencies installed from git, npm audit replaces the name with a (random?) hash """ + """https://github.com/npm/npm/issues/20739 for dependencies installed from git, npm audit replaces the name with a (random?) hash""" """ this hash changes on every run of npm audit, so defect dojo might think it's a new finding every run """ """ we strip the hash and replace it with 'censored_by_npm_audit` """ if not path: return None - return re.sub('[a-f0-9]{64}', 'censored_by_npm_audit', path) + return re.sub("[a-f0-9]{64}", "censored_by_npm_audit", path) def get_item(item_node, test): - - if item_node['severity'] == 'low': - severity = 'Low' - elif item_node['severity'] == 'moderate': - severity = 'Medium' - elif item_node['severity'] == 'high': - severity = 'High' - elif item_node['severity'] == 'critical': - severity = 'Critical' + if item_node["severity"] == "low": + severity = "Low" + elif item_node["severity"] == "moderate": + severity = "Medium" + elif item_node["severity"] == "high": + severity = "High" + elif item_node["severity"] == "critical": + severity = "Critical" else: - severity = 'Info' + severity = "Info" - paths = '' + paths = "" component_version = None - for npm_finding in item_node['findings']: + for npm_finding in item_node["findings"]: # use first version as component_version - component_version = npm_finding['version'] if not component_version else component_version - paths += "\n - " + str(npm_finding['version']) + ":" + str(','.join(npm_finding['paths'][:25])) - if len(npm_finding['paths']) > 25: + component_version = ( + npm_finding["version"] + if not component_version + else component_version + ) + paths += ( + "\n - " + + str(npm_finding["version"]) + + ":" + + str(",".join(npm_finding["paths"][:25])) + ) + if len(npm_finding["paths"]) > 25: paths += "\n - ..... 
(list of paths truncated after 25 paths)" cwe = get_npm_cwe(item_node) - dojo_finding = Finding(title=item_node['title'] + " - " + "(" + item_node['module_name'] + ", " + item_node['vulnerable_versions'] + ")", - test=test, - severity=severity, - file_path=censor_path_hashes(item_node['findings'][0]['paths'][0]), - description=item_node['url'] + "\n" + - item_node['overview'] + "\n Vulnerable Module: " + - item_node['module_name'] + "\n Vulnerable Versions: " + - str(item_node['vulnerable_versions']) + "\n Patched Version: " + - str(item_node['patched_versions']) + "\n Vulnerable Paths: " + - str(paths) + "\n CWE: " + - str(item_node['cwe']) + "\n Access: " + - str(item_node['access']), - cwe=cwe, - mitigation=item_node['recommendation'], - references=item_node['url'], - component_name=item_node['module_name'], - component_version=component_version, - false_p=False, - duplicate=False, - out_of_scope=False, - mitigated=None, - impact="No impact provided", - static_finding=True, - dynamic_finding=False) - - if len(item_node['cves']) > 0: + dojo_finding = Finding( + title=item_node["title"] + + " - " + + "(" + + item_node["module_name"] + + ", " + + item_node["vulnerable_versions"] + + ")", + test=test, + severity=severity, + file_path=censor_path_hashes(item_node["findings"][0]["paths"][0]), + description=item_node["url"] + + "\n" + + item_node["overview"] + + "\n Vulnerable Module: " + + item_node["module_name"] + + "\n Vulnerable Versions: " + + str(item_node["vulnerable_versions"]) + + "\n Patched Version: " + + str(item_node["patched_versions"]) + + "\n Vulnerable Paths: " + + str(paths) + + "\n CWE: " + + str(item_node["cwe"]) + + "\n Access: " + + str(item_node["access"]), + cwe=cwe, + mitigation=item_node["recommendation"], + references=item_node["url"], + component_name=item_node["module_name"], + component_version=component_version, + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated=None, + impact="No impact provided", + static_finding=True, + dynamic_finding=False, + ) + + if len(item_node["cves"]) > 0: dojo_finding.unsaved_vulnerability_ids = list() - for vulnerability_id in item_node['cves']: + for vulnerability_id in item_node["cves"]: dojo_finding.unsaved_vulnerability_ids.append(vulnerability_id) return dojo_finding diff --git a/dojo/tools/nsp/parser.py b/dojo/tools/nsp/parser.py index e628916c9e..40a7dcb66a 100644 --- a/dojo/tools/nsp/parser.py +++ b/dojo/tools/nsp/parser.py @@ -4,7 +4,6 @@ class NspParser(object): - def get_scan_types(self): return ["Node Security Platform Scan"] @@ -25,10 +24,10 @@ def parse_json(self, json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise ValueError("Invalid format") return tree @@ -38,41 +37,56 @@ def get_items(self, tree, test): for node in tree: item = get_item(node, test) - unique_key = node['title'] + str(node['path']) + unique_key = node["title"] + str(node["path"]) items[unique_key] = item return list(items.values()) def get_item(item_node, test): - # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss - if item_node['cvss_score'] <= 3.9: + if item_node["cvss_score"] <= 3.9: severity = "Low" - elif item_node['cvss_score'] > 4.0 and item_node['cvss_score'] <= 6.9: + elif item_node["cvss_score"] > 4.0 and item_node["cvss_score"] <= 6.9: severity = "Medium" - elif item_node['cvss_score'] > 7.0 and item_node['cvss_score'] <= 8.9: + elif 
item_node["cvss_score"] > 7.0 and item_node["cvss_score"] <= 8.9: severity = "High" else: severity = "Critical" - finding = Finding(title=item_node['title'] + " - " + "(" + item_node['module'] + ", " + item_node['version'] + ")", - test=test, - severity=severity, - description=item_node['overview'] + "\n Vulnerable Module: " + - item_node['module'] + "\n Vulnerable Versions: " + - str(item_node['vulnerable_versions']) + "\n Current Version: " + - str(item_node['version']) + "\n Patched Version: " + - str(item_node['patched_versions']) + "\n Vulnerable Path: " + " > ".join(item_node['path']) + "\n CVSS Score: " + - str(item_node['cvss_score']) + "\n CVSS Vector: " + - str(item_node['cvss_vector']), - mitigation=item_node['recommendation'], - references=item_node['advisory'], - false_p=False, - duplicate=False, - out_of_scope=False, - mitigated=None, - impact="No impact provided") + finding = Finding( + title=item_node["title"] + + " - " + + "(" + + item_node["module"] + + ", " + + item_node["version"] + + ")", + test=test, + severity=severity, + description=item_node["overview"] + + "\n Vulnerable Module: " + + item_node["module"] + + "\n Vulnerable Versions: " + + str(item_node["vulnerable_versions"]) + + "\n Current Version: " + + str(item_node["version"]) + + "\n Patched Version: " + + str(item_node["patched_versions"]) + + "\n Vulnerable Path: " + + " > ".join(item_node["path"]) + + "\n CVSS Score: " + + str(item_node["cvss_score"]) + + "\n CVSS Vector: " + + str(item_node["cvss_vector"]), + mitigation=item_node["recommendation"], + references=item_node["advisory"], + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated=None, + impact="No impact provided", + ) return finding diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py index 21f6d07ffc..782f0cf578 100644 --- a/dojo/tools/nuclei/parser.py +++ b/dojo/tools/nuclei/parser.py @@ -14,7 +14,7 @@ class NucleiParser(object): A class that can be used to parse the nuclei (https://github.com/projectdiscovery/nuclei) JSON report file """ - DEFAULT_SEVERITY = 'Low' + DEFAULT_SEVERITY = "Low" def get_scan_types(self): return ["Nuclei Scan"] @@ -32,23 +32,26 @@ def get_findings(self, filename, test): dupes = {} for item in data: - logger.debug('Item %s.', str(item)) - template_id = item.get('templateID', item.get('template-id', '')) - info = item.get('info') - name = info.get('name') - severity = info.get('severity').title() + logger.debug("Item %s.", str(item)) + template_id = item.get("templateID", item.get("template-id", "")) + info = item.get("info") + name = info.get("name") + severity = info.get("severity").title() if severity not in Finding.SEVERITIES: - logger.debug('Unsupported severity value "%s", change to "%s"', - severity, self.DEFAULT_SEVERITY) + logger.debug( + 'Unsupported severity value "%s", change to "%s"', + severity, + self.DEFAULT_SEVERITY, + ) severity = self.DEFAULT_SEVERITY - item_type = item.get('type') + item_type = item.get("type") if item_type is None: - item_type = '' - matched = item.get('matched', item.get('matched-at', '')) - if '://' in matched: + item_type = "" + matched = item.get("matched", item.get("matched-at", "")) + if "://" in matched: endpoint = Endpoint.from_uri(matched) else: - endpoint = Endpoint.from_uri('//' + matched) + endpoint = Endpoint.from_uri("//" + matched) finding = Finding( title=f"{name}", @@ -57,72 +60,97 @@ def get_findings(self, filename, test): nb_occurences=1, vuln_id_from_tool=template_id, ) - if item.get('timestamp'): - finding.date = 
date_parser.parse(item.get('timestamp')) - if info.get('description'): - finding.description = info.get('description') - if item.get('extracted-results'): - finding.description += "\n**Results:**\n" + '\n'.join(item.get('extracted-results')) - if info.get('tags'): - finding.unsaved_tags = info.get('tags') - if info.get('reference'): - reference = info.get('reference') - if type(reference) is list: - finding.references = '\n'.join(info.get('reference')) + if item.get("timestamp"): + finding.date = date_parser.parse(item.get("timestamp")) + if info.get("description"): + finding.description = info.get("description") + if item.get("extracted-results"): + finding.description += "\n**Results:**\n" + "\n".join( + item.get("extracted-results") + ) + if info.get("tags"): + finding.unsaved_tags = info.get("tags") + if info.get("reference"): + reference = info.get("reference") + if isinstance(reference, list): + finding.references = "\n".join(info.get("reference")) else: - finding.references = info.get('reference') + finding.references = info.get("reference") finding.unsaved_endpoints.append(endpoint) - classification = info.get('classification') + classification = info.get("classification") if classification: - if 'cve-id' in classification and classification['cve-id']: - cve_ids = classification['cve-id'] - finding.unsaved_vulnerability_ids = list(map(lambda x: x.upper(), cve_ids)) - if ('cwe-id' in classification and classification['cwe-id'] - and len(classification['cwe-id']) > 0): - cwe = classification['cwe-id'][0] + if "cve-id" in classification and classification["cve-id"]: + cve_ids = classification["cve-id"] + finding.unsaved_vulnerability_ids = list( + map(lambda x: x.upper(), cve_ids) + ) + if ( + "cwe-id" in classification + and classification["cwe-id"] + and len(classification["cwe-id"]) > 0 + ): + cwe = classification["cwe-id"][0] finding.cwe = int(cwe[4:]) - if 'cvss-metrics' in classification and classification['cvss-metrics']: + if ( + "cvss-metrics" in classification + and classification["cvss-metrics"] + ): cvss_objects = cvss_parser.parse_cvss_from_text( - classification['cvss-metrics']) + classification["cvss-metrics"] + ) if len(cvss_objects) > 0: finding.cvssv3 = cvss_objects[0].clean_vector() - if 'cvss-score' in classification and classification['cvss-score']: - finding.cvssv3_score = classification['cvss-score'] + if ( + "cvss-score" in classification + and classification["cvss-score"] + ): + finding.cvssv3_score = classification["cvss-score"] - matcher = item.get('matcher-name', item.get('matcher_name')) + matcher = item.get("matcher-name", item.get("matcher_name")) if matcher: finding.component_name = matcher else: - matcher = '' - - if info.get('remediation'): - finding.mitigation = info.get('remediation') - - host = item.get('host', '') - - if item.get('curl-command'): - finding.steps_to_reproduce = 'curl command to reproduce the request:\n`' + \ - item.get('curl-command') + '`' - - if item.get('request'): - finding.unsaved_request = item.get('request') - if item.get('response'): - finding.unsaved_response = item.get('response') - - logger.debug('dupe keys %s, %s, %s, %s.', template_id, item_type, matcher, host) + matcher = "" + + if info.get("remediation"): + finding.mitigation = info.get("remediation") + + host = item.get("host", "") + + if item.get("curl-command"): + finding.steps_to_reproduce = ( + "curl command to reproduce the request:\n`" + + item.get("curl-command") + + "`" + ) + + if item.get("request"): + finding.unsaved_request = item.get("request") + if 
item.get("response"): + finding.unsaved_response = item.get("response") + + logger.debug( + "dupe keys %s, %s, %s, %s.", + template_id, + item_type, + matcher, + host, + ) dupe_key = hashlib.sha256( - (template_id + item_type + matcher + endpoint.host).encode('utf-8') + (template_id + item_type + matcher + endpoint.host).encode( + "utf-8" + ) ).hexdigest() if dupe_key in dupes: - logger.debug('dupe_key %s exists.', str(dupe_key)) + logger.debug("dupe_key %s exists.", str(dupe_key)) finding = dupes[dupe_key] if endpoint not in finding.unsaved_endpoints: finding.unsaved_endpoints.append(endpoint) - logger.debug('Appended endpoint %s', endpoint) + logger.debug("Appended endpoint %s", endpoint) finding.nb_occurences += 1 else: dupes[dupe_key] = finding diff --git a/dojo/tools/openscap/parser.py b/dojo/tools/openscap/parser.py index 784c335103..9f3ba66132 100644 --- a/dojo/tools/openscap/parser.py +++ b/dojo/tools/openscap/parser.py @@ -9,7 +9,6 @@ class OpenscapParser(object): - def get_scan_types(self): return ["Openscap Vulnerability Scan"] @@ -26,52 +25,75 @@ def get_findings(self, file, test): namespace = self.get_namespace(root) # check if xml file hash correct root or not. - if 'Benchmark' not in root.tag: - raise ValueError("This doesn't seem to be a valid Openscap vulnerability scan xml file.") - if 'http://checklists.nist.gov/xccdf/' not in namespace: - raise ValueError("This doesn't seem to be a valid Openscap vulnerability scan xml file.") + if "Benchmark" not in root.tag: + raise ValueError( + "This doesn't seem to be a valid Openscap vulnerability scan xml file." + ) + if "http://checklists.nist.gov/xccdf/" not in namespace: + raise ValueError( + "This doesn't seem to be a valid Openscap vulnerability scan xml file." + ) # read rules rules = {} - for rule in root.findall('.//{0}Rule'.format(namespace)): - rules[rule.attrib['id']] = { - "title": rule.findtext('./{0}title'.format(namespace)) + for rule in root.findall(".//{0}Rule".format(namespace)): + rules[rule.attrib["id"]] = { + "title": rule.findtext("./{0}title".format(namespace)) } # go to test result - test_result = tree.find('./{0}TestResult'.format(namespace)) + test_result = tree.find("./{0}TestResult".format(namespace)) ips = [] # append all target in a list. - for ip in test_result.findall('./{0}target'.format(namespace)): + for ip in test_result.findall("./{0}target".format(namespace)): ips.append(ip.text) - for ip in test_result.findall('./{0}target-address'.format(namespace)): + for ip in test_result.findall("./{0}target-address".format(namespace)): ips.append(ip.text) dupes = dict() - # run both rule, and rule-result in parallel so that we can get title for failed test from rule. - for rule_result in test_result.findall('./{0}rule-result'.format(namespace)): - result = rule_result.findtext('./{0}result'.format(namespace)) + # run both rule, and rule-result in parallel so that we can get title + # for failed test from rule. + for rule_result in test_result.findall( + "./{0}rule-result".format(namespace) + ): + result = rule_result.findtext("./{0}result".format(namespace)) # find only failed report. 
if "fail" in result: # get rule corresponding to rule-result - rule = rules[rule_result.attrib['idref']] - title = rule['title'] - description = "\n".join([ - "**IdRef:** `" + rule_result.attrib['idref'] + "`", - "**Title:** `" + title + "`", - ]) + rule = rules[rule_result.attrib["idref"]] + title = rule["title"] + description = "\n".join( + [ + "**IdRef:** `" + rule_result.attrib["idref"] + "`", + "**Title:** `" + title + "`", + ] + ) vulnerability_ids = [] - for vulnerability_id in rule_result.findall("./{0}ident[@system='http://cve.mitre.org']".format(namespace)): + for vulnerability_id in rule_result.findall( + "./{0}ident[@system='http://cve.mitre.org']".format( + namespace + ) + ): vulnerability_ids.append(vulnerability_id.text) # get severity. - severity = rule_result.attrib.get('severity', 'medium').lower().capitalize() + severity = ( + rule_result.attrib.get("severity", "medium") + .lower() + .capitalize() + ) # according to the spec 'unknown' is a possible value - if severity == 'Unknown': - severity = 'Info' + if severity == "Unknown": + severity = "Info" references = "" # get references. - for check_content in rule_result.findall('./{0}check/{0}check-content-ref'.format(namespace)): - references += "**name:** : " + check_content.attrib['name'] + "\n" - references += "**href** : " + check_content.attrib['href'] + "\n" + for check_content in rule_result.findall( + "./{0}check/{0}check-content-ref".format(namespace) + ): + references += ( + "**name:** : " + check_content.attrib["name"] + "\n" + ) + references += ( + "**href** : " + check_content.attrib["href"] + "\n" + ) finding = Finding( title=title, @@ -80,7 +102,7 @@ def get_findings(self, file, test): references=references, dynamic_finding=True, static_finding=False, - unique_id_from_tool=rule_result.attrib['idref'], + unique_id_from_tool=rule_result.attrib["idref"], ) if vulnerability_ids: finding.unsaved_vulnerability_ids = vulnerability_ids @@ -90,13 +112,15 @@ def get_findings(self, file, test): validate_ipv46_address(ip) endpoint = Endpoint(host=ip) except ValidationError: - if '://' in ip: + if "://" in ip: endpoint = Endpoint.from_uri(ip) else: - endpoint = Endpoint.from_uri('//' + ip) + endpoint = Endpoint.from_uri("//" + ip) finding.unsaved_endpoints.append(endpoint) - dupe_key = hashlib.sha256(references.encode('utf-8')).hexdigest() + dupe_key = hashlib.sha256( + references.encode("utf-8") + ).hexdigest() if dupe_key in dupes: find = dupes[dupe_key] if finding.references: @@ -109,5 +133,5 @@ def get_findings(self, file, test): def get_namespace(self, element): """Extract namespace present in XML file.""" - m = re.match(r'\{.*\}', element.tag) - return m.group(0) if m else '' + m = re.match(r"\{.*\}", element.tag) + return m.group(0) if m else "" diff --git a/dojo/tools/openvas_csv/parser.py b/dojo/tools/openvas_csv/parser.py index 8dd5cd0e35..04d6166b23 100644 --- a/dojo/tools/openvas_csv/parser.py +++ b/dojo/tools/openvas_csv/parser.py @@ -1,4 +1,3 @@ - import csv import hashlib import io @@ -9,7 +8,6 @@ class ColumnMappingStrategy(object): - mapped_column = None def __init__(self): @@ -20,25 +18,26 @@ def map_column_value(self, finding, column_value): @staticmethod def evaluate_bool_value(column_value): - if column_value.lower() == 'true': + if column_value.lower() == "true": return True - elif column_value.lower() == 'false': + elif column_value.lower() == "false": return False else: return None def process_column(self, column_name, column_value, finding): - - if column_name.lower() == self.mapped_column and 
column_value is not None: + if ( + column_name.lower() == self.mapped_column + and column_value is not None + ): self.map_column_value(finding, column_value) elif self.successor is not None: self.successor.process_column(column_name, column_value, finding) class DateColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'timestamp' + self.mapped_column = "timestamp" super(DateColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -46,9 +45,8 @@ def map_column_value(self, finding, column_value): class TitleColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'nvt name' + self.mapped_column = "nvt name" super(TitleColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -56,9 +54,8 @@ def map_column_value(self, finding, column_value): class CweColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'cweid' + self.mapped_column = "cweid" super(CweColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -67,9 +64,8 @@ def map_column_value(self, finding, column_value): class PortColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'port' + self.mapped_column = "port" super(PortColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -78,9 +74,8 @@ def map_column_value(self, finding, column_value): class ProtocolColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'port protocol' + self.mapped_column = "port protocol" super(ProtocolColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -89,20 +84,20 @@ def map_column_value(self, finding, column_value): class IpColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'ip' + self.mapped_column = "ip" super(IpColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): - if not finding.unsaved_endpoints[0].host: # process only if host is not already defined (by field hostname) + if not finding.unsaved_endpoints[ + 0 + ].host: # process only if host is not already defined (by field hostname) finding.unsaved_endpoints[0].host = column_value class HostnameColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'hostname' + self.mapped_column = "hostname" super(HostnameColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -111,27 +106,25 @@ def map_column_value(self, finding, column_value): class SeverityColumnMappingStrategy(ColumnMappingStrategy): - @staticmethod def is_valid_severity(severity): - valid_severity = ('Info', 'Low', 'Medium', 'High', 'Critical') + valid_severity = ("Info", "Low", "Medium", "High", "Critical") return severity in valid_severity def __init__(self): - self.mapped_column = 'severity' + self.mapped_column = "severity" super(SeverityColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): if self.is_valid_severity(column_value): finding.severity = column_value else: - finding.severity = 'Info' + finding.severity = "Info" class DescriptionColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'summary' + self.mapped_column = "summary" super(DescriptionColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -139,9 
+132,8 @@ def map_column_value(self, finding, column_value): class MitigationColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'solution' + self.mapped_column = "solution" super(MitigationColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -149,9 +141,8 @@ def map_column_value(self, finding, column_value): class ImpactColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'vulnerability insight' + self.mapped_column = "vulnerability insight" super(ImpactColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -159,9 +150,8 @@ def map_column_value(self, finding, column_value): class ReferencesColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'specific result' + self.mapped_column = "specific result" super(ReferencesColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -169,9 +159,8 @@ def map_column_value(self, finding, column_value): class ActiveColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'active' + self.mapped_column = "active" super(ActiveColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -179,9 +168,8 @@ def map_column_value(self, finding, column_value): class VerifiedColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'verified' + self.mapped_column = "verified" super(VerifiedColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -189,9 +177,8 @@ def map_column_value(self, finding, column_value): class FalsePositiveColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'falsepositive' + self.mapped_column = "falsepositive" super(FalsePositiveColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -199,9 +186,8 @@ def map_column_value(self, finding, column_value): class DuplicateColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'duplicate' + self.mapped_column = "duplicate" super(DuplicateColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -209,7 +195,6 @@ def map_column_value(self, finding, column_value): class OpenVASCsvParser(object): - def create_chain(self): date_column_strategy = DateColumnMappingStrategy() title_column_strategy = TitleColumnMappingStrategy() @@ -264,15 +249,14 @@ def get_description_for_scan_types(self, scan_type): return "Import OpenVAS Scan in CSV format. Export as CSV Results on OpenVAS." 
def get_findings(self, filename, test): - column_names = dict() dupes = dict() chain = self.create_chain() content = filename.read() - if type(content) is bytes: - content = content.decode('utf-8') - reader = csv.reader(io.StringIO(content), delimiter=',', quotechar='"') + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = csv.reader(io.StringIO(content), delimiter=",", quotechar='"') row_number = 0 for row in reader: @@ -286,7 +270,9 @@ def get_findings(self, filename, test): column_number = 0 for column in row: - chain.process_column(column_names[column_number], column, finding) + chain.process_column( + column_names[column_number], column, finding + ) column_number += 1 if finding is not None and row_number > 0: @@ -295,7 +281,17 @@ def get_findings(self, filename, test): if finding.description is None: finding.description = "" - key = hashlib.sha256((str(finding.unsaved_endpoints[0]) + '|' + finding.severity + '|' + finding.title + '|' + finding.description).encode('utf-8')).hexdigest() + key = hashlib.sha256( + ( + str(finding.unsaved_endpoints[0]) + + "|" + + finding.severity + + "|" + + finding.title + + "|" + + finding.description + ).encode("utf-8") + ).hexdigest() if key not in dupes: dupes[key] = finding diff --git a/dojo/tools/ort/parser.py b/dojo/tools/ort/parser.py index 30ffcb853f..d2811d3e17 100644 --- a/dojo/tools/ort/parser.py +++ b/dojo/tools/ort/parser.py @@ -18,7 +18,6 @@ def get_description_for_scan_types(self, scan_type): return "Import Outpost24 endpoint vulnerability scan in XML format." def get_findings(self, json_output, test): - if json_output is None: return list() @@ -32,27 +31,32 @@ def parse_json(self, json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise ValueError("Invalid format") return tree def get_items(self, evaluatedModel, test): items = {} - packages = evaluatedModel['packages'] - dependency_trees = evaluatedModel['dependency_trees'] - rule_violations = evaluatedModel['rule_violations'] - licenses = evaluatedModel['licenses'] - rule_violations_unresolved = get_unresolved_rule_violations(rule_violations) - rule_violations_models = get_rule_violation_models(rule_violations_unresolved, packages, licenses, - dependency_trees) + packages = evaluatedModel["packages"] + dependency_trees = evaluatedModel["dependency_trees"] + rule_violations = evaluatedModel["rule_violations"] + licenses = evaluatedModel["licenses"] + rule_violations_unresolved = get_unresolved_rule_violations( + rule_violations + ) + rule_violations_models = get_rule_violation_models( + rule_violations_unresolved, packages, licenses, dependency_trees + ) for model in rule_violations_models: item = get_item(model, test) - unique_key = hashlib.md5((item.title + item.references).encode()).hexdigest() + unique_key = hashlib.md5( + (item.title + item.references).encode() + ).hexdigest() items[unique_key] = item return list(items.values()) @@ -67,16 +71,16 @@ def get_unresolved_rule_violations(rule_violations): def is_rule_violation_unresolved(rule_violation): - return 'resolutions' not in rule_violation + return "resolutions" not in rule_violation def find_in_dependency_tree(tree, package_id): - if 'pkg' in tree and tree['pkg'] == package_id: + if "pkg" in tree and tree["pkg"] == package_id: return True else: - if 'children' in tree: + if "children" in tree: found_in_child = False - for child in 
tree['children']: + for child in tree["children"]: if found_in_child: break else: @@ -90,57 +94,69 @@ def get_project_ids_for_package(dependency_trees, package_id): project_ids = [] for project in dependency_trees: if find_in_dependency_tree(project, package_id): - project_ids.append(project['pkg']) + project_ids.append(project["pkg"]) return project_ids def get_name_id_for_package(packages, package__id): name = "" for package in packages: - if package['_id'] == package__id: - name = package['id'] + if package["_id"] == package__id: + name = package["id"] break return name -def get_rule_violation_models(rule_violations_unresolved, packages, licenses, dependency_trees): +def get_rule_violation_models( + rule_violations_unresolved, packages, licenses, dependency_trees +): models = [] for violation in rule_violations_unresolved: - models.append(get_rule_violation_model(violation, packages, licenses, dependency_trees)) + models.append( + get_rule_violation_model( + violation, packages, licenses, dependency_trees + ) + ) return models -def get_rule_violation_model(rule_violation_unresolved, packages, licenses, dependency_trees): - project_ids = get_project_ids_for_package(dependency_trees, rule_violation_unresolved['pkg']) +def get_rule_violation_model( + rule_violation_unresolved, packages, licenses, dependency_trees +): + project_ids = get_project_ids_for_package( + dependency_trees, rule_violation_unresolved["pkg"] + ) project_names = [] for id in project_ids: project_names.append(get_name_id_for_package(packages, id)) - package = find_package_by_id(packages, rule_violation_unresolved['pkg']) - if 'license' in rule_violation_unresolved: - license_tmp = rule_violation_unresolved['license'] + package = find_package_by_id(packages, rule_violation_unresolved["pkg"]) + if "license" in rule_violation_unresolved: + license_tmp = rule_violation_unresolved["license"] else: - license_tmp = 'unset' - if 'license_source' not in rule_violation_unresolved: - rule_violation_unresolved['license_source'] = 'unset' + license_tmp = "unset" + if "license_source" not in rule_violation_unresolved: + rule_violation_unresolved["license_source"] = "unset" license_id = find_license_id(licenses, license_tmp) - return RuleViolationModel(package, license_id, project_names, rule_violation_unresolved) + return RuleViolationModel( + package, license_id, project_names, rule_violation_unresolved + ) def find_package_by_id(packages, pkg_id): package = None for pkg in packages: - if pkg['_id'] == pkg_id: + if pkg["_id"] == pkg_id: package = pkg break return package def find_license_id(licenses, license_id): - id = '' + id = "" for lic in licenses: - if lic['_id'] == license_id: - id = lic['id'] + if lic["_id"] == license_id: + id = lic["id"] break return id @@ -155,12 +171,14 @@ def get_item(model, test): severity = get_severity(model.rule_violation) - finding = Finding(title=model.rule_violation['rule'], - test=test, - references=model.rule_violation['message'], - description=desc, - severity=severity, - static_finding=True) + finding = Finding( + title=model.rule_violation["rule"], + test=test, + references=model.rule_violation["message"], + description=desc, + severity=severity, + static_finding=True, + ) return finding @@ -173,20 +191,17 @@ def get_item(model, test): # projects: [] # rule_violation: dict -RuleViolationModel = namedtuple('RuleViolationModel', [ - 'pkg', - 'license_id', - 'projects', - 'rule_violation' -]) +RuleViolationModel = namedtuple( + "RuleViolationModel", ["pkg", "license_id", "projects", 
"rule_violation"] +) def get_severity(rule_violation): - if rule_violation['severity'] == 'ERROR': - return 'High' - elif rule_violation['severity'] == 'WARNING': - return 'Medium' - elif rule_violation['severity'] == 'HINT': - return 'Info' + if rule_violation["severity"] == "ERROR": + return "High" + elif rule_violation["severity"] == "WARNING": + return "Medium" + elif rule_violation["severity"] == "HINT": + return "Info" else: - return 'Critical' + return "Critical" diff --git a/dojo/tools/ossindex_devaudit/parser.py b/dojo/tools/ossindex_devaudit/parser.py index 7cee84546b..8d04bac2d4 100644 --- a/dojo/tools/ossindex_devaudit/parser.py +++ b/dojo/tools/ossindex_devaudit/parser.py @@ -20,7 +20,6 @@ def get_description_for_scan_types(self, scan_type): return "Import OssIndex Devaudit SCA Scan in json format." def get_findings(self, json_file, test): - tree = self.parse_json(json_file) if tree: @@ -39,66 +38,77 @@ def parse_json(self, json_file): return tree def get_items(self, tree, test): - items = {} results = {key: value for (key, value) in tree.items()} - for package in results.get('Packages', []): - package_data = package['Package'] - if len(package.get('Vulnerabilities', [])) > 0: - for vulnerability in package.get('Vulnerabilities', []): + for package in results.get("Packages", []): + package_data = package["Package"] + if len(package.get("Vulnerabilities", [])) > 0: + for vulnerability in package.get("Vulnerabilities", []): item = get_item( - dependency_name=package_data['name'], - dependency_version=package_data['version'], - dependency_source=package_data['pm'], + dependency_name=package_data["name"], + dependency_version=package_data["version"], + dependency_source=package_data["pm"], vulnerability=vulnerability, - test=test + test=test, ) - unique_key = vulnerability['id'] + unique_key = vulnerability["id"] items[unique_key] = item return items.values() -def get_item(dependency_name, dependency_version, dependency_source, vulnerability, test): - - cwe_data = vulnerability.get('cwe', 'CWE-1035') - if cwe_data is None or cwe_data.startswith('CWE') is False: - cwe_data = 'CWE-1035' +def get_item( + dependency_name, dependency_version, dependency_source, vulnerability, test +): + cwe_data = vulnerability.get("cwe", "CWE-1035") + if cwe_data is None or cwe_data.startswith("CWE") is False: + cwe_data = "CWE-1035" try: - cwe = int(cwe_data.split('-')[1]) + cwe = int(cwe_data.split("-")[1]) except ValueError: - raise ValueError('Attempting to convert the CWE value to an integer failed') - - finding = Finding(title=dependency_source + ":" + dependency_name + " - " + "(" + dependency_version + ", " + cwe_data + ")", - test=test, - severity=get_severity(vulnerability.get('cvssScore', '')), - description=vulnerability['title'], - cwe=cwe, - cvssv3=vulnerability['cvssVector'].replace('CVSS:3.0', ''), - mitigation='Upgrade the component to the latest non-vulnerable version, or remove the package if it is not in use.', - references=vulnerability.get('reference', ''), - false_p=False, - duplicate=False, - out_of_scope=False, - mitigated=None, - static_finding=False, - dynamic_finding=False, - impact="No impact provided by scan") + raise ValueError( + "Attempting to convert the CWE value to an integer failed" + ) + + finding = Finding( + title=dependency_source + + ":" + + dependency_name + + " - " + + "(" + + dependency_version + + ", " + + cwe_data + + ")", + test=test, + severity=get_severity(vulnerability.get("cvssScore", "")), + description=vulnerability["title"], + cwe=cwe, + 
cvssv3=vulnerability["cvssVector"].replace("CVSS:3.0", ""), + mitigation="Upgrade the component to the latest non-vulnerable version, or remove the package if it is not in use.", + references=vulnerability.get("reference", ""), + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated=None, + static_finding=False, + dynamic_finding=False, + impact="No impact provided by scan", + ) return finding def get_severity(cvss_score): - - result = 'Info' + result = "Info" if cvss_score != "": ratings = [ - ('Critical', 9.0, 10.0), - ('High', 7.0, 8.9), - ('Medium', 4.0, 6.9), - ('Low', 0.1, 3.9) + ("Critical", 9.0, 10.0), + ("High", 7.0, 8.9), + ("Medium", 4.0, 6.9), + ("Low", 0.1, 3.9), ] for severity, low, high in ratings: diff --git a/dojo/tools/outpost24/parser.py b/dojo/tools/outpost24/parser.py index 13be837541..8fd244cc42 100644 --- a/dojo/tools/outpost24/parser.py +++ b/dojo/tools/outpost24/parser.py @@ -8,7 +8,6 @@ class Outpost24Parser(object): - def get_scan_types(self): return ["Outpost24 Scan"] @@ -21,55 +20,72 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, file, test): tree = ElementTree.parse(file) items = list() - for detail in tree.iterfind('.//detaillist/detail'): + for detail in tree.iterfind(".//detaillist/detail"): # finding details - title = detail.findtext('name') + title = detail.findtext("name") # date = detail.findtext('date') # can be used for Finding.date? - vulnerability_id = detail.findtext('./cve/id') - url = detail.findtext('./referencelist/reference/[type=\'solution\']/../url') - description = detail.findtext('description') - mitigation = detail.findtext('solution') - impact = detail.findtext('information') - cvss_score = detail.findtext('cvss_v3_score') or detail.findtext('cvss_score') + vulnerability_id = detail.findtext("./cve/id") + url = detail.findtext( + "./referencelist/reference/[type='solution']/../url" + ) + description = detail.findtext("description") + mitigation = detail.findtext("solution") + impact = detail.findtext("information") + cvss_score = detail.findtext("cvss_v3_score") or detail.findtext( + "cvss_score" + ) if not cvss_score: cvss_score = 0 if cvss_score: score = float(cvss_score) if score < 4: - severity = 'Low' + severity = "Low" elif score < 7: - severity = 'Medium' + severity = "Medium" elif score < 9: - severity = 'High' + severity = "High" else: - severity = 'Critical' + severity = "Critical" else: - risk = int(detail.findtext('risk')) + risk = int(detail.findtext("risk")) if risk == 0: - severity = 'Low' + severity = "Low" elif risk == 1: - severity = 'Medium' + severity = "Medium" elif risk == 2: - severity = 'High' + severity = "High" else: - severity = 'Critical' - cvss_description = detail.findtext('cvss_vector_description') - cvss_vector = detail.findtext('cvss_v3_vector') or detail.findtext('cvss_vector') - severity_justification = "{}\n{}".format(cvss_score, cvss_description) - finding = Finding(title=title, test=test, url=url, description=description, mitigation=mitigation, - impact=impact, severity=severity, - severity_justification=severity_justification) + severity = "Critical" + cvss_description = detail.findtext("cvss_vector_description") + cvss_vector = detail.findtext("cvss_v3_vector") or detail.findtext( + "cvss_vector" + ) + severity_justification = "{}\n{}".format( + cvss_score, cvss_description + ) + finding = Finding( + title=title, + test=test, + url=url, + description=description, + mitigation=mitigation, + impact=impact, + severity=severity, + 
severity_justification=severity_justification, + ) if vulnerability_id: finding.unsaved_vulnerability_ids = [vulnerability_id] # endpoint details - host = detail.findtext('ip') + host = detail.findtext("ip") if host: - protocol = detail.findtext('./portinfo/service') + protocol = detail.findtext("./portinfo/service") try: - port = int(detail.findtext('./portinfo/portnumber')) - except ValueError as ve: + port = int(detail.findtext("./portinfo/portnumber")) + except ValueError: logger.debug("General port given. Assigning 0 as default.") port = 0 - finding.unsaved_endpoints.append(Endpoint(protocol=protocol, host=host, port=port)) + finding.unsaved_endpoints.append( + Endpoint(protocol=protocol, host=host, port=port) + ) items.append(finding) return items diff --git a/dojo/tools/php_security_audit_v2/parser.py b/dojo/tools/php_security_audit_v2/parser.py index 4df82d3de5..f1ee8022c1 100644 --- a/dojo/tools/php_security_audit_v2/parser.py +++ b/dojo/tools/php_security_audit_v2/parser.py @@ -5,7 +5,6 @@ class PhpSecurityAuditV2Parser(object): - def get_scan_types(self): return ["PHP Security Audit v2"] @@ -18,8 +17,8 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): tree = filename.read() try: - data = json.loads(str(tree, 'utf-8')) - except: + data = json.loads(str(tree, "utf-8")) + except Exception: data = json.loads(tree) dupes = dict() @@ -36,9 +35,16 @@ def get_findings(self, filename, test): findingdetail += "Rule Source: " + issue["source"] + "\n" findingdetail += "Details: " + issue["message"] + "\n" - sev = PhpSecurityAuditV2Parser.get_severity_word(issue["severity"]) + sev = PhpSecurityAuditV2Parser.get_severity_word( + issue["severity"] + ) - dupe_key = title + filepath + str(issue["line"]) + str(issue["column"]) + dupe_key = ( + title + + filepath + + str(issue["line"]) + + str(issue["column"]) + ) if dupe_key in dupes: find = dupes[dupe_key] @@ -57,7 +63,7 @@ def get_findings(self, filename, test): ) dupes[dupe_key] = find - findingdetail = '' + findingdetail = "" return list(dupes.values()) @@ -66,10 +72,10 @@ def get_severity_word(severity): sev = math.ceil(severity / 2) if sev == 5: - return 'Critical' + return "Critical" elif sev == 4: - return 'High' + return "High" elif sev == 3: - return 'Medium' + return "Medium" else: - return 'Low' + return "Low" diff --git a/dojo/tools/php_symfony_security_check/parser.py b/dojo/tools/php_symfony_security_check/parser.py index fbc2e8d8b5..c5fb511880 100644 --- a/dojo/tools/php_symfony_security_check/parser.py +++ b/dojo/tools/php_symfony_security_check/parser.py @@ -4,7 +4,6 @@ class PhpSymfonySecurityCheckParser(object): - def get_scan_types(self): return ["PHP Symfony Security Check"] @@ -24,10 +23,10 @@ def parse_json(self, json_file): try: data = json_file.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise Exception("Invalid format") return tree @@ -36,41 +35,54 @@ def get_items(self, tree, test): items = {} for dependency_name, dependency_data in list(tree.items()): - advisories = dependency_data.get('advisories') - dependency_version = dependency_data['version'] - if dependency_version and dependency_version.startswith('v'): + advisories = dependency_data.get("advisories") + dependency_version = dependency_data["version"] + if dependency_version and dependency_version.startswith("v"): dependency_version = dependency_version[1:] for advisory in advisories: - 
item = get_item(dependency_name, dependency_version, advisory, test) - unique_key = str(dependency_name) + str(dependency_data['version'] + str(advisory['cve'])) + item = get_item( + dependency_name, dependency_version, advisory, test + ) + unique_key = str(dependency_name) + str( + dependency_data["version"] + str(advisory["cve"]) + ) items[unique_key] = item return list(items.values()) def get_item(dependency_name, dependency_version, advisory, test): - - finding = Finding(title=dependency_name + " - " + "(" + dependency_version + ", " + advisory['cve'] + ")", - test=test, - # TODO decide how to handle the fact we don't have a severity. None will lead to problems handling minimum severity on import - severity='Info', - description=advisory['title'], - # TODO Decide if the default '1035: vulnerable 3rd party component' is OK to use? - cwe=1035, - mitigation='upgrade', - references=advisory['link'], - false_p=False, - duplicate=False, - out_of_scope=False, - mitigated=None, - impact="No impact provided", - static_finding=True, - dynamic_finding=False, - component_name=dependency_name, - component_version=dependency_version) - - if advisory['cve']: - finding.unsaved_vulnerability_ids = [advisory['cve']] + finding = Finding( + title=dependency_name + + " - " + + "(" + + dependency_version + + ", " + + advisory["cve"] + + ")", + test=test, + # TODO decide how to handle the fact we don't have a severity. None + # will lead to problems handling minimum severity on import + severity="Info", + description=advisory["title"], + # TODO Decide if the default '1035: vulnerable 3rd party component' is + # OK to use? + cwe=1035, + mitigation="upgrade", + references=advisory["link"], + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated=None, + impact="No impact provided", + static_finding=True, + dynamic_finding=False, + component_name=dependency_name, + component_version=dependency_version, + ) + + if advisory["cve"]: + finding.unsaved_vulnerability_ids = [advisory["cve"]] return finding diff --git a/dojo/tools/pip_audit/parser.py b/dojo/tools/pip_audit/parser.py index 7c2871a05c..726667987f 100644 --- a/dojo/tools/pip_audit/parser.py +++ b/dojo/tools/pip_audit/parser.py @@ -4,7 +4,6 @@ class PipAuditParser: - def get_scan_types(self): return ["pip-audit Scan"] @@ -18,39 +17,40 @@ def requires_file(self, scan_type): return True def get_findings(self, scan_file, test): - data = json.load(scan_file) findings = list() for item in data: - vulnerabilities = item.get('vulns', []) + vulnerabilities = item.get("vulns", []) if vulnerabilities: - component_name = item['name'] - component_version = item.get('version') + component_name = item["name"] + component_version = item.get("version") for vulnerability in vulnerabilities: - vuln_id = vulnerability.get('id') - vuln_fix_versions = vulnerability.get('fix_versions') - vuln_description = vulnerability.get('description') + vuln_id = vulnerability.get("id") + vuln_fix_versions = vulnerability.get("fix_versions") + vuln_description = vulnerability.get("description") - title = f'{vuln_id} in {component_name}:{component_version}' + title = ( + f"{vuln_id} in {component_name}:{component_version}" + ) - description = '' + description = "" description += vuln_description mitigation = None if vuln_fix_versions: - mitigation = 'Upgrade to version:' + mitigation = "Upgrade to version:" if len(vuln_fix_versions) == 1: - mitigation += f' {vuln_fix_versions[0]}' + mitigation += f" {vuln_fix_versions[0]}" else: for fix_version in vuln_fix_versions: - mitigation 
+= f'\n- {fix_version}' + mitigation += f"\n- {fix_version}" finding = Finding( test=test, title=title, cwe=1352, - severity='Medium', + severity="Medium", description=description, mitigation=mitigation, component_name=component_name, diff --git a/dojo/tools/pmd/parser.py b/dojo/tools/pmd/parser.py index 22296ebe8c..d3f8c5eda2 100644 --- a/dojo/tools/pmd/parser.py +++ b/dojo/tools/pmd/parser.py @@ -5,7 +5,6 @@ class PmdParser(object): - def get_scan_types(self): return ["PMD Scan"] @@ -19,9 +18,11 @@ def get_findings(self, filename, test): dupes = dict() content = filename.read() - if type(content) is bytes: - content = content.decode('utf-8') - reader = list(csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"')) + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = list( + csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"') + ) for row in reader: finding = Finding(test=test) @@ -40,7 +41,9 @@ def get_findings(self, filename, test): priority = "Info" finding.severity = priority - description = "Description: {}\n".format(row['Description'].strip()) + description = "Description: {}\n".format( + row["Description"].strip() + ) description += "Rule set: {}\n".format(row["Rule set"].strip()) description += "Problem: {}\n".format(row["Problem"].strip()) description += "Package: {}\n".format(row["Package"].strip()) @@ -50,12 +53,16 @@ def get_findings(self, filename, test): finding.impact = "No impact provided" finding.mitigation = "No mitigation provided" - key = hashlib.sha256("|".join([ - finding.title, - finding.description, - finding.file_path, - finding.line - ]).encode("utf-8")).hexdigest() + key = hashlib.sha256( + "|".join( + [ + finding.title, + finding.description, + finding.file_path, + finding.line, + ] + ).encode("utf-8") + ).hexdigest() if key not in dupes: dupes[key] = finding diff --git a/dojo/tools/popeye/parser.py b/dojo/tools/popeye/parser.py index 6c49a27fb5..67e176a911 100644 --- a/dojo/tools/popeye/parser.py +++ b/dojo/tools/popeye/parser.py @@ -22,20 +22,41 @@ def get_findings(self, file, test): data = json.load(file) dupes = dict() - for sanitizer in data['popeye']['sanitizers']: - issues = sanitizer.get('issues') + for sanitizer in data["popeye"]["sanitizers"]: + issues = sanitizer.get("issues") if issues: for issue_group, issue_list in issues.items(): for issue in issue_list: - if issue['level'] != 0: - title = sanitizer['sanitizer'] + " " + issue_group + " " + issue['message'] - severity = self.get_defect_dojo_severity(issue['level']) - description = "**Sanitizer** : " + sanitizer['sanitizer'] + "\n\n" + \ - "**Resource** : " + issue_group + "\n\n" + \ - "**Group** : " + issue['group'] + "\n\n" + \ - "**Severity** : " + self.get_popeye_level_string(issue['level']) + "\n\n" + \ - "**Message** : " + issue['message'] - vuln_id_from_tool = re.search(r'\[(POP-\d+)\].+', issue['message']).group(1) + if issue["level"] != 0: + title = ( + sanitizer["sanitizer"] + + " " + + issue_group + + " " + + issue["message"] + ) + severity = self.get_defect_dojo_severity( + issue["level"] + ) + description = ( + "**Sanitizer** : " + + sanitizer["sanitizer"] + + "\n\n" + + "**Resource** : " + + issue_group + + "\n\n" + + "**Group** : " + + issue["group"] + + "\n\n" + + "**Severity** : " + + self.get_popeye_level_string(issue["level"]) + + "\n\n" + + "**Message** : " + + issue["message"] + ) + vuln_id_from_tool = re.search( + r"\[(POP-\d+)\].+", issue["message"] + ).group(1) finding = Finding( title=title, test=test, @@ -46,7 +67,9 @@ def 
get_findings(self, file, test): vuln_id_from_tool=vuln_id_from_tool, ) # internal de-duplication - dupe_key = hashlib.sha256(str(description + title).encode('utf-8')).hexdigest() + dupe_key = hashlib.sha256( + str(description + title).encode("utf-8") + ).hexdigest() if dupe_key not in dupes: dupes[dupe_key] = finding return list(dupes.values()) diff --git a/dojo/tools/pwn_sast/parser.py b/dojo/tools/pwn_sast/parser.py index d25ebaff62..f86b8cbd2a 100644 --- a/dojo/tools/pwn_sast/parser.py +++ b/dojo/tools/pwn_sast/parser.py @@ -19,17 +19,16 @@ def get_description_for_scan_types(self, scan_type): return "Import pwn_sast Driver findings in JSON format." def get_findings(self, filename, test): - results = json.load(filename) if results is not None: - report_name = results.get("report_name") + results.get("report_name") data_arr = results.get("data") findings = {} for data_hash in data_arr: - timestamp = data_hash.get("timestamp") + data_hash.get("timestamp") security_references = data_hash.get("security_references") if security_references is not None: @@ -54,37 +53,45 @@ def get_findings(self, filename, test): offending_file = None line_no_and_contents = data_hash.get("line_no_and_contents") - test_case_filter = data_hash.get("test_case_filter") - steps_to_reproduce = "\n".join([ - "Install pwn_sast Driver via: https://github.com/0dayinc/pwn#installation", - "Execute the pwn_sast Driver via:", - f"```pwn_sast --dir-path . --uri-source-root {git_repo_root_uri} -s```" - ]) + data_hash.get("test_case_filter") + steps_to_reproduce = "\n".join( + [ + "Install pwn_sast Driver via: https://github.com/0dayinc/pwn#installation", + "Execute the pwn_sast Driver via:", + f"```pwn_sast --dir-path . --uri-source-root {git_repo_root_uri} -s```", + ] + ) for line in line_no_and_contents: offending_uri = f"{git_repo_root_uri}/{offending_file}" line_no = line.get("line_no") contents = line.get("contents") author = line.get("author") - severity = 'Info' - description = "\n".join([ - f"SAST Module: {sast_module}", - f"Offending URI: {offending_uri}", - f"Line: {line_no}", - f"Committed By: {author}", - "Line Contents:", - f"```{contents}```" - ]) - - impact = "\n".join([ - f"Security Control Impacted: {section}", - f"NIST 800-53 Security Control Details: {nist_800_53_uri}", - f"CWE Details: {cwe_uri}" - ]) - - mitigation = "\n".join([ - f"NIST 800-53 Security Control Details / Mitigation Strategy: {nist_800_53_uri}", - ]) + severity = "Info" + description = "\n".join( + [ + f"SAST Module: {sast_module}", + f"Offending URI: {offending_uri}", + f"Line: {line_no}", + f"Committed By: {author}", + "Line Contents:", + f"```{contents}```", + ] + ) + + impact = "\n".join( + [ + f"Security Control Impacted: {section}", + f"NIST 800-53 Security Control Details: {nist_800_53_uri}", + f"CWE Details: {cwe_uri}", + ] + ) + + mitigation = "\n".join( + [ + f"NIST 800-53 Security Control Details / Mitigation Strategy: {nist_800_53_uri}", + ] + ) unique_finding_key = hashlib.sha256( (offending_uri + contents).encode("utf-8") @@ -106,7 +113,7 @@ def get_findings(self, filename, test): cwe=cwe_id, nb_occurences=1, steps_to_reproduce=steps_to_reproduce, - file_path=offending_file + file_path=offending_file, ) findings[unique_finding_key] = finding diff --git a/dojo/tools/qualys/csv_parser.py b/dojo/tools/qualys/csv_parser.py index e7377153dd..e210c7aea9 100644 --- a/dojo/tools/qualys/csv_parser.py +++ b/dojo/tools/qualys/csv_parser.py @@ -19,11 +19,9 @@ def parse_csv(csv_file) -> [Finding]: content = csv_file.read() if 
isinstance(content, bytes): - content = content.decode('utf-8') + content = content.decode("utf-8") csv_reader = csv.DictReader( - io.StringIO(content), - delimiter=',', - quotechar='"' + io.StringIO(content), delimiter=",", quotechar='"' ) report_findings = get_report_findings(csv_reader) @@ -45,7 +43,7 @@ def get_report_findings(csv_reader) -> [dict]: report_findings = [] for row in csv_reader: - if row.get('Title') and row['Title'] != 'Title': + if row.get("Title") and row["Title"] != "Title": report_findings.append(row) return report_findings @@ -64,27 +62,31 @@ def _extract_cvss_vectors(cvss_base, cvss_temporal): A CVSS3 Vector including both Base and Temporal if available """ - vector_pattern = r'^\d{1,2}.\d \((.*)\)' - cvss_vector = 'CVSS:3.0/' + vector_pattern = r"^\d{1,2}.\d \((.*)\)" + cvss_vector = "CVSS:3.0/" if cvss_base: try: cvss_vector += re.search(vector_pattern, cvss_base).group(1) except IndexError: - _logger.error(f'CVSS3 Base Vector not found in {cvss_base}') + _logger.error(f"CVSS3 Base Vector not found in {cvss_base}") except AttributeError: - _logger.error(f'CVSS3 Base Vector not found in {cvss_base}') + _logger.error(f"CVSS3 Base Vector not found in {cvss_base}") if cvss_temporal: try: - cvss_temporal_vector = re.search(vector_pattern, cvss_temporal).group(1) - cvss_vector += '/' + cvss_temporal_vector = re.search( + vector_pattern, cvss_temporal + ).group(1) + cvss_vector += "/" cvss_vector += cvss_temporal_vector except IndexError: _logger.error( - f'CVSS3 Temporal Vector not found in {cvss_base}') + f"CVSS3 Temporal Vector not found in {cvss_base}" + ) except AttributeError: _logger.error( - f'CVSS3 Temporal Vector not found in {cvss_base}') + f"CVSS3 Temporal Vector not found in {cvss_base}" + ) return cvss_vector @@ -98,42 +100,55 @@ def build_findings_from_dict(report_findings: [dict]) -> [Finding]: """ severity_lookup = { - '1': 'Info', - '2': 'Low', - '3': 'Medium', - '4': 'High', - '5': 'Critical'} + "1": "Info", + "2": "Low", + "3": "Medium", + "4": "High", + "5": "Critical", + } dojo_findings = [] for report_finding in report_findings: - if report_finding.get('FQDN'): - endpoint = Endpoint.from_uri(report_finding.get('FQDN')) + if report_finding.get("FQDN"): + endpoint = Endpoint.from_uri(report_finding.get("FQDN")) else: - endpoint = Endpoint(host=report_finding['IP']) + endpoint = Endpoint(host=report_finding["IP"]) finding = Finding( title=f"QID-{report_finding['QID']} | {report_finding['Title']}", - mitigation=report_finding['Solution'], + mitigation=report_finding["Solution"], description=f"{report_finding['Threat']}\nResult Evidence: \n{report_finding.get('Threat', 'Not available')}", - severity=severity_lookup.get(report_finding['Severity'], 'Info'), - impact=report_finding['Impact'], - date=datetime.strptime(report_finding['Last Detected'], "%m/%d/%Y %H:%M:%S").date(), - vuln_id_from_tool=report_finding['QID'], + severity=severity_lookup.get(report_finding["Severity"], "Info"), + impact=report_finding["Impact"], + date=datetime.strptime( + report_finding["Last Detected"], "%m/%d/%Y %H:%M:%S" + ).date(), + vuln_id_from_tool=report_finding["QID"], cvssv3=_extract_cvss_vectors( - report_finding['CVSS3 Base'], - report_finding['CVSS3 Temporal'])) - - cve_data = report_finding.get('CVE ID') - finding.unsaved_vulnerability_ids = cve_data.split(',') if ',' in cve_data else [cve_data] - - # Qualys reports regression findings as active, but with a Date Last Fixed. 
- if report_finding['Date Last Fixed']: - finding.mitigated = datetime.strptime(report_finding['Date Last Fixed'], "%m/%d/%Y %H:%M:%S") + report_finding["CVSS3 Base"], report_finding["CVSS3 Temporal"] + ), + ) + + cve_data = report_finding.get("CVE ID") + finding.unsaved_vulnerability_ids = ( + cve_data.split(",") if "," in cve_data else [cve_data] + ) + + # Qualys reports regression findings as active, but with a Date Last + # Fixed. + if report_finding["Date Last Fixed"]: + finding.mitigated = datetime.strptime( + report_finding["Date Last Fixed"], "%m/%d/%Y %H:%M:%S" + ) finding.is_mitigated = True else: finding.is_mitigated = False - finding.active = report_finding['Vuln Status'] in ('Active', 'Re-Opened', 'New') + finding.active = report_finding["Vuln Status"] in ( + "Active", + "Re-Opened", + "New", + ) if finding.active: finding.mitigated = None diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py index a757cb4733..d86c7f4c50 100644 --- a/dojo/tools/qualys/parser.py +++ b/dojo/tools/qualys/parser.py @@ -10,41 +10,43 @@ logger = logging.getLogger(__name__) -CUSTOM_HEADERS = {'CVSS_score': 'CVSS Score', - 'ip_address': 'IP Address', - 'fqdn': 'FQDN', - 'os': 'OS', - 'port_status': 'Port', - 'vuln_name': 'Vulnerability', - 'vuln_description': 'Description', - 'solution': 'Solution', - 'links': 'Links', - 'cve': 'CVE', - 'vuln_severity': 'Severity', - 'QID': 'QID', - 'first_found': 'First Found', - 'last_found': 'Last Found', - 'found_times': 'Found Times', - 'category': 'Category' - } - -REPORT_HEADERS = ['CVSS_score', - 'ip_address', - 'fqdn', - 'os', - 'port_status', - 'vuln_name', - 'vuln_description', - 'solution', - 'links', - 'cve', - 'Severity', - 'QID', - 'first_found', - 'last_found', - 'found_times', - 'category', - ] +CUSTOM_HEADERS = { + "CVSS_score": "CVSS Score", + "ip_address": "IP Address", + "fqdn": "FQDN", + "os": "OS", + "port_status": "Port", + "vuln_name": "Vulnerability", + "vuln_description": "Description", + "solution": "Solution", + "links": "Links", + "cve": "CVE", + "vuln_severity": "Severity", + "QID": "QID", + "first_found": "First Found", + "last_found": "Last Found", + "found_times": "Found Times", + "category": "Category", +} + +REPORT_HEADERS = [ + "CVSS_score", + "ip_address", + "fqdn", + "os", + "port_status", + "vuln_name", + "vuln_description", + "solution", + "links", + "cve", + "Severity", + "QID", + "first_found", + "last_found", + "found_times", + "category", +] def htmltext(blob): @@ -59,11 +61,13 @@ def split_cvss(value, _temp): return if len(value) > 4: split = value.split(" (") - _temp['CVSS_value'] = float(split[0]) + _temp["CVSS_value"] = float(split[0]) # remove ")" at the end - _temp['CVSS_vector'] = CVSS3("CVSS:3.0/" + split[1][:-1]).clean_vector() + _temp["CVSS_vector"] = CVSS3( + "CVSS:3.0/" + split[1][:-1] + ).clean_vector() else: - _temp['CVSS_value'] = float(value) + _temp["CVSS_value"] = float(value) def parse_finding(host, tree): @@ -71,158 +75,173 @@ def parse_finding(host, tree): issue_row = {} # IP ADDRESS - issue_row['ip_address'] = host.findtext('IP') + issue_row["ip_address"] = host.findtext("IP") # FQDN - issue_row['fqdn'] = host.findtext('DNS') + issue_row["fqdn"] = host.findtext("DNS") # Create Endpoint - if issue_row['fqdn']: - ep = Endpoint(host=issue_row['fqdn']) + if issue_row["fqdn"]: + ep = Endpoint(host=issue_row["fqdn"]) else: - ep = Endpoint(host=issue_row['ip_address']) + ep = Endpoint(host=issue_row["ip_address"]) # OS NAME - issue_row['os'] = host.findtext('OPERATING_SYSTEM') + 
issue_row["os"] = host.findtext("OPERATING_SYSTEM") # Scan details - for vuln_details in host.iterfind('VULN_INFO_LIST/VULN_INFO'): + for vuln_details in host.iterfind("VULN_INFO_LIST/VULN_INFO"): _temp = issue_row # Port - _gid = vuln_details.find('QID').attrib['id'] - _port = vuln_details.findtext('PORT') - _temp['port_status'] = _port - - _category = str(vuln_details.findtext('CATEGORY')) - _result = str(vuln_details.findtext('RESULT')) - _first_found = str(vuln_details.findtext('FIRST_FOUND')) - _last_found = str(vuln_details.findtext('LAST_FOUND')) - _times_found = str(vuln_details.findtext('TIMES_FOUND')) - - _temp['date'] = datetime.datetime.strptime(vuln_details.findtext('LAST_FOUND'), "%Y-%m-%dT%H:%M:%SZ").date() + _gid = vuln_details.find("QID").attrib["id"] + _port = vuln_details.findtext("PORT") + _temp["port_status"] = _port + + _category = str(vuln_details.findtext("CATEGORY")) + _result = str(vuln_details.findtext("RESULT")) + _first_found = str(vuln_details.findtext("FIRST_FOUND")) + _last_found = str(vuln_details.findtext("LAST_FOUND")) + _times_found = str(vuln_details.findtext("TIMES_FOUND")) + + _temp["date"] = datetime.datetime.strptime( + vuln_details.findtext("LAST_FOUND"), "%Y-%m-%dT%H:%M:%SZ" + ).date() # Vuln_status - status = vuln_details.findtext('VULN_STATUS') + status = vuln_details.findtext("VULN_STATUS") if status == "Active" or status == "Re-Opened" or status == "New": - _temp['active'] = True - _temp['mitigated'] = False - _temp['mitigation_date'] = None + _temp["active"] = True + _temp["mitigated"] = False + _temp["mitigation_date"] = None else: - _temp['active'] = False - _temp['mitigated'] = True - last_fixed = vuln_details.findtext('LAST_FIXED') + _temp["active"] = False + _temp["mitigated"] = True + last_fixed = vuln_details.findtext("LAST_FIXED") if last_fixed is not None: - _temp['mitigation_date'] = datetime.datetime.strptime(last_fixed, "%Y-%m-%dT%H:%M:%SZ").date() + _temp["mitigation_date"] = datetime.datetime.strptime( + last_fixed, "%Y-%m-%dT%H:%M:%SZ" + ).date() else: - _temp['mitigation_date'] = None + _temp["mitigation_date"] = None # read cvss value if present - cvss3 = vuln_details.findtext('CVSS3_FINAL') + cvss3 = vuln_details.findtext("CVSS3_FINAL") if cvss3 is not None and cvss3 != "-": split_cvss(cvss3, _temp) else: - cvss2 = vuln_details.findtext('CVSS_FINAL') + cvss2 = vuln_details.findtext("CVSS_FINAL") if cvss2 is not None and cvss2 != "-": split_cvss(cvss2, _temp) # DefectDojo does not support cvssv2 - _temp['CVSS_vector'] = None + _temp["CVSS_vector"] = None - search = ".//GLOSSARY/VULN_DETAILS_LIST/VULN_DETAILS[@id='{}']".format(_gid) + search = ".//GLOSSARY/VULN_DETAILS_LIST/VULN_DETAILS[@id='{}']".format( + _gid + ) vuln_item = tree.find(search) if vuln_item is not None: finding = Finding() # Vuln name - _temp['vuln_name'] = vuln_item.findtext('TITLE') + _temp["vuln_name"] = vuln_item.findtext("TITLE") # Vuln Description - _description = str(vuln_item.findtext('THREAT')) + _description = str(vuln_item.findtext("THREAT")) # Solution Strips Heading Workaround(s) # _temp['solution'] = re.sub('Workaround(s)?:.+\n', '', htmltext(vuln_item.findtext('SOLUTION'))) - _temp['solution'] = htmltext(vuln_item.findtext('SOLUTION')) + _temp["solution"] = htmltext(vuln_item.findtext("SOLUTION")) # Vuln_description - _temp['vuln_description'] = "\n".join([htmltext(_description), - htmltext("Category: " + _category), - htmltext("QID: " + str(_gid)), - htmltext("Port: " + str(_port)), - htmltext("Result Evidence: " + _result), - htmltext("First 
Found: " + _first_found), - htmltext("Last Found: " + _last_found), - htmltext("Times Found: " + _times_found), - ]) + _temp["vuln_description"] = "\n".join( + [ + htmltext(_description), + htmltext("Category: " + _category), + htmltext("QID: " + str(_gid)), + htmltext("Port: " + str(_port)), + htmltext("Result Evidence: " + _result), + htmltext("First Found: " + _first_found), + htmltext("Last Found: " + _last_found), + htmltext("Times Found: " + _times_found), + ] + ) # Impact description - _temp['IMPACT'] = htmltext(vuln_item.findtext('IMPACT')) + _temp["IMPACT"] = htmltext(vuln_item.findtext("IMPACT")) # read cvss value if present and not already read from vuln - if _temp.get('CVSS_value') is None: - cvss3 = vuln_item.findtext('CVSS3_SCORE/CVSS3_BASE') - cvss2 = vuln_item.findtext('CVSS_SCORE/CVSS_BASE') + if _temp.get("CVSS_value") is None: + cvss3 = vuln_item.findtext("CVSS3_SCORE/CVSS3_BASE") + cvss2 = vuln_item.findtext("CVSS_SCORE/CVSS_BASE") if cvss3 is not None and cvss3 != "-": split_cvss(cvss3, _temp) else: - cvss2 = vuln_item.findtext('CVSS_FINAL') + cvss2 = vuln_item.findtext("CVSS_FINAL") if cvss2 is not None and cvss2 != "-": split_cvss(cvss2, _temp) # DefectDojo does not support cvssv2 - _temp['CVSS_vector'] = None + _temp["CVSS_vector"] = None # CVE and LINKS - _temp_cve_details = vuln_item.iterfind('CVE_ID_LIST/CVE_ID') + _temp_cve_details = vuln_item.iterfind("CVE_ID_LIST/CVE_ID") if _temp_cve_details: - _cl = {cve_detail.findtext('ID'): cve_detail.findtext('URL') for cve_detail in _temp_cve_details} - _temp['cve'] = "\n".join(list(_cl.keys())) - _temp['links'] = "\n".join(list(_cl.values())) + _cl = { + cve_detail.findtext("ID"): cve_detail.findtext("URL") + for cve_detail in _temp_cve_details + } + _temp["cve"] = "\n".join(list(_cl.keys())) + _temp["links"] = "\n".join(list(_cl.values())) # The CVE in Qualys report might not have a CVSS score, so findings are informational by default - # unless we can find map to a Severity OR a CVSS score from the findings detail. + # unless we can find map to a Severity OR a CVSS score from the + # findings detail. 
sev = None - if _temp.get('CVSS_value') is not None and _temp['CVSS_value'] > 0: - if 0.1 <= float(_temp['CVSS_value']) <= 3.9: - sev = 'Low' - elif 4.0 <= float(_temp['CVSS_value']) <= 6.9: - sev = 'Medium' - elif 7.0 <= float(_temp['CVSS_value']) <= 8.9: - sev = 'High' - elif float(_temp['CVSS_value']) >= 9.0: - sev = 'Critical' - elif vuln_item.findtext('SEVERITY') is not None: - if int(vuln_item.findtext('SEVERITY')) == 1: - sev = 'Informational' - elif int(vuln_item.findtext('SEVERITY')) == 2: - sev = 'Low' - elif int(vuln_item.findtext('SEVERITY')) == 3: - sev = 'Medium' - elif int(vuln_item.findtext('SEVERITY')) == 4: - sev = 'High' - elif int(vuln_item.findtext('SEVERITY')) == 5: - sev = 'Critical' + if _temp.get("CVSS_value") is not None and _temp["CVSS_value"] > 0: + if 0.1 <= float(_temp["CVSS_value"]) <= 3.9: + sev = "Low" + elif 4.0 <= float(_temp["CVSS_value"]) <= 6.9: + sev = "Medium" + elif 7.0 <= float(_temp["CVSS_value"]) <= 8.9: + sev = "High" + elif float(_temp["CVSS_value"]) >= 9.0: + sev = "Critical" + elif vuln_item.findtext("SEVERITY") is not None: + if int(vuln_item.findtext("SEVERITY")) == 1: + sev = "Informational" + elif int(vuln_item.findtext("SEVERITY")) == 2: + sev = "Low" + elif int(vuln_item.findtext("SEVERITY")) == 3: + sev = "Medium" + elif int(vuln_item.findtext("SEVERITY")) == 4: + sev = "High" + elif int(vuln_item.findtext("SEVERITY")) == 5: + sev = "Critical" elif sev is None: - sev = 'Informational' + sev = "Informational" finding = None if _temp_cve_details: refs = "\n".join(list(_cl.values())) - finding = Finding(title="QID-" + _gid[4:] + " | " + _temp['vuln_name'], - mitigation=_temp['solution'], - description=_temp['vuln_description'], - severity=sev, - references=refs, - impact=_temp['IMPACT'], - date=_temp['date'], - vuln_id_from_tool=_gid, - ) + finding = Finding( + title="QID-" + _gid[4:] + " | " + _temp["vuln_name"], + mitigation=_temp["solution"], + description=_temp["vuln_description"], + severity=sev, + references=refs, + impact=_temp["IMPACT"], + date=_temp["date"], + vuln_id_from_tool=_gid, + ) else: - finding = Finding(title="QID-" + _gid[4:] + " | " + _temp['vuln_name'], - mitigation=_temp['solution'], - description=_temp['vuln_description'], - severity=sev, - references=_gid, - impact=_temp['IMPACT'], - date=_temp['date'], - vuln_id_from_tool=_gid, - ) - finding.mitigated = _temp['mitigation_date'] - finding.is_mitigated = _temp['mitigated'] - finding.active = _temp['active'] - if _temp.get('CVSS_vector') is not None: - finding.cvssv3 = _temp.get('CVSS_vector') + finding = Finding( + title="QID-" + _gid[4:] + " | " + _temp["vuln_name"], + mitigation=_temp["solution"], + description=_temp["vuln_description"], + severity=sev, + references=_gid, + impact=_temp["IMPACT"], + date=_temp["date"], + vuln_id_from_tool=_gid, + ) + finding.mitigated = _temp["mitigation_date"] + finding.is_mitigated = _temp["mitigated"] + finding.active = _temp["active"] + if _temp.get("CVSS_vector") is not None: + finding.cvssv3 = _temp.get("CVSS_vector") finding.verified = True finding.unsaved_endpoints = list() finding.unsaved_endpoints.append(ep) @@ -233,7 +252,7 @@ def parse_finding(host, tree): def qualys_parser(qualys_xml_file): parser = etree.XMLParser() tree = etree.parse(qualys_xml_file, parser) - host_list = tree.find('HOST_LIST') + host_list = tree.find("HOST_LIST") finding_list = [] if host_list is not None: for host in host_list: @@ -242,7 +261,6 @@ def qualys_parser(qualys_xml_file): class QualysParser(object): - def get_scan_types(self): return 
["Qualys Scan"] @@ -253,7 +271,7 @@ def get_description_for_scan_types(self, scan_type): return "Qualys WebGUI output files can be imported in XML format." def get_findings(self, file, test): - if file.name.lower().endswith('.csv'): + if file.name.lower().endswith(".csv"): return csv_parser.parse_csv(file) else: return qualys_parser(file) diff --git a/dojo/tools/qualys_infrascan_webgui/parser.py b/dojo/tools/qualys_infrascan_webgui/parser.py index 29c16742e6..e60084619a 100644 --- a/dojo/tools/qualys_infrascan_webgui/parser.py +++ b/dojo/tools/qualys_infrascan_webgui/parser.py @@ -21,76 +21,89 @@ def issue_r(raw_row, vuln, scan_date): issue_row = {} # IP ADDRESS - issue_row['ip_address'] = raw_row.get('value') + issue_row["ip_address"] = raw_row.get("value") # FQDN - issue_row['fqdn'] = raw_row.get('name') - if issue_row['fqdn'] == "No registered hostname": - issue_row['fqdn'] = None + issue_row["fqdn"] = raw_row.get("name") + if issue_row["fqdn"] == "No registered hostname": + issue_row["fqdn"] = None # port - _port = raw_row.get('port') + _port = raw_row.get("port") # Create Endpoint - if issue_row['fqdn']: - ep = Endpoint(host=issue_row['fqdn']) + if issue_row["fqdn"]: + ep = Endpoint(host=issue_row["fqdn"]) else: - ep = Endpoint(host=issue_row['ip_address']) + ep = Endpoint(host=issue_row["ip_address"]) # OS NAME - issue_row['os'] = raw_row.findtext('OS') + issue_row["os"] = raw_row.findtext("OS") - # Scan details - VULNS//VULN indicates we only care about confirmed vulnerabilities - for vuln_cat in raw_row.findall('VULNS/CAT'): - _category = str(vuln_cat.get('value')) - for vuln_details in vuln_cat.findall('VULN'): + # Scan details - VULNS//VULN indicates we only care about confirmed + # vulnerabilities + for vuln_cat in raw_row.findall("VULNS/CAT"): + _category = str(vuln_cat.get("value")) + for vuln_details in vuln_cat.findall("VULN"): _temp = issue_row - _gid = vuln_details.get('number') + _gid = vuln_details.get("number") - _temp['port_status'] = _port + _temp["port_status"] = _port - _result = str(vuln_details.findtext('RESULT')) + _result = str(vuln_details.findtext("RESULT")) # Vuln name - _temp['vuln_name'] = vuln_details.findtext('TITLE') + _temp["vuln_name"] = vuln_details.findtext("TITLE") # Vuln Description - _description = str(vuln_details.findtext('DIAGNOSIS')) + _description = str(vuln_details.findtext("DIAGNOSIS")) # Solution Strips Heading Workaround(s) - _temp['solution'] = htmltext(str(vuln_details.findtext('SOLUTION'))) + _temp["solution"] = htmltext( + str(vuln_details.findtext("SOLUTION")) + ) # Vuln_description - _temp['vuln_description'] = "\n".join([htmltext(_description), - htmltext("**Category:** " + _category), - htmltext("**QID:** " + str(_gid)), - htmltext("**Port:** " + str(_port)), - htmltext("**Result Evidence:** " + _result), - ]) + _temp["vuln_description"] = "\n".join( + [ + htmltext(_description), + htmltext("**Category:** " + _category), + htmltext("**QID:** " + str(_gid)), + htmltext("**Port:** " + str(_port)), + htmltext("**Result Evidence:** " + _result), + ] + ) # Impact description - _temp['IMPACT'] = htmltext(str(vuln_details.findtext('CONSEQUENCE'))) + _temp["IMPACT"] = htmltext( + str(vuln_details.findtext("CONSEQUENCE")) + ) # CVE and LINKS _cl = [] - _temp_cve_details = vuln_details.iterfind('CVE_ID_LIST/CVE_ID') + _temp_cve_details = vuln_details.iterfind("CVE_ID_LIST/CVE_ID") if _temp_cve_details: - _cl = {cve_detail.findtext('ID'): cve_detail.findtext('URL') for cve_detail in _temp_cve_details} - _temp['cve'] = 
"\n".join(list(_cl.keys())) - _temp['links'] = "\n".join(list(_cl.values())) + _cl = { + cve_detail.findtext("ID"): cve_detail.findtext("URL") + for cve_detail in _temp_cve_details + } + _temp["cve"] = "\n".join(list(_cl.keys())) + _temp["links"] = "\n".join(list(_cl.values())) # The CVE in Qualys report might not have a CVSS score, so findings are informational by default - # unless we can find map to a Severity OR a CVSS score from the findings detail. - sev = qualys_convert_severity(vuln_details.get('severity')) + # unless we can find map to a Severity OR a CVSS score from the + # findings detail. + sev = qualys_convert_severity(vuln_details.get("severity")) refs = "\n".join(list(_cl.values())) - finding = Finding(title=_temp['vuln_name'], - mitigation=_temp['solution'], - description=_temp['vuln_description'], - severity=sev, - references=refs, - impact=_temp['IMPACT'], - vuln_id_from_tool=_gid, - date=scan_date, - ) + finding = Finding( + title=_temp["vuln_name"], + mitigation=_temp["solution"], + description=_temp["vuln_description"], + severity=sev, + references=refs, + impact=_temp["IMPACT"], + vuln_id_from_tool=_gid, + date=scan_date, + ) finding.unsaved_endpoints = list() finding.unsaved_endpoints.append(ep) ret_rows.append(finding) @@ -99,22 +112,21 @@ def issue_r(raw_row, vuln, scan_date): def qualys_convert_severity(raw_val): val = str(raw_val).strip() - if '1' == val: - return 'Info' - elif '2' == val: - return 'Low' - elif '3' == val: - return 'Medium' - elif '4' == val: - return 'High' - elif '5' == val: - return 'Critical' + if "1" == val: + return "Info" + elif "2" == val: + return "Low" + elif "3" == val: + return "Medium" + elif "4" == val: + return "High" + elif "5" == val: + return "Critical" else: - return 'Info' + return "Info" class QualysInfrascanWebguiParser(object): - def get_scan_types(self): return ["Qualys Infrastructure Scan (WebGUI XML)"] @@ -129,11 +141,11 @@ def get_findings(self, file, test): # fetch scan date e.g.: 2020-01-30T09:45:41Z scan_date = datetime.now() - for i in data.findall('HEADER/KEY'): - if i.get('value') == 'DATE': + for i in data.findall("HEADER/KEY"): + if i.get("value") == "DATE": scan_date = parser.isoparse(i.text) master_list = [] - for issue in data.findall('IP'): + for issue in data.findall("IP"): master_list += issue_r(issue, data, scan_date) return master_list diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py index eca2abc335..48b3b52dfc 100644 --- a/dojo/tools/qualys_webapp/parser.py +++ b/dojo/tools/qualys_webapp/parser.py @@ -24,16 +24,12 @@ # Since Info findings are not recroded in the Confirmed Vulnerability or # Potential Vulnerability categories, a severity of 1 is shown as low # in the portal. 
-SEVERITY_MATCH = ['Low', - 'Low', - 'Medium', - 'High', - 'Critical'] +SEVERITY_MATCH = ["Low", "Low", "Medium", "High", "Critical"] def truncate_str(value: str, maxlen: int): if len(value) > maxlen: - return value[:maxlen - 12] + " (truncated)" + return value[: maxlen - 12] + " (truncated)" return value @@ -46,7 +42,19 @@ def get_cwe(cwe): return 0 -def attach_unique_extras(endpoints, requests, responses, finding, date, qid, param, payload, unique_id, active_text, test): +def attach_unique_extras( + endpoints, + requests, + responses, + finding, + date, + qid, + param, + payload, + unique_id, + active_text, + test, +): # finding should always be none, since unique ID's are being used if finding is None: finding = Finding() @@ -73,24 +81,32 @@ def attach_unique_extras(endpoints, requests, responses, finding, date, qid, par port = "" # Set port to empty string by default # Split the returned network address into host and try: # If there is port number attached to host address - host, port = parsedUrl.netloc.split(':') - except: # there's no port attached to address + host, port = parsedUrl.netloc.split(":") + except BaseException: # there's no port attached to address host = parsedUrl.netloc - finding.unsaved_endpoints.append(Endpoint( - host=truncate_str(host, 500), port=port, - path=truncate_str(path, 500), - protocol=protocol, - query=truncate_str(query, 1000), fragment=truncate_str(fragment, 500))) + finding.unsaved_endpoints.append( + Endpoint( + host=truncate_str(host, 500), + port=port, + path=truncate_str(path, 500), + protocol=protocol, + query=truncate_str(query, 1000), + fragment=truncate_str(fragment, 500), + ) + ) for i in range(0, len(requests)): - if requests[i] != '' or responses[i] != '': - finding.unsaved_req_resp.append({"req": requests[i], "resp": responses[i]}) + if requests[i] != "" or responses[i] != "": + finding.unsaved_req_resp.append( + {"req": requests[i], "resp": responses[i]} + ) if active_text is not None: - if 'fixed' in active_text.lower(): + if "fixed" in active_text.lower(): finding.active = False - # TODO: may need to look up by finding ID and mark current finding as fixed + # TODO: may need to look up by finding ID and mark current finding + # as fixed else: finding.active = True @@ -118,8 +134,10 @@ def attach_extras(endpoints, requests, responses, finding, date, qid, test): finding.unsaved_endpoints.append(Endpoint.from_uri(endpoint)) for i in range(0, len(requests)): - if requests[i] != '' or responses[i] != '': - finding.unsaved_req_resp.append({"req": requests[i], "resp": responses[i]}) + if requests[i] != "" or responses[i] != "": + finding.unsaved_req_resp.append( + {"req": requests[i], "resp": responses[i]} + ) return finding @@ -128,23 +146,23 @@ def attach_extras(endpoints, requests, responses, finding, date, qid, test): # found in the this section of the report def get_request(request): if request is not None: - header = '' - header += str(request.findtext('METHOD')) + ': ' - header += str(request.findtext('URL')) + '\n' - headers = request.find('HEADERS') + header = "" + header += str(request.findtext("METHOD")) + ": " + header += str(request.findtext("URL")) + "\n" + headers = request.find("HEADERS") if headers is not None: - for head in headers.iter('HEADER'): - header += str(head.findtext('key')) + ': ' - header += str(head.findtext('value')) + '\n' + for head in headers.iter("HEADER"): + header += str(head.findtext("key")) + ": " + header += str(head.findtext("value")) + "\n" return str(header) - return '' + return "" # Build a response 
string def get_response(response): if response is not None: - return decode_tag(response.find('CONTENTS')) - return '' + return decode_tag(response.find("CONTENTS")) + return "" # Decode an XML tag with base64 if the tag has base64=true set. @@ -162,127 +180,152 @@ def decode_tag(tag): def get_request_response(payloads): requests = [] responses = [] - for payload in payloads.iter('PAYLOAD'): - requests.append(get_request(payload.find('REQUEST'))) - responses.append(get_response(payload.find('RESPONSE'))) + for payload in payloads.iter("PAYLOAD"): + requests.append(get_request(payload.find("REQUEST"))) + responses.append(get_response(payload.find("RESPONSE"))) return [requests, responses] -def get_unique_vulnerabilities(vulnerabilities, test, is_info=False, is_app_report=False): +def get_unique_vulnerabilities( + vulnerabilities, test, is_info=False, is_app_report=False +): findings = {} # Iterate through all vulnerabilites to pull necessary info for vuln in vulnerabilities: urls = [] - requests = response = '' - qid = int(vuln.findtext('QID')) - url = vuln.findtext('URL') + requests = response = "" + qid = int(vuln.findtext("QID")) + url = vuln.findtext("URL") if url is not None: urls.append(str(url)) - access_path = vuln.find('ACCESS_PATH') + access_path = vuln.find("ACCESS_PATH") if access_path is not None: - urls += [url.text for url in access_path.iter('URL')] - payloads = vuln.find('PAYLOADS') + urls += [url.text for url in access_path.iter("URL")] + payloads = vuln.find("PAYLOADS") if payloads is not None: req_resps = get_request_response(payloads) else: req_resps = [[], []] if is_info: - raw_finding_date = vuln.findtext('LAST_TIME_DETECTED') + raw_finding_date = vuln.findtext("LAST_TIME_DETECTED") elif is_app_report: - raw_finding_date = vuln.findtext('FIRST_TIME_DETECTED') + raw_finding_date = vuln.findtext("FIRST_TIME_DETECTED") else: - raw_finding_date = vuln.findtext('DETECTION_DATE') + raw_finding_date = vuln.findtext("DETECTION_DATE") # Qualys uses a non-standard date format. if raw_finding_date is not None: if raw_finding_date.endswith("GMT"): - finding_date = datetime.strptime(raw_finding_date, "%d %b %Y %I:%M%p GMT") + finding_date = datetime.strptime( + raw_finding_date, "%d %b %Y %I:%M%p GMT" + ) else: - finding_date = datetime.strptime(raw_finding_date, "%d %b %Y %I:%M%p GMT%z") + finding_date = datetime.strptime( + raw_finding_date, "%d %b %Y %I:%M%p GMT%z" + ) else: finding_date = None # Updating to include customized values - unique_id = vuln.findtext('UNIQUE_ID') - active_text = vuln.findtext('STATUS') + unique_id = vuln.findtext("UNIQUE_ID") + active_text = vuln.findtext("STATUS") param = None payload = None if not is_info: - param = vuln.findtext('PARAM') - payload = vuln.findtext('PAYLOADS/PAYLOAD/PAYLOAD') - - findings[unique_id] = attach_unique_extras(urls, req_resps[0], req_resps[1], None, finding_date, qid, param, payload, - unique_id, active_text, test) + param = vuln.findtext("PARAM") + payload = vuln.findtext("PAYLOADS/PAYLOAD/PAYLOAD") + + findings[unique_id] = attach_unique_extras( + urls, + req_resps[0], + req_resps[1], + None, + finding_date, + qid, + param, + payload, + unique_id, + active_text, + test, + ) return findings # Traverse and retreive any information in the VULNERABILITY_LIST # section of the report. 
This includes all endpoints and request/response pairs -def get_vulnerabilities(vulnerabilities, test, is_info=False, is_app_report=False): +def get_vulnerabilities( + vulnerabilities, test, is_info=False, is_app_report=False +): findings = {} # Iterate through all vulnerabilites to pull necessary info for vuln in vulnerabilities: urls = [] - requests = response = '' - qid = int(vuln.findtext('QID')) - url = vuln.findtext('URL') + requests = response = "" + qid = int(vuln.findtext("QID")) + url = vuln.findtext("URL") if url is not None: urls.append(str(url)) - access_path = vuln.find('ACCESS_PATH') + access_path = vuln.find("ACCESS_PATH") if access_path is not None: - urls += [url.text for url in access_path.iter('URL')] - payloads = vuln.find('PAYLOADS') + urls += [url.text for url in access_path.iter("URL")] + payloads = vuln.find("PAYLOADS") if payloads is not None: req_resps = get_request_response(payloads) else: req_resps = [[], []] if is_info: - raw_finding_date = vuln.findtext('LAST_TIME_DETECTED') + raw_finding_date = vuln.findtext("LAST_TIME_DETECTED") elif is_app_report: - raw_finding_date = vuln.findtext('FIRST_TIME_DETECTED') + raw_finding_date = vuln.findtext("FIRST_TIME_DETECTED") else: - raw_finding_date = vuln.findtext('DETECTION_DATE') + raw_finding_date = vuln.findtext("DETECTION_DATE") # Qualys uses a non-standard date format. if raw_finding_date is not None: if raw_finding_date.endswith("GMT"): - finding_date = datetime.strptime(raw_finding_date, "%d %b %Y %I:%M%p GMT") + finding_date = datetime.strptime( + raw_finding_date, "%d %b %Y %I:%M%p GMT" + ) else: - finding_date = datetime.strptime(raw_finding_date, "%d %b %Y %I:%M%p GMT%z") + finding_date = datetime.strptime( + raw_finding_date, "%d %b %Y %I:%M%p GMT%z" + ) else: finding_date = None finding = findings.get(qid, None) - findings[qid] = attach_extras(urls, req_resps[0], req_resps[1], finding, finding_date, qid, test) + findings[qid] = attach_extras( + urls, req_resps[0], req_resps[1], finding, finding_date, qid, test + ) return findings # Retrieve information from a single glossary entry such as description, # severity, title, impact, mitigation, and CWE def get_glossary_item(glossary, finding, is_info=False, enable_weakness=False): - title = glossary.findtext('TITLE') + title = glossary.findtext("TITLE") if title is not None: finding.title = str(title) - severity = glossary.findtext('SEVERITY') + severity = glossary.findtext("SEVERITY") if severity is not None: - group = glossary.findtext('GROUP') + group = glossary.findtext("GROUP") if is_info and (not enable_weakness or group in ("DIAG", "IG")): # Scan Diagnostics are always Info. 
finding.severity = "Info" else: finding.severity = SEVERITY_MATCH[int(severity) - 1] - description = glossary.findtext('DESCRIPTION') + description = glossary.findtext("DESCRIPTION") if description is not None: finding.description = str(description) - impact = glossary.findtext('IMPACT') + impact = glossary.findtext("IMPACT") if impact is not None: finding.impact = str(impact) - solution = glossary.findtext('SOLUTION') + solution = glossary.findtext("SOLUTION") if solution is not None: finding.mitigation = str(solution) - cwe = glossary.findtext('CWE') + cwe = glossary.findtext("CWE") if cwe is not None: finding.cwe = int(get_cwe(str(cwe))) return finding @@ -290,30 +333,44 @@ def get_glossary_item(glossary, finding, is_info=False, enable_weakness=False): # Retrieve information from a single information gathered entry def get_info_item(info_gathered, finding): - data = info_gathered.find('DATA') + data = info_gathered.find("DATA") if data is not None: - finding.description += '\n\n' + decode_tag(data) + finding.description += "\n\n" + decode_tag(data) return finding # Create findings report for all unique vulnerabilities in the report -def get_unique_items(vulnerabilities, info_gathered, glossary, is_app_report, test, enable_weakness=False): - ig_qid_list = [int(ig.findtext('QID')) for ig in info_gathered] - g_qid_list = [int(g.findtext('QID')) for g in glossary] +def get_unique_items( + vulnerabilities, + info_gathered, + glossary, + is_app_report, + test, + enable_weakness=False, +): + ig_qid_list = [int(ig.findtext("QID")) for ig in info_gathered] + g_qid_list = [int(g.findtext("QID")) for g in glossary] # This dict has findings mapped by unique ID to remove any duplicates findings = {} - total = 0 - for unique_id, finding in get_unique_vulnerabilities(vulnerabilities, test, False, is_app_report).items(): + for unique_id, finding in get_unique_vulnerabilities( + vulnerabilities, test, False, is_app_report + ).items(): qid = int(finding.vuln_id_from_tool) if qid in g_qid_list: index = g_qid_list.index(qid) - findings[unique_id] = get_glossary_item(glossary[index], finding, enable_weakness) - for unique_id, finding in get_unique_vulnerabilities(info_gathered, test, True, is_app_report).items(): + findings[unique_id] = get_glossary_item( + glossary[index], finding, enable_weakness + ) + for unique_id, finding in get_unique_vulnerabilities( + info_gathered, test, True, is_app_report + ).items(): qid = int(finding.vuln_id_from_tool) if qid in g_qid_list: index = g_qid_list.index(qid) - finding = get_glossary_item(glossary[index], finding, True, enable_weakness) + finding = get_glossary_item( + glossary[index], finding, True, enable_weakness + ) if qid in ig_qid_list: index = ig_qid_list.index(qid) findings[unique_id] = get_info_item(info_gathered[index], finding) @@ -321,21 +378,36 @@ def get_unique_items(vulnerabilities, info_gathered, glossary, is_app_report, te # Create finding items for all vulnerabilities in the report -def get_items(vulnerabilities, info_gathered, glossary, is_app_report, test, enable_weakness=False): - ig_qid_list = [int(ig.findtext('QID')) for ig in info_gathered] - g_qid_list = [int(g.findtext('QID')) for g in glossary] +def get_items( + vulnerabilities, + info_gathered, + glossary, + is_app_report, + test, + enable_weakness=False, +): + ig_qid_list = [int(ig.findtext("QID")) for ig in info_gathered] + g_qid_list = [int(g.findtext("QID")) for g in glossary] # This dict has findings mapped by QID to remove any duplicates findings = {} - for qid, finding in 
get_vulnerabilities(vulnerabilities, test, False, is_app_report).items(): + for qid, finding in get_vulnerabilities( + vulnerabilities, test, False, is_app_report + ).items(): if qid in g_qid_list: index = g_qid_list.index(qid) - findings[qid] = get_glossary_item(glossary[index], finding, enable_weakness) - for qid, finding in get_vulnerabilities(info_gathered, test, True, is_app_report).items(): + findings[qid] = get_glossary_item( + glossary[index], finding, enable_weakness + ) + for qid, finding in get_vulnerabilities( + info_gathered, test, True, is_app_report + ).items(): if qid in g_qid_list: index = g_qid_list.index(qid) - finding = get_glossary_item(glossary[index], finding, True, enable_weakness) + finding = get_glossary_item( + glossary[index], finding, True, enable_weakness + ) if qid in ig_qid_list: index = ig_qid_list.index(qid) findings[qid] = get_info_item(info_gathered[index], finding) @@ -347,28 +419,54 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False): if qualys_xml_file is None: return [] - # supposed to be safe against XEE: https://docs.python.org/3/library/xml.html#xml-vulnerabilities + # supposed to be safe against XEE: + # https://docs.python.org/3/library/xml.html#xml-vulnerabilities tree = xml.etree.ElementTree.parse(qualys_xml_file) - is_app_report = tree.getroot().tag == 'WAS_WEBAPP_REPORT' + is_app_report = tree.getroot().tag == "WAS_WEBAPP_REPORT" if is_app_report: - vulnerabilities = tree.findall('./RESULTS/WEB_APPLICATION/VULNERABILITY_LIST/VULNERABILITY') - info_gathered = tree.findall('./RESULTS/WEB_APPLICATION/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED') + vulnerabilities = tree.findall( + "./RESULTS/WEB_APPLICATION/VULNERABILITY_LIST/VULNERABILITY" + ) + info_gathered = tree.findall( + "./RESULTS/WEB_APPLICATION/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED" + ) else: - vulnerabilities = tree.findall('./RESULTS/VULNERABILITY_LIST/VULNERABILITY') - info_gathered = tree.findall('./RESULTS/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED') - glossary = tree.findall('./GLOSSARY/QID_LIST/QID') + vulnerabilities = tree.findall( + "./RESULTS/VULNERABILITY_LIST/VULNERABILITY" + ) + info_gathered = tree.findall( + "./RESULTS/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED" + ) + glossary = tree.findall("./GLOSSARY/QID_LIST/QID") if unique: - items = list(get_unique_items(vulnerabilities, info_gathered, glossary, is_app_report, test, enable_weakness).values()) + items = list( + get_unique_items( + vulnerabilities, + info_gathered, + glossary, + is_app_report, + test, + enable_weakness, + ).values() + ) else: - items = list(get_items(vulnerabilities, info_gathered, glossary, is_app_report, test, enable_weakness).values()) + items = list( + get_items( + vulnerabilities, + info_gathered, + glossary, + is_app_report, + test, + enable_weakness, + ).values() + ) return items class QualysWebAppParser(object): - def get_scan_types(self): return ["Qualys Webapp Scan"] @@ -378,5 +476,9 @@ def get_label_for_scan_types(self, scan_type): def get_description_for_scan_types(self, scan_type): return "Qualys WebScan output files can be imported in XML format." 
- def get_findings(self, file, test, enable_weakness=QUALYS_WAS_WEAKNESS_IS_VULN): - return qualys_webapp_parser(file, test, QUALYS_WAS_UNIQUE_ID, enable_weakness) + def get_findings( + self, file, test, enable_weakness=QUALYS_WAS_WEAKNESS_IS_VULN + ): + return qualys_webapp_parser( + file, test, QUALYS_WAS_UNIQUE_ID, enable_weakness + ) diff --git a/dojo/tools/retirejs/parser.py b/dojo/tools/retirejs/parser.py index 2ddbe7e523..2482d517dc 100644 --- a/dojo/tools/retirejs/parser.py +++ b/dojo/tools/retirejs/parser.py @@ -5,7 +5,6 @@ class RetireJsParser(object): - def get_scan_types(self): return ["Retire.js Scan"] @@ -21,44 +20,56 @@ def get_findings(self, json_output, test): def get_items(self, tree, test): items = {} - if 'data' in tree: - tree = tree['data'] + if "data" in tree: + tree = tree["data"] for node in tree: - for result in node['results']: - if 'vulnerabilities' in result: - for vulnerability in result['vulnerabilities']: - item = self.get_item(vulnerability, test, node['file']) - item.title += " (" + result['component'] + ", " + result['version'] + ")" - item.description += "\n\n Raw Result: " + str(json.dumps(vulnerability, indent=4, sort_keys=True)) + for result in node["results"]: + if "vulnerabilities" in result: + for vulnerability in result["vulnerabilities"]: + item = self.get_item(vulnerability, test, node["file"]) + item.title += ( + " (" + + result["component"] + + ", " + + result["version"] + + ")" + ) + item.description += "\n\n Raw Result: " + str( + json.dumps(vulnerability, indent=4, sort_keys=True) + ) item.references = item.references - item.component_name = result.get('component') - item.component_version = result.get('version') - item.file_path = node['file'] + item.component_name = result.get("component") + item.component_version = result.get("version") + item.file_path = node["file"] - encrypted_file = node['file'] - unique_key = hashlib.md5((item.title + item.references + encrypted_file).encode()).hexdigest() + encrypted_file = node["file"] + unique_key = hashlib.md5( + ( + item.title + item.references + encrypted_file + ).encode() + ).hexdigest() items[unique_key] = item return list(items.values()) def get_item(self, item_node, test, file): title = "" - if 'identifiers' in item_node: - if 'summary' in item_node['identifiers']: - title = item_node['identifiers']['summary'] - elif 'CVE' in item_node['identifiers']: - title = "".join(item_node['identifiers']['CVE']) - elif 'osvdb' in item_node['identifiers']: - title = "".join(item_node['identifiers']['osvdb']) + if "identifiers" in item_node: + if "summary" in item_node["identifiers"]: + title = item_node["identifiers"]["summary"] + elif "CVE" in item_node["identifiers"]: + title = "".join(item_node["identifiers"]["CVE"]) + elif "osvdb" in item_node["identifiers"]: + title = "".join(item_node["identifiers"]["osvdb"]) finding = Finding( title=title, test=test, cwe=1035, # Vulnerable Third Party Component - severity=item_node['severity'].title(), + severity=item_node["severity"].title(), description=title + "\n\n Affected File - " + file, file_path=file, - references="\n".join(item_node['info']), + references="\n".join(item_node["info"]), false_p=False, duplicate=False, out_of_scope=False, diff --git a/dojo/tools/risk_recon/api.py b/dojo/tools/risk_recon/api.py index dc420067a3..0ac61f805d 100644 --- a/dojo/tools/risk_recon/api.py +++ b/dojo/tools/risk_recon/api.py @@ -11,17 +11,17 @@ def __init__(self, api_key, endpoint, data): if not self.key: raise Exception( - 'Please supply a Risk Recon API key. 
\n' - 'This can be generated in the system admin panel. \n' - 'See https://documentation.defectdojo.com/integrations/import/#risk-recon-api-importer \n' + "Please supply a Risk Recon API key. \n" + "This can be generated in the system admin panel. \n" + "See https://documentation.defectdojo.com/integrations/import/#risk-recon-api-importer \n" ) if not self.url: raise Exception( - 'Please supply a Risk Recon API url. \n' - 'A general url is https://api.riskrecon.com/v1/ \n' - 'See https://documentation.defectdojo.com/integrations/import/#risk-recon-api-importer \n' + "Please supply a Risk Recon API url. \n" + "A general url is https://api.riskrecon.com/v1/ \n" + "See https://documentation.defectdojo.com/integrations/import/#risk-recon-api-importer \n" ) - if self.url.endswith('/'): + if self.url.endswith("/"): self.url = endpoint[:-1] self.session = requests.Session() self.map_toes() @@ -29,11 +29,8 @@ def __init__(self, api_key, endpoint, data): def map_toes(self): response = self.session.get( - url='{}/toes'.format(self.url), - headers={ - 'accept': 'application/json', - 'Authorization': self.key - } + url="{}/toes".format(self.url), + headers={"accept": "application/json", "Authorization": self.key}, ) if response.ok: @@ -41,24 +38,26 @@ def map_toes(self): data = response.json() if isinstance(self.data, list): for company in self.data: - name = company.get('name', None) - filters = company.get('filters', None) + name = company.get("name", None) + filters = company.get("filters", None) if name: comps[name] = filters name_list = comps.keys() for item in data: - toe_id = item.get('toe_id', None) - name = item.get('toe_short_name', None) + toe_id = item.get("toe_id", None) + name = item.get("toe_short_name", None) if not comps or name in name_list: filters = comps.get(name, None) self.toe_map[toe_id] = filters if filters else self.data else: - raise Exception('Unable to query Target of Evaluations due to {} - {}'.format( - response.status_code, response.content - )) + raise Exception( + "Unable to query Target of Evaluations due to {} - {}".format( + response.status_code, response.content + ) + ) def filter_finding(self, finding): - filters = self.toe_map[finding['toe_id']] + filters = self.toe_map[finding["toe_id"]] if not filters: return False @@ -72,11 +71,11 @@ def filter_finding(self, finding): def get_findings(self): for toe in self.toe_map.keys(): response = self.session.get( - url='{}/findings/{}'.format(self.url, toe), + url="{}/findings/{}".format(self.url, toe), headers={ - 'accept': 'application/json', - 'Authorization': self.key - } + "accept": "application/json", + "Authorization": self.key, + }, ) if response.ok: @@ -85,6 +84,8 @@ def get_findings(self): if not self.filter_finding(finding): self.findings.append(finding) else: - raise Exception('Unable to collect findings from toe: {} due to {} - {}'.format( - toe, response.status_code, response.content - )) + raise Exception( + "Unable to collect findings from toe: {} due to {} - {}".format( + toe, response.status_code, response.content + ) + ) diff --git a/dojo/tools/risk_recon/parser.py b/dojo/tools/risk_recon/parser.py index 7c37c8bb5e..8c70496d69 100644 --- a/dojo/tools/risk_recon/parser.py +++ b/dojo/tools/risk_recon/parser.py @@ -6,7 +6,6 @@ class RiskReconParser(object): - def get_scan_types(self): return ["Risk Recon API Importer"] @@ -20,48 +19,75 @@ def get_findings(self, filename, test): if filename: tree = filename.read() try: - data = json.loads(str(tree, 'utf-8')) - except: + data = json.loads(str(tree, 
"utf-8")) + except Exception: data = json.loads(tree) findings = [] - if not data.get('test', None): + if not data.get("test", None): api = RiskReconAPI( - data.get('api_key', None), - data.get('url_endpoint', None), - data.get('companies', data.get('filters', [])), + data.get("api_key", None), + data.get("url_endpoint", None), + data.get("companies", data.get("filters", [])), ) findings = api.findings else: - findings = data.get('findings') + findings = data.get("findings") return self._get_findings_internal(findings, test) def _get_findings_internal(self, findings, test): dupes = dict() for item in findings: - findingdetail = '' - title = item.get('vendor') + ': ' + item.get('finding') + ' - ' + item.get('domain_name') + '(' + item.get('ip_address') + ')' + findingdetail = "" + title = ( + item.get("vendor") + + ": " + + item.get("finding") + + " - " + + item.get("domain_name") + + "(" + + item.get("ip_address") + + ")" + ) # Finding details information - findingdetail += '**ID:** ' + item.get('finding_id') + '\n' - findingdetail += '**Context:** ' + item.get('finding_context') + '\n' - findingdetail += '**Value:** ' + item.get('finding_data_value') + '\n' - findingdetail += '**Hosting Provider:** ' + item.get('hosting_provider') + '\n' - findingdetail += '**Host Name:** ' + item.get('host_name') + '\n' - findingdetail += '**Security Domain:** ' + item.get('security_domain') + '\n' - findingdetail += '**Security Criteria:** ' + item.get('security_criteria') + '\n' - findingdetail += '**Asset Value:** ' + item.get('asset_value') + '\n' - findingdetail += '**Country:** ' + item.get('country_name') + '\n' - findingdetail += '**Priority:** ' + item.get('priority') + '\n' - findingdetail += '**First Seen:** ' + item.get('first_seen') + '\n' - - date = dateutil.parser.parse(item.get('first_seen')) - - sev = item.get('severity', "").capitalize() + findingdetail += "**ID:** " + item.get("finding_id") + "\n" + findingdetail += ( + "**Context:** " + item.get("finding_context") + "\n" + ) + findingdetail += ( + "**Value:** " + item.get("finding_data_value") + "\n" + ) + findingdetail += ( + "**Hosting Provider:** " + item.get("hosting_provider") + "\n" + ) + findingdetail += "**Host Name:** " + item.get("host_name") + "\n" + findingdetail += ( + "**Security Domain:** " + item.get("security_domain") + "\n" + ) + findingdetail += ( + "**Security Criteria:** " + + item.get("security_criteria") + + "\n" + ) + findingdetail += ( + "**Asset Value:** " + item.get("asset_value") + "\n" + ) + findingdetail += "**Country:** " + item.get("country_name") + "\n" + findingdetail += "**Priority:** " + item.get("priority") + "\n" + findingdetail += "**First Seen:** " + item.get("first_seen") + "\n" + + date = dateutil.parser.parse(item.get("first_seen")) + + sev = item.get("severity", "").capitalize() sev = "Info" if not sev else sev - tags = item.get('security_domain')[:20] + ', ' + item.get('security_criteria')[:20] + tags = ( + item.get("security_domain")[:20] + + ", " + + item.get("security_criteria")[:20] + ) finding = Finding( title=title, @@ -71,12 +97,14 @@ def _get_findings_internal(self, findings, test): static_finding=False, dynamic_finding=True, date=date, - unique_id_from_tool=item.get('finding_id'), + unique_id_from_tool=item.get("finding_id"), nb_occurences=1, # there is no de-duplication ) finding.unsaved_tags = tags - dupe_key = item.get('finding_id', title + '|' + tags + '|' + findingdetail) + dupe_key = item.get( + "finding_id", title + "|" + tags + "|" + findingdetail + ) if dupe_key in dupes: 
find = dupes[dupe_key] diff --git a/dojo/tools/rubocop/parser.py b/dojo/tools/rubocop/parser.py index 99919ebd13..db18a4619b 100644 --- a/dojo/tools/rubocop/parser.py +++ b/dojo/tools/rubocop/parser.py @@ -4,7 +4,6 @@ class RubocopParser: - ID = "Rubocop Scan" # possible values are: diff --git a/dojo/tools/rusty_hog/parser.py b/dojo/tools/rusty_hog/parser.py index 165110214a..da0baa6c83 100644 --- a/dojo/tools/rusty_hog/parser.py +++ b/dojo/tools/rusty_hog/parser.py @@ -4,7 +4,6 @@ class RustyhogParser(object): - def get_scan_types(self): return ["Rusty Hog Scan"] @@ -24,7 +23,9 @@ def parse_json(self, json_output): def get_items(self, json_output, scanner, test): items = {} - findings = self.__getitem(vulnerabilities=self.parse_json(json_output), scanner=scanner) + findings = self.__getitem( + vulnerabilities=self.parse_json(json_output), scanner=scanner + ) for finding in findings: unique_key = "Finding {}".format(finding) items[unique_key] = finding @@ -35,16 +36,22 @@ def get_tests(self, scan_type, handle): tests = list() parsername = "Rusty Hog" for node in tree: - if 'commit' in node or 'commitHash' in node or 'parent_commit_hash' in node or 'old_file_id' in node or 'new_file_id' in node: + if ( + "commit" in node + or "commitHash" in node + or "parent_commit_hash" in node + or "old_file_id" in node + or "new_file_id" in node + ): parsername = "Choctaw Hog" break - if 'linenum' in node or 'diff' in node: + if "linenum" in node or "diff" in node: parsername = "Duroc Hog" break - if 'issue_id' in node or 'location' in node: + if "issue_id" in node or "location" in node: parsername = "Gottingen Hog" break - if 'page_id' in node: + if "page_id" in node: parsername = "Essex Hog" break test = ParserTest( @@ -52,17 +59,20 @@ def get_tests(self, scan_type, handle): type=parsername, version="", ) - if parsername == "Rusty Hog": # The outputfile is empty. A subscanner can't be classified + if ( + parsername == "Rusty Hog" + ): # The outputfile is empty. A subscanner can't be classified test.description = "The exact scanner within Rusty Hog could not be determined due to missing information within the scan result." 
else: test.description = parsername - test.findings = self.__getitem(vulnerabilities=tree, scanner=parsername) + test.findings = self.__getitem( + vulnerabilities=tree, scanner=parsername + ) tests.append(test) return tests def __getitem(self, vulnerabilities, scanner): findings = [] - line = "" found_secret_string = "" cwe = 200 for vulnerability in vulnerabilities: @@ -70,85 +80,131 @@ def __getitem(self, vulnerabilities, scanner): break elif scanner == "Choctaw Hog": """Choctaw Hog""" - found_secret_string = vulnerability.get('stringsFound') - description = "**This string was found:** {}".format(found_secret_string) - if vulnerability.get('commit') is not None: - description += "\n**Commit message:** {}".format(vulnerability.get('commit')) - if vulnerability.get('commitHash') is not None: - description += "\n**Commit hash:** {}".format(vulnerability.get('commitHash')) - if vulnerability.get('parent_commit_hash') is not None: - description += "\n**Parent commit hash:** {}".format(vulnerability.get('parent_commit_hash')) - if vulnerability.get('old_file_id') is not None and vulnerability.get('new_file_id') is not None: - description += "\n**Old and new file IDs:** {} - {}".format( - vulnerability.get('old_file_id'), - vulnerability.get('new_file_id')) - if vulnerability.get('old_line_num') is not None and vulnerability.get('new_line_num') is not None: - description += "\n**Old and new line numbers:** {} - {}".format( - vulnerability.get('old_line_num'), - vulnerability.get('new_line_num')) + found_secret_string = vulnerability.get("stringsFound") + description = "**This string was found:** {}".format( + found_secret_string + ) + if vulnerability.get("commit") is not None: + description += "\n**Commit message:** {}".format( + vulnerability.get("commit") + ) + if vulnerability.get("commitHash") is not None: + description += "\n**Commit hash:** {}".format( + vulnerability.get("commitHash") + ) + if vulnerability.get("parent_commit_hash") is not None: + description += "\n**Parent commit hash:** {}".format( + vulnerability.get("parent_commit_hash") + ) + if ( + vulnerability.get("old_file_id") is not None + and vulnerability.get("new_file_id") is not None + ): + description += ( + "\n**Old and new file IDs:** {} - {}".format( + vulnerability.get("old_file_id"), + vulnerability.get("new_file_id"), + ) + ) + if ( + vulnerability.get("old_line_num") is not None + and vulnerability.get("new_line_num") is not None + ): + description += ( + "\n**Old and new line numbers:** {} - {}".format( + vulnerability.get("old_line_num"), + vulnerability.get("new_line_num"), + ) + ) elif scanner == "Duroc Hog": """Duroc Hog""" - found_secret_string = vulnerability.get('stringsFound') - description = "**This string was found:** {}".format(found_secret_string) - if vulnerability.get('path') is not None: - description += "\n**Path of Issue:** {}".format(vulnerability.get('path')) - if vulnerability.get('linenum') is not None: - description += "\n**Linenum of Issue:** {}".format(vulnerability.get('linenum')) - if vulnerability.get('diff') is not None: - description += "\n**Diff:** {}".format(vulnerability.get('diff')) + found_secret_string = vulnerability.get("stringsFound") + description = "**This string was found:** {}".format( + found_secret_string + ) + if vulnerability.get("path") is not None: + description += "\n**Path of Issue:** {}".format( + vulnerability.get("path") + ) + if vulnerability.get("linenum") is not None: + description += "\n**Linenum of Issue:** {}".format( + vulnerability.get("linenum") + ) + 
if vulnerability.get("diff") is not None: + description += "\n**Diff:** {}".format( + vulnerability.get("diff") + ) elif scanner == "Gottingen Hog": """Gottingen Hog""" - found_secret_string = vulnerability.get('stringsFound') - description = "**This string was found:** {}".format(found_secret_string) - if vulnerability.get('issue_id') is not None: - description += "\n**JIRA Issue ID:** {}".format(vulnerability.get('issue_id')) - if vulnerability.get('location') is not None: - description += "\n**JIRA location:** {}".format(vulnerability.get('location')) - if vulnerability.get('url') is not None: - description += "\n**JIRA url:** [{}]({})".format(vulnerability.get('url'), vulnerability.get('url')) + found_secret_string = vulnerability.get("stringsFound") + description = "**This string was found:** {}".format( + found_secret_string + ) + if vulnerability.get("issue_id") is not None: + description += "\n**JIRA Issue ID:** {}".format( + vulnerability.get("issue_id") + ) + if vulnerability.get("location") is not None: + description += "\n**JIRA location:** {}".format( + vulnerability.get("location") + ) + if vulnerability.get("url") is not None: + description += "\n**JIRA url:** [{}]({})".format( + vulnerability.get("url"), vulnerability.get("url") + ) elif scanner == "Essex Hog": - found_secret_string = vulnerability.get('stringsFound') - description = "**This string was found:** {}".format(found_secret_string) - if vulnerability.get('page_id') is not None: - description += "\n**Confluence URL:** [{}]({})".format(vulnerability.get('url'), vulnerability.get('url')) - description += "\n**Confluence Page ID:** {}".format(vulnerability.get('page_id')) + found_secret_string = vulnerability.get("stringsFound") + description = "**This string was found:** {}".format( + found_secret_string + ) + if vulnerability.get("page_id") is not None: + description += "\n**Confluence URL:** [{}]({})".format( + vulnerability.get("url"), vulnerability.get("url") + ) + description += "\n**Confluence Page ID:** {}".format( + vulnerability.get("page_id") + ) """General - for all Rusty Hogs""" - file_path = vulnerability.get('path') - if vulnerability.get('date') is not None: - description += "\n**Date:** {}".format(vulnerability.get('date')) + file_path = vulnerability.get("path") + if vulnerability.get("date") is not None: + description += "\n**Date:** {}".format( + vulnerability.get("date") + ) """Finding Title""" if scanner == "Choctaw Hog": title = "{} found in Git path {} ({})".format( - vulnerability.get('reason'), - vulnerability.get('path'), - vulnerability.get('commitHash')) + vulnerability.get("reason"), + vulnerability.get("path"), + vulnerability.get("commitHash"), + ) elif scanner == "Duroc Hog": title = "{} found in path {}".format( - vulnerability.get('reason'), - vulnerability.get('path')) + vulnerability.get("reason"), vulnerability.get("path") + ) elif scanner == "Gottingen Hog": title = "{} found in Jira ID {} ({})".format( - vulnerability.get('reason'), - vulnerability.get('issue_id'), - vulnerability.get('location')) + vulnerability.get("reason"), + vulnerability.get("issue_id"), + vulnerability.get("location"), + ) elif scanner == "Essex Hog": title = "{} found in Confluence Page ID {}".format( - vulnerability.get('reason'), - vulnerability.get('page_id')) + vulnerability.get("reason"), vulnerability.get("page_id") + ) # create the finding object finding = Finding( title=title, - severity='High', + severity="High", cwe=cwe, description=description, file_path=file_path, static_finding=True, 
dynamic_finding=False, - payload=found_secret_string + payload=found_secret_string, ) finding.description = finding.description.strip() if scanner == "Choctaw Hog": - finding.line = int(vulnerability.get('new_line_num')) + finding.line = int(vulnerability.get("new_line_num")) finding.mitigation = "Please ensure no secret material nor confidential information is kept in clear within git repositories." elif scanner == "Duroc Hog": finding.mitigation = "Please ensure no secret material nor confidential information is kept in clear within directories, files, and archives." From 5e70b393153408ea508da70c83a8a89a273801a3 Mon Sep 17 00:00:00 2001 From: Nicolas Velasquez <7769945+nv-pipo@users.noreply.github.com> Date: Tue, 4 Jul 2023 19:23:38 +0200 Subject: [PATCH 11/85] Extract vulnerability type for Qualys scan import (#8330) --- dojo/tools/qualys/parser.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py index d86c7f4c50..14ecbe564d 100644 --- a/dojo/tools/qualys/parser.py +++ b/dojo/tools/qualys/parser.py @@ -48,6 +48,12 @@ "category", ] +TYPE_MAP = { + "Ig": "INFORMATION GATHERED", + "Practice": "POTENTIAL", + "Vuln": "CONFIRMED", +} + def htmltext(blob): h = html2text.HTML2Text() @@ -148,10 +154,14 @@ def parse_finding(host, tree): # _temp['solution'] = re.sub('Workaround(s)?:.+\n', '', htmltext(vuln_item.findtext('SOLUTION'))) _temp["solution"] = htmltext(vuln_item.findtext("SOLUTION")) + # type + _type = TYPE_MAP.get(vuln_details.findtext("TYPE"), "Unknown") + # Vuln_description _temp["vuln_description"] = "\n".join( [ htmltext(_description), + htmltext("Type: " + _type), htmltext("Category: " + _category), htmltext("QID: " + str(_gid)), htmltext("Port: " + str(_port)), From 3865dff9dbb9a510459968386e788ebeec490f40 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 10:01:52 -0500 Subject: [PATCH 12/85] Bump djangosaml2 from 1.6.0 to 1.7.0 (#8343) Bumps [djangosaml2](https://github.com/IdentityPython/djangosaml2) from 1.6.0 to 1.7.0. - [Release notes](https://github.com/IdentityPython/djangosaml2/releases) - [Changelog](https://github.com/IdentityPython/djangosaml2/blob/master/CHANGES) - [Commits](https://github.com/IdentityPython/djangosaml2/compare/v1.6.0...v1.7.0) --- updated-dependencies: - dependency-name: djangosaml2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 48ef7948f2..2e8c7645a4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -72,7 +72,7 @@ cvss==2.6 django-fieldsignals==0.7.0 hyperlink==21.0.0 django-test-migrations==1.3.0 -djangosaml2==1.6.0 +djangosaml2==1.7.0 drf-spectacular==0.26.3 django-ratelimit==4.0.0 argon2-cffi==21.3.0 From f098bb4b1c811bd67203399a5e6d664ac798717c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 10:02:13 -0500 Subject: [PATCH 13/85] Bump boto3 from 1.26.165 to 1.27.0 (#8342) Bumps [boto3](https://github.com/boto/boto3) from 1.26.165 to 1.27.0. 
- [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.26.165...1.27.0) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2e8c7645a4..1b7264d7cd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.26.165 # Required for Celery Broker AWS (SQS) support +boto3==1.27.0 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From 7c27a1472bd29244cca9dc780f490681d4dcdbce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 10:02:32 -0500 Subject: [PATCH 14/85] Bump pillow from 9.5.0 to 10.0.0 (#8335) Bumps [pillow](https://github.com/python-pillow/Pillow) from 9.5.0 to 10.0.0. - [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/9.5.0...10.0.0) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1b7264d7cd..9fd4f7adfb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,7 +36,7 @@ Markdown==3.4.3 mysqlclient==2.1.1 openpyxl==3.1.2 xlrd==1.2.0 -Pillow==9.5.0 # required by django-imagekit +Pillow==10.0.0 # required by django-imagekit psycopg2-binary==2.9.6 cryptography==41.0.1 python-dateutil==2.8.2 From 69cc53e91ffb1bc753513d01159395e3d40a213b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 11:41:29 -0500 Subject: [PATCH 15/85] Bump lxml from 4.9.2 to 4.9.3 (#8348) Bumps [lxml](https://github.com/lxml/lxml) from 4.9.2 to 4.9.3. - [Release notes](https://github.com/lxml/lxml/releases) - [Changelog](https://github.com/lxml/lxml/blob/master/CHANGES.txt) - [Commits](https://github.com/lxml/lxml/compare/lxml-4.9.2...lxml-4.9.3) --- updated-dependencies: - dependency-name: lxml dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9fd4f7adfb..f0cbe39cae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,7 +31,7 @@ html2text==2020.1.16 humanize==4.7.0 jira==3.5.2 PyGithub==1.58.2 -lxml==4.9.2 +lxml==4.9.3 Markdown==3.4.3 mysqlclient==2.1.1 openpyxl==3.1.2 From ea9505ba8e09d7e34367617c7aa69995a5b5aa28 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 6 Jul 2023 21:18:52 -0500 Subject: [PATCH 16/85] Update dependency postcss from 8.4.24 to v8.4.25 (docs/package.json) (#8356) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docs/package-lock.json | 14 +++++++------- docs/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 2421c15925..21416effb1 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -6,7 +6,7 @@ "": { "devDependencies": { "autoprefixer": "10.4.14", - "postcss": "8.4.24", + "postcss": "8.4.25", "postcss-cli": "10.1.0" } }, @@ -596,9 +596,9 @@ } }, "node_modules/postcss": { - "version": "8.4.24", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz", - "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==", + "version": "8.4.25", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.25.tgz", + "integrity": "sha512-7taJ/8t2av0Z+sQEvNzCkpDynl0tX3uJMCODi6nT3PfASC7dYCWV9aQ+uiCf+KBD4SEFcu+GvJdGdwzQ6OSjCw==", "dev": true, "funding": [ { @@ -1366,9 +1366,9 @@ "dev": true }, "postcss": { - "version": "8.4.24", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz", - "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==", + "version": "8.4.25", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.25.tgz", + "integrity": "sha512-7taJ/8t2av0Z+sQEvNzCkpDynl0tX3uJMCODi6nT3PfASC7dYCWV9aQ+uiCf+KBD4SEFcu+GvJdGdwzQ6OSjCw==", "dev": true, "requires": { "nanoid": "^3.3.6", diff --git a/docs/package.json b/docs/package.json index f3890a7b17..c7e2160786 100644 --- a/docs/package.json +++ b/docs/package.json @@ -1,6 +1,6 @@ { "devDependencies": { - "postcss": "8.4.24", + "postcss": "8.4.25", "autoprefixer": "10.4.14", "postcss-cli": "10.1.0" } From c5a347865d981789c6a30dbe81a2527cf5e22bdf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Jul 2023 09:56:44 -0500 Subject: [PATCH 17/85] Bump boto3 from 1.27.0 to 1.28.0 (#8362) Bumps [boto3](https://github.com/boto/boto3) from 1.27.0 to 1.28.0. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.27.0...1.28.0) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f0cbe39cae..5d7867bf26 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.27.0 # Required for Celery Broker AWS (SQS) support +boto3==1.28.0 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From f13d20d36fda0da0fcabeaf5faddd5fc5e093bb9 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 7 Jul 2023 15:29:06 -0500 Subject: [PATCH 18/85] Update naming convention for product tags in related objects (#8350) --- docs/content/en/getting_started/upgrading.md | 18 ++++++ dojo/filters.py | 64 ++++++++++---------- 2 files changed, 50 insertions(+), 32 deletions(-) diff --git a/docs/content/en/getting_started/upgrading.md b/docs/content/en/getting_started/upgrading.md index 33872b487c..b27f29e1aa 100644 --- a/docs/content/en/getting_started/upgrading.md +++ b/docs/content/en/getting_started/upgrading.md @@ -72,6 +72,24 @@ godojo installations If you have installed DefectDojo on "iron" and wish to upgrade the installation, please see the [instructions in the repo](https://github.com/DefectDojo/godojo/blob/master/docs-and-scripts/upgrading.md). +## Upgrading to DefectDojo Version 2.25.x. + +A few query parameters related to filtering object via API related to a products tags have been renamed to be more consistent with the other "related object tags": + +**Breaking Change** + + - Engagement + - `product__tags__name` -> `product__tags` + - `not_product__tags__name` -> `not_product__tags` + - Test + - `engagement__product__tags__name` -> `engagement__product__tags` + - `not_engagement__product__tags__name` -> `not_engagement__product__tags` + - Finding + - `test__engagement__product__tags__name` -> `test__engagement__product__tags` + - `not_test__engagement__product__tags__name` -> `not_test__engagement__product__tags` + +For all other changes, check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.25.0) for the contents of the release. + ## Upgrading to DefectDojo Version 2.24.x. There are no special instruction for upgrading to 2.24.0. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.24.0) for the contents of the release. 
diff --git a/dojo/filters.py b/dojo/filters.py index 170d5d7ba6..9bcfb6ad9e 100644 --- a/dojo/filters.py +++ b/dojo/filters.py @@ -830,17 +830,17 @@ class ApiEngagementFilter(DojoFilter): product__prod_type = NumberInFilter(field_name='product__prod_type', lookup_expr='in') tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags') - product__tags__name = CharFieldInFilter(field_name='product__tags__name', + help_text='Comma separated list of exact tags') + product__tags = CharFieldInFilter(field_name='product__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags present on product') + help_text='Comma separated list of exact tags present on product') not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on model', exclude='True') - not_product__tags__name = CharFieldInFilter(field_name='product__tags__name', + help_text='Comma separated list of exact tags not present on model', exclude='True') + not_product__tags = CharFieldInFilter(field_name='product__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on product', + help_text='Comma separated list of exact tags not present on product', exclude='True') o = OrderingFilter( @@ -1035,11 +1035,11 @@ class ApiProductFilter(DojoFilter): tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains') tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags') + help_text='Comma separated list of exact tags') not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on product', exclude='True') + help_text='Comma separated list of exact tags not present on product', exclude='True') # DateRangeFilter created = DateRangeFilter() @@ -1145,26 +1145,26 @@ class ApiFindingFilter(DojoFilter): tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags') + help_text='Comma separated list of exact tags') test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags present on test') + help_text='Comma separated list of exact tags present on test') test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags', lookup_expr='in', - help_text='Comma seperated list of exact tags present on engagement') - test__engagement__product__tags__name = CharFieldInFilter(field_name='test__engagement__product__tags__name', + help_text='Comma separated list of exact tags present on engagement') + test__engagement__product__tags = CharFieldInFilter(field_name='test__engagement__product__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags present on product') + help_text='Comma separated list of exact tags present on product') not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name 
contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on model', exclude='True') + help_text='Comma separated list of exact tags not present on model', exclude='True') not_test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on test', exclude='True') + help_text='Comma separated list of exact tags not present on test', exclude='True') not_test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on engagement', + help_text='Comma separated list of exact tags not present on engagement', exclude='True') - not_test__engagement__product__tags__name = CharFieldInFilter(field_name='test__engagement__product__tags__name', + not_test__engagement__product__tags = CharFieldInFilter(field_name='test__engagement__product__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on product', + help_text='Comma separated list of exact tags not present on product', exclude='True') o = OrderingFilter( @@ -1556,11 +1556,11 @@ def __init__(self, *args, **kwargs): class ApiTemplateFindingFilter(DojoFilter): tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags') + help_text='Comma separated list of exact tags') not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on model', exclude='True') + help_text='Comma separated list of exact tags not present on model', exclude='True') o = OrderingFilter( # tuple-mapping retains order @@ -1798,11 +1798,11 @@ class Meta: class ApiEndpointFilter(DojoFilter): tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags') + help_text='Comma separated list of exact tags') not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on model', exclude='True') + help_text='Comma separated list of exact tags not present on model', exclude='True') o = OrderingFilter( # tuple-mapping retains order fields=( @@ -1897,22 +1897,22 @@ def __init__(self, *args, **kwargs): class ApiTestFilter(DojoFilter): tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags') + help_text='Comma separated list of exact tags') engagement__tags = CharFieldInFilter(field_name='engagement__tags', lookup_expr='in', - help_text='Comma seperated list of exact tags present on engagement') - engagement__product__tags__name = CharFieldInFilter(field_name='engagement__product__tags__name', + help_text='Comma separated list of exact tags present on engagement') + engagement__product__tags = 
CharFieldInFilter(field_name='engagement__product__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags present on product') + help_text='Comma separated list of exact tags present on product') not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on model', exclude='True') + help_text='Comma separated list of exact tags not present on model', exclude='True') not_engagement__tags = CharFieldInFilter(field_name='engagement__tags', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on engagement', + help_text='Comma separated list of exact tags not present on engagement', exclude='True') - not_engagement__product__tags__name = CharFieldInFilter(field_name='engagement__product__tags__name', + not_engagement__product__tags = CharFieldInFilter(field_name='engagement__product__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on product', + help_text='Comma separated list of exact tags not present on product', exclude='True') o = OrderingFilter( @@ -1950,11 +1950,11 @@ class Meta: class ApiAppAnalysisFilter(DojoFilter): tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags') + help_text='Comma separated list of exact tags') not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on model', exclude='True') + help_text='Comma separated list of exact tags not present on model', exclude='True') class Meta: model = App_Analysis From e15d1b1dd57aaa1b03d889d1127734271151e575 Mon Sep 17 00:00:00 2001 From: testaccount90009 <122134756+testaccount90009@users.noreply.github.com> Date: Fri, 7 Jul 2023 14:01:05 -0700 Subject: [PATCH 19/85] Update DOCKER.md "run with docker compose using https" (#8361) --- readme-docs/DOCKER.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/readme-docs/DOCKER.md b/readme-docs/DOCKER.md index 4b9a29d2a8..1b026b93bc 100644 --- a/readme-docs/DOCKER.md +++ b/readme-docs/DOCKER.md @@ -289,13 +289,13 @@ To secure the application by https, follow those steps * Generate a CSR (Certificate Signing Request) * Have the CSR signed by a certificate authority * Place the private key and the certificate under the nginx folder -* copy your secrets into: +* copy your secrets into ../nginx/nginx_TLS.conf: ``` server_name your.servername.com; ssl_certificate /etc/nginx/ssl/nginx.crt ssl_certificate_key /etc/nginx/ssl/nginx.key; ``` -*set the GENERATE_TLS_CERTIFICATE != True in the docker-compose.override.https.yml +*set the GENERATE_TLS_CERTIFICATE != True in the docker-compose.override.https.yml * Protect your private key from other users: ``` chmod 400 nginx/*.key From b15ff1813f2365a5356922922f94a15d1eafb605 Mon Sep 17 00:00:00 2001 From: Alejandro Tortolero Date: Fri, 7 Jul 2023 16:24:12 -0500 Subject: [PATCH 20/85] Update files with PEP8 standards in folder dojo/tools #006 (#8319) * Update files in folder dojo/tools/sarif with PEP8 standars. * Update files in folder dojo/tools/scantist with PEP8 standars. 
* Update files in folder dojo/tools/semgrep with PEP8 standards. * Update files in folder dojo/tools/skf with PEP8 standards. * Update files in folder dojo/tools/snyk with PEP8 standards. * Update files in folder dojo/tools/solar_appscreener with PEP8 standards. * Update files in folder dojo/tools/sonarqube with PEP8 standards. * Update files in folder dojo/tools/sonatype with PEP8 standards. * Update files in folder dojo/tools/spotbugs with PEP8 standards. * Update files in folder dojo/tools/ssl_labs with PEP8 standards. * Update files in folder dojo/tools/ssl_labs with PEP8 standards. * Update files in folder dojo/tools/sslscan with PEP8 standards. * Update files in folder dojo/tools/sslyze with PEP8 standards. * Update files in folder dojo/tools/stackhawk with PEP8 standards. * Update files in folder dojo/tools/talisman with PEP8 standards. * Update files in folder dojo/tools/tenable with PEP8 standards. * Update files in folder dojo/tools/terrascan with PEP8 standards. * Update files in folder dojo/tools/testssl with PEP8 standards. * Update files in folder dojo/tools/tfsec with PEP8 standards. * Update files in folder dojo/tools/trivy with PEP8 standards. * Update files in folder dojo/tools/trivy_operator with PEP8 standards. * Update files in folder dojo/tools/trufflehog with PEP8 standards. * Update files in folder dojo/tools/trufflehog3 and trustwave with PEP8 standards. * Update files in folder dojo/tools/trustwave_fusion_api with PEP8 standards. * Update files in folder dojo/tools/twistlock with PEP8 standards. * Update files in folder dojo/tools/vcg with PEP8 standards. * Update files in folder dojo/tools/veracode with PEP8 standards. * Update files in folder dojo/tools/veracode_sca with PEP8 standards. * Update files in folder dojo/tools/wapiti with PEP8 standards. * Update files in folder dojo/tools/wazuh with PEP8 standards. * Update files in folder dojo/tools/wfuzz with PEP8 standards. * Update files in folder dojo/tools/whispers with PEP8 standards. * Update files in folder dojo/tools/whitehat_sentinel with PEP8 standards. * Update files in folder dojo/tools/whitesource with PEP8 standards. * Update files in folder dojo/tools/wpscan with PEP8 standards. * Update files in folder dojo/tools/xanitizer with PEP8 standards. * Update files in folder dojo/tools/yarn_audit with PEP8 standards. * Update files in folder dojo/tools/zap with PEP8 standards. * Change BaseException to Exception * Removing blank space. * Removing unused variable.
--- dojo/tools/sarif/parser.py | 300 ++++++---- dojo/tools/scantist/parser.py | 13 +- dojo/tools/scout_suite/__init__.py | 2 +- dojo/tools/scout_suite/parser.py | 102 ++-- dojo/tools/semgrep/parser.py | 49 +- dojo/tools/skf/__init__.py | 2 +- dojo/tools/skf/parser.py | 48 +- dojo/tools/snyk/parser.py | 161 ++++-- dojo/tools/solar_appscreener/parser.py | 26 +- dojo/tools/sonarqube/parser.py | 187 ++++-- dojo/tools/sonatype/parser.py | 98 ++-- dojo/tools/spotbugs/parser.py | 91 +-- dojo/tools/ssl_labs/__init__.py | 2 +- dojo/tools/ssl_labs/parser.py | 281 ++++++--- dojo/tools/sslscan/parser.py | 66 ++- dojo/tools/sslyze/parser.py | 7 +- dojo/tools/sslyze/parser_json.py | 666 ++++++++++++++-------- dojo/tools/sslyze/parser_xml.py | 136 +++-- dojo/tools/stackhawk/parser.py | 102 ++-- dojo/tools/talisman/parser.py | 10 +- dojo/tools/tenable/csv_format.py | 54 +- dojo/tools/tenable/parser.py | 12 +- dojo/tools/tenable/xml_format.py | 103 +++- dojo/tools/terrascan/parser.py | 35 +- dojo/tools/testssl/parser.py | 78 ++- dojo/tools/tfsec/parser.py | 50 +- dojo/tools/trivy/parser.py | 179 +++--- dojo/tools/trivy_operator/parser.py | 106 ++-- dojo/tools/trufflehog/parser.py | 65 ++- dojo/tools/trufflehog3/parser.py | 132 +++-- dojo/tools/trustwave/__init__.py | 5 +- dojo/tools/trustwave/parser.py | 62 +- dojo/tools/trustwave_fusion_api/parser.py | 46 +- dojo/tools/twistlock/parser.py | 158 +++-- dojo/tools/vcg/parser.py | 123 ++-- dojo/tools/veracode/__init__.py | 2 +- dojo/tools/veracode/parser.py | 284 +++++---- dojo/tools/veracode_sca/parser.py | 152 +++-- dojo/tools/wapiti/parser.py | 46 +- dojo/tools/wazuh/parser.py | 33 +- dojo/tools/wfuzz/parser.py | 2 - dojo/tools/whispers/parser.py | 17 +- dojo/tools/whitehat_sentinel/parser.py | 180 +++--- dojo/tools/whitesource/parser.py | 163 +++--- dojo/tools/wpscan/parser.py | 49 +- dojo/tools/xanitizer/__init__.py | 2 +- dojo/tools/xanitizer/parser.py | 148 +++-- dojo/tools/yarn_audit/parser.py | 118 ++-- dojo/tools/zap/parser.py | 36 +- 49 files changed, 3009 insertions(+), 1780 deletions(-) diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py index 1d5f3ec5ee..d604279218 100644 --- a/dojo/tools/sarif/parser.py +++ b/dojo/tools/sarif/parser.py @@ -8,7 +8,7 @@ logger = logging.getLogger(__name__) -CWE_REGEX = r'cwe-\d+' +CWE_REGEX = r"cwe-\d+" class SarifParser(object): @@ -31,18 +31,18 @@ def get_findings(self, filehandle, test): tree = json.load(filehandle) items = list() # for each runs we just aggregate everything - for run in tree.get('runs', list()): + for run in tree.get("runs", list()): items.extend(self.__get_items_from_run(run)) return items def get_tests(self, scan_type, handle): tree = json.load(handle) tests = list() - for run in tree.get('runs', list()): + for run in tree.get("runs", list()): test = ParserTest( - name=run['tool']['driver']['name'], - type=run['tool']['driver']['name'], - version=run['tool']['driver'].get('version'), + name=run["tool"]["driver"]["name"], + type=run["tool"]["driver"]["name"], + version=run["tool"]["driver"].get("version"), ) test.findings = self.__get_items_from_run(run) tests.append(test) @@ -55,18 +55,18 @@ def __get_items_from_run(self, run): artifacts = get_artifacts(run) # get the timestamp of the run if possible run_date = self.__get_last_invocation_date(run) - for result in run.get('results', list()): + for result in run.get("results", list()): item = get_item(result, rules, artifacts, run_date) if item is not None: items.append(item) return items def __get_last_invocation_date(self, 
data): - invocations = data.get('invocations', []) + invocations = data.get("invocations", []) if len(invocations) == 0: return None # try to get the last 'endTimeUtc' - raw_date = invocations[-1].get('endTimeUtc') + raw_date = invocations[-1].get("endTimeUtc") if raw_date is None: return None # if the data is here we try to convert it to datetime @@ -75,8 +75,8 @@ def __get_last_invocation_date(self, data): def get_rules(run): rules = {} - for item in run['tool']['driver'].get('rules', []): - rules[item['id']] = item + for item in run["tool"]["driver"].get("rules", []): + rules[item["id"]] = item return rules @@ -84,7 +84,7 @@ def get_rules(run): def get_properties_tags(value): if not value: return [] - return value.get('properties', {}).get('tags', []) + return value.get("properties", {}).get("tags", []) def search_cwe(value, cwes): @@ -96,9 +96,9 @@ def search_cwe(value, cwes): def get_rule_cwes(rule): cwes = [] # data of the specification - if 'relationships' in rule and type(rule['relationships']) == list: - for relationship in rule['relationships']: - value = relationship['target']['id'] + if "relationships" in rule and isinstance(rule["relationships"], list): + for relationship in rule["relationships"]: + value = relationship["target"]["id"] search_cwe(value, cwes) return cwes @@ -110,8 +110,8 @@ def get_rule_cwes(rule): def get_result_cwes_properties(result): """Some tools like njsscan store the CWE in the properties of the result""" cwes = [] - if 'properties' in result and 'cwe' in result['properties']: - value = result['properties']['cwe'] + if "properties" in result and "cwe" in result["properties"]: + value = result["properties"]["cwe"] search_cwe(value, cwes) return cwes @@ -119,8 +119,8 @@ def get_result_cwes_properties(result): def get_artifacts(run): artifacts = {} custom_index = 0 # hack because some tool doesn't generate this attribute - for tree_artifact in run.get('artifacts', []): - artifacts[tree_artifact.get('index', custom_index)] = tree_artifact + for tree_artifact in run.get("artifacts", []): + artifacts[tree_artifact.get("index", custom_index)] = tree_artifact custom_index += 1 return artifacts @@ -130,9 +130,9 @@ def get_message_from_multiformatMessageString(data, rule): See here for the specification: https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317468 """ - if rule is not None and 'id' in data: - text = rule['messageStrings'][data['id']].get('text') - arguments = data.get('arguments', []) + if rule is not None and "id" in data: + text = rule["messageStrings"][data["id"]].get("text") + arguments = data.get("arguments", []) # argument substitution for i in range(6): # the specification limit to 6 substitution_str = "{" + str(i) + "}" @@ -142,7 +142,7 @@ def get_message_from_multiformatMessageString(data, rule): return text else: # TODO manage markdown - return data.get('text') + return data.get("text") def cve_try(val): @@ -156,90 +156,129 @@ def cve_try(val): def get_title(result, rule): title = None - if 'message' in result: - title = get_message_from_multiformatMessageString(result['message'], rule) + if "message" in result: + title = get_message_from_multiformatMessageString( + result["message"], rule + ) if title is None and rule is not None: - if 'shortDescription' in rule: - title = get_message_from_multiformatMessageString(rule['shortDescription'], rule) - elif 'fullDescription' in rule: - title = get_message_from_multiformatMessageString(rule['fullDescription'], rule) - elif 'name' in rule: - title = rule['name'] - elif 
'id' in rule: - title = rule['id'] + if "shortDescription" in rule: + title = get_message_from_multiformatMessageString( + rule["shortDescription"], rule + ) + elif "fullDescription" in rule: + title = get_message_from_multiformatMessageString( + rule["fullDescription"], rule + ) + elif "name" in rule: + title = rule["name"] + elif "id" in rule: + title = rule["id"] if title is None: - raise ValueError('No information found to create a title') + raise ValueError("No information found to create a title") return textwrap.shorten(title, 150) def get_snippet(result): snippet = None - if 'locations' in result: - location = result['locations'][0] - if 'physicalLocation' in location: - if 'region' in location['physicalLocation']: - if 'snippet' in location['physicalLocation']['region']: - if 'text' in location['physicalLocation']['region']['snippet']: - snippet = location['physicalLocation']['region']['snippet']['text'] - if snippet is None and 'contextRegion' in location['physicalLocation']: - if 'snippet' in location['physicalLocation']['contextRegion']: - if 'text' in location['physicalLocation']['contextRegion']['snippet']: - snippet = location['physicalLocation']['contextRegion']['snippet']['text'] + if "locations" in result: + location = result["locations"][0] + if "physicalLocation" in location: + if "region" in location["physicalLocation"]: + if "snippet" in location["physicalLocation"]["region"]: + if ( + "text" + in location["physicalLocation"]["region"]["snippet"] + ): + snippet = location["physicalLocation"]["region"][ + "snippet" + ]["text"] + if ( + snippet is None + and "contextRegion" in location["physicalLocation"] + ): + if "snippet" in location["physicalLocation"]["contextRegion"]: + if ( + "text" + in location["physicalLocation"]["contextRegion"][ + "snippet" + ] + ): + snippet = location["physicalLocation"][ + "contextRegion" + ]["snippet"]["text"] return snippet def get_codeFlowsDescription(codeFlows): - description = '' + description = "" for codeFlow in codeFlows: - if 'threadFlows' not in codeFlow: + if "threadFlows" not in codeFlow: continue - for threadFlow in codeFlow['threadFlows']: - if 'locations' not in threadFlow: + for threadFlow in codeFlow["threadFlows"]: + if "locations" not in threadFlow: continue - description = '**Code flow:**\n' - for location in threadFlow['locations']: - physicalLocation = location['location']['physicalLocation'] - region = physicalLocation['region'] - description += '\t' + physicalLocation['artifactLocation'][ - 'uri'] if 'byteOffset' in region else '\t' + physicalLocation['artifactLocation']['uri'] + ':' + str( - region['startLine']) - if 'startColumn' in region: - description += ':' + str(region['startColumn']) - if 'snippet' in region: - description += '\t-\t' + region['snippet']['text'] - description += '\n' + description = "**Code flow:**\n" + for location in threadFlow["locations"]: + physicalLocation = location["location"]["physicalLocation"] + region = physicalLocation["region"] + description += ( + "\t" + physicalLocation["artifactLocation"]["uri"] + if "byteOffset" in region + else "\t" + + physicalLocation["artifactLocation"]["uri"] + + ":" + + str(region["startLine"]) + ) + if "startColumn" in region: + description += ":" + str(region["startColumn"]) + if "snippet" in region: + description += "\t-\t" + region["snippet"]["text"] + description += "\n" return description def get_description(result, rule): - description = '' - message = '' - if 'message' in result: - message = 
get_message_from_multiformatMessageString(result['message'], rule) - description += '**Result message:** {}\n'.format(message) + description = "" + message = "" + if "message" in result: + message = get_message_from_multiformatMessageString( + result["message"], rule + ) + description += "**Result message:** {}\n".format(message) if get_snippet(result) is not None: - description += '**Snippet:**\n```{}```\n'.format(get_snippet(result)) + description += "**Snippet:**\n```{}```\n".format(get_snippet(result)) if rule is not None: - if 'name' in rule: - description += '**Rule name:** {}\n'.format(rule.get('name')) - shortDescription = '' - if 'shortDescription' in rule: - shortDescription = get_message_from_multiformatMessageString(rule['shortDescription'], rule) + if "name" in rule: + description += "**Rule name:** {}\n".format(rule.get("name")) + shortDescription = "" + if "shortDescription" in rule: + shortDescription = get_message_from_multiformatMessageString( + rule["shortDescription"], rule + ) if shortDescription != message: - description += '**Rule short description:** {}\n'.format(shortDescription) - if 'fullDescription' in rule: - fullDescription = get_message_from_multiformatMessageString(rule['fullDescription'], rule) - if fullDescription != message and fullDescription != shortDescription: - description += '**Rule full description:** {}\n'.format(fullDescription) - - if len(result.get('codeFlows', [])) > 0: - description += get_codeFlowsDescription(result['codeFlows']) - - if description.endswith('\n'): + description += "**Rule short description:** {}\n".format( + shortDescription + ) + if "fullDescription" in rule: + fullDescription = get_message_from_multiformatMessageString( + rule["fullDescription"], rule + ) + if ( + fullDescription != message + and fullDescription != shortDescription + ): + description += "**Rule full description:** {}\n".format( + fullDescription + ) + + if len(result.get("codeFlows", [])) > 0: + description += get_codeFlowsDescription(result["codeFlows"]) + + if description.endswith("\n"): description = description[:-1] return description @@ -248,11 +287,13 @@ def get_description(result, rule): def get_references(rule): reference = None if rule is not None: - if 'helpUri' in rule: - reference = rule['helpUri'] - elif 'help' in rule: - helpText = get_message_from_multiformatMessageString(rule['help'], rule) - if helpText.startswith('http'): + if "helpUri" in rule: + reference = rule["helpUri"] + elif "help" in rule: + helpText = get_message_from_multiformatMessageString( + rule["help"], rule + ) + if helpText.startswith("http"): reference = helpText return reference @@ -260,11 +301,11 @@ def get_references(rule): def cvss_to_severity(cvss): severity_mapping = { - 1: 'Info', - 2: 'Low', - 3: 'Medium', - 4: 'High', - 5: 'Critical' + 1: "Info", + 2: "Low", + 3: "Medium", + 4: "High", + 5: "Critical", } if cvss >= 9: @@ -280,30 +321,33 @@ def cvss_to_severity(cvss): def get_severity(result, rule): - severity = result.get('level') + severity = result.get("level") if severity is None and rule is not None: # get the severity from the rule - if 'defaultConfiguration' in rule: - severity = rule['defaultConfiguration'].get('level') - - if 'note' == severity: - return 'Info' - elif 'warning' == severity: - return 'Medium' - elif 'error' == severity: - return 'High' + if "defaultConfiguration" in rule: + severity = rule["defaultConfiguration"].get("level") + + if "note" == severity: + return "Info" + elif "warning" == severity: + return "Medium" + elif "error" == 
severity: + return "High" else: - return 'Medium' + return "Medium" def get_item(result, rules, artifacts, run_date): - # see https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html / 3.27.9 - kind = result.get('kind', 'fail') - if kind != 'fail': + # see + # https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html + # / 3.27.9 + kind = result.get("kind", "fail") + if kind != "fail": return None # if finding is suppressed, mark it as False Positive - # Note: see https://docs.oasis-open.org/sarif/sarif/v2.0/csprd02/sarif-v2.0-csprd02.html#_Toc10127852 + # Note: see + # https://docs.oasis-open.org/sarif/sarif/v2.0/csprd02/sarif-v2.0-csprd02.html#_Toc10127852 suppressed = False if result.get("suppressions"): suppressed = True @@ -312,21 +356,21 @@ def get_item(result, rules, artifacts, run_date): file_path = None line = None if "locations" in result: - location = result['locations'][0] - if 'physicalLocation' in location: - file_path = location['physicalLocation']['artifactLocation']['uri'] + location = result["locations"][0] + if "physicalLocation" in location: + file_path = location["physicalLocation"]["artifactLocation"]["uri"] # 'region' attribute is optionnal - if 'region' in location['physicalLocation']: + if "region" in location["physicalLocation"]: # https://docs.oasis-open.org/sarif/sarif/v2.0/csprd02/sarif-v2.0-csprd02.html / 3.30.1 # need to check whether it is byteOffset - if 'byteOffset' in location['physicalLocation']['region']: + if "byteOffset" in location["physicalLocation"]["region"]: pass else: - line = location['physicalLocation']['region']['startLine'] + line = location["physicalLocation"]["region"]["startLine"] # test rule link - rule = rules.get(result.get('ruleId')) + rule = rules.get(result.get("ruleId")) finding = Finding( title=get_title(result, rule), @@ -341,20 +385,21 @@ def get_item(result, rules, artifacts, run_date): references=get_references(rule), ) - if 'ruleId' in result: - finding.vuln_id_from_tool = result['ruleId'] + if "ruleId" in result: + finding.vuln_id_from_tool = result["ruleId"] # for now we only support when the id of the rule is a CVE - if cve_try(result['ruleId']): - finding.unsaved_vulnerability_ids = [cve_try(result['ruleId'])] + if cve_try(result["ruleId"]): + finding.unsaved_vulnerability_ids = [cve_try(result["ruleId"])] # some time the rule id is here but the tool doesn't define it if rule is not None: cwes_extracted = get_rule_cwes(rule) if len(cwes_extracted) > 0: finding.cwe = cwes_extracted[-1] - # Some tools such as GitHub or Grype return the severity in properties instead - if 'properties' in rule and 'security-severity' in rule['properties']: - cvss = float(rule['properties']['security-severity']) + # Some tools such as GitHub or Grype return the severity in properties + # instead + if "properties" in rule and "security-severity" in rule["properties"]: + cvss = float(rule["properties"]["security-severity"]) severity = cvss_to_severity(cvss) finding.cvssv3_score = cvss finding.severity = severity @@ -366,7 +411,9 @@ def get_item(result, rules, artifacts, run_date): # manage fixes provided in the report if "fixes" in result: - finding.mitigation = "\n".join([fix.get('description', {}).get("text") for fix in result["fixes"]]) + finding.mitigation = "\n".join( + [fix.get("description", {}).get("text") for fix in result["fixes"]] + ) if run_date: finding.date = run_date @@ -378,16 +425,19 @@ def get_item(result, rules, artifacts, run_date): # manage fingerprints # fingerprinting in SARIF 
is more complete than in current implementation # SARIF standard make it possible to have multiple version in the same report - # for now we just take the first one and keep the format to be able to compare it + # for now we just take the first one and keep the format to be able to + # compare it if result.get("fingerprints"): hashes = get_fingerprints_hashes(result["fingerprints"]) first_item = next(iter(hashes.items())) - finding.unique_id_from_tool = first_item[1]['value'] + finding.unique_id_from_tool = first_item[1]["value"] elif result.get("partialFingerprints"): # for this one we keep an order to have id that could be compared hashes = get_fingerprints_hashes(result["partialFingerprints"]) sorted_hashes = sorted(hashes.keys()) - finding.unique_id_from_tool = "|".join([f'{key}:{hashes[key]["value"]}' for key in sorted_hashes]) + finding.unique_id_from_tool = "|".join( + [f'{key}:{hashes[key]["value"]}' for key in sorted_hashes] + ) return finding diff --git a/dojo/tools/scantist/parser.py b/dojo/tools/scantist/parser.py index e84fa1c2e8..d4b1e6c076 100644 --- a/dojo/tools/scantist/parser.py +++ b/dojo/tools/scantist/parser.py @@ -37,6 +37,7 @@ def get_items(self, tree, test): test: : purpose: parses input rawto extract dojo """ + def get_findings(vuln, test): """ vuln : input vulnerable node @@ -49,7 +50,7 @@ def get_findings(vuln, test): component_name = vuln.get("Library") component_version = vuln.get("Library Version") - title = vulnerability_id + '|' + component_name + title = vulnerability_id + "|" + component_name description = vuln.get("Description") file_path = vuln.get("File Path", "") @@ -65,12 +66,12 @@ def get_findings(vuln, test): severity=severity, cwe=cwe, mitigation=mitigation, - references=vuln.get('references'), + references=vuln.get("references"), file_path=file_path, component_name=component_name, component_version=component_version, - severity_justification=vuln.get('severity_justification'), - dynamic_finding=True + severity_justification=vuln.get("severity_justification"), + dynamic_finding=True, ) if vulnerability_id: finding.unsaved_vulnerability_ids = [vulnerability_id] @@ -82,7 +83,9 @@ def get_findings(vuln, test): if item: hash_key = hashlib.md5( - node.get('Public ID').encode('utf-8') + node.get('Library').encode('utf-8')).hexdigest() + node.get("Public ID").encode("utf-8") + + node.get("Library").encode("utf-8") + ).hexdigest() items[hash_key] = get_findings(node, test) diff --git a/dojo/tools/scout_suite/__init__.py b/dojo/tools/scout_suite/__init__.py index e2b8f0a3a1..237e49125f 100644 --- a/dojo/tools/scout_suite/__init__.py +++ b/dojo/tools/scout_suite/__init__.py @@ -1 +1 @@ -__author__ = 'Hasan Tayyar Besik' +__author__ = "Hasan Tayyar Besik" diff --git a/dojo/tools/scout_suite/parser.py b/dojo/tools/scout_suite/parser.py index d66aca4583..038efd5251 100644 --- a/dojo/tools/scout_suite/parser.py +++ b/dojo/tools/scout_suite/parser.py @@ -1,4 +1,3 @@ - import json import textwrap from datetime import datetime @@ -26,8 +25,8 @@ def get_description_for_scan_types(self, scan_type): def get_tests(self, scan_type, handle): content = handle.read() - if type(content) is bytes: - content = content.decode('utf-8') + if isinstance(content, bytes): + content = content.decode("utf-8") raw_data = content.replace("scoutsuite_results =", "") data = json.loads(raw_data) @@ -35,30 +34,49 @@ def get_tests(self, scan_type, handle): last_run = data["last_run"] test_description = "" - test_description = "%s**Account:** `%s`\n" % (test_description, account_id) - 
test_description = "%s**Provider:** %s\n" % (test_description, data["provider_name"]) - test_description = "%s**Ruleset:** `%s`\n" % (test_description, last_run["ruleset_name"]) - test_description = "%s**Ruleset Description:** %s\n" % (test_description, last_run["ruleset_about"]) + test_description = "%s**Account:** `%s`\n" % ( + test_description, + account_id, + ) + test_description = "%s**Provider:** %s\n" % ( + test_description, + data["provider_name"], + ) + test_description = "%s**Ruleset:** `%s`\n" % ( + test_description, + last_run["ruleset_name"], + ) + test_description = "%s**Ruleset Description:** %s\n" % ( + test_description, + last_run["ruleset_about"], + ) # Summary of Services - test_description = "%s\n\n Services | Checked Items | Flagged Items | Max Level | Resource Count | Rules Count" % (test_description) - test_description = "%s\n:---|---:|---:|---:|---:|---:" % (test_description) + test_description = ( + "%s\n\n Services | Checked Items | Flagged Items | Max Level | Resource Count | Rules Count" + % (test_description) + ) + test_description = "%s\n:---|---:|---:|---:|---:|---:" % ( + test_description + ) for service, items in list(last_run["summary"].items()): test_description += "\n" - test_description += "|".join([ - service, - str(items["checked_items"]), - str(items["flagged_items"]), - str(items["max_level"]), - str(items["resources_count"]), - str(items["rules_count"]) - ]) + test_description += "|".join( + [ + service, + str(items["checked_items"]), + str(items["flagged_items"]), + str(items["max_level"]), + str(items["resources_count"]), + str(items["rules_count"]), + ] + ) tests = list() test = ParserTest( name=self.ID, type=data["provider_name"], - version=last_run.get('version'), + version=last_run.get("version"), ) test.description = test_description @@ -68,8 +86,8 @@ def get_tests(self, scan_type, handle): def get_findings(self, filename, test): content = filename.read() - if type(content) is bytes: - content = content.decode('utf-8') + if isinstance(content, bytes): + content = content.decode("utf-8") raw_data = content.replace("scoutsuite_results =", "") data = json.loads(raw_data) return self.__get_items(data) @@ -79,7 +97,9 @@ def __get_items(self, data): # get the date of the run last_run_date = None if "time" in data.get("last_run", {}): - last_run_date = datetime.strptime(data["last_run"]["time"][0:10], "%Y-%m-%d").date() + last_run_date = datetime.strptime( + data["last_run"]["time"][0:10], "%Y-%m-%d" + ).date() # Configured Services for service_name in data["services"]: @@ -87,15 +107,23 @@ def __get_items(self, data): for finding_name in service_item.get("findings", []): finding = service_item["findings"][finding_name] for name in finding["items"]: - description_text = finding.get("rationale", "") + "\n**Location:** " + name + "\n\n---\n" - key = name.split('.') + description_text = ( + finding.get("rationale", "") + + "\n**Location:** " + + name + + "\n\n---\n" + ) + key = name.split(".") i = 1 lookup = service_item while i < len(key): if key[i] in lookup: - if (type(lookup[key[i]]) is dict): + if isinstance(lookup[key[i]], dict): lookup = lookup[key[i]] - if (key[i - 1] == "security_groups" or key[i - 1] == "PolicyDocument"): + if ( + key[i - 1] == "security_groups" + or key[i - 1] == "PolicyDocument" + ): break i = i + 1 @@ -104,16 +132,20 @@ def __get_items(self, data): self.item_data = "" find = Finding( - title=textwrap.shorten(finding['description'], 150), + title=textwrap.shorten(finding["description"], 150), date=last_run_date, cwe=1032, 
# Security Configuration Weaknesses, would like to fine tune description=description_text, severity=self.getCriticalityRating(finding["level"]), mitigation=finding.get("remediation"), - file_path=name, # we use file_path as a hack as there is no notion of "service" in finding today + file_path=name, + # we use file_path as a hack as there is no notion of + # "service" in finding today dynamic_finding=False, static_finding=True, - vuln_id_from_tool=":".join([data["provider_code"], finding_name]), + vuln_id_from_tool=":".join( + [data["provider_code"], finding_name] + ), ) if finding.get("references"): find.references = "\n".join(finding["references"]) @@ -127,8 +159,8 @@ def formatview(self, depth): else: return "" - def recursive_print(self, src, depth=0, key=''): - tabs = lambda n: ' ' * n * 2 + def recursive_print(self, src, depth=0, key=""): + def tabs(n): return " " * n * 2 if isinstance(src, dict): for key, value in src.items(): if isinstance(src, str): @@ -141,9 +173,15 @@ def recursive_print(self, src, depth=0, key=''): if self.pdepth != depth: self.item_data = self.item_data + "\n" if key: - self.item_data = self.item_data + self.formatview(depth) + '**%s:** %s\n\n' % (key.title(), src) + self.item_data = ( + self.item_data + + self.formatview(depth) + + "**%s:** %s\n\n" % (key.title(), src) + ) else: - self.item_data = self.item_data + self.formatview(depth) + '%s\n' % src + self.item_data = ( + self.item_data + self.formatview(depth) + "%s\n" % src + ) self.pdepth = depth # Criticality rating diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py index e9a04336d4..12a95e0557 100644 --- a/dojo/tools/semgrep/parser.py +++ b/dojo/tools/semgrep/parser.py @@ -4,7 +4,6 @@ class SemgrepParser(object): - def get_scan_types(self): return ["Semgrep JSON Report"] @@ -25,7 +24,7 @@ def get_findings(self, filename, test): title=item["check_id"], severity=self.convert_severity(item["extra"]["severity"]), description=self.get_description(item), - file_path=item['path'], + file_path=item["path"], line=item["start"]["line"], static_finding=True, dynamic_finding=False, @@ -34,26 +33,40 @@ def get_findings(self, filename, test): ) # manage CWE - if 'cwe' in item["extra"]["metadata"]: + if "cwe" in item["extra"]["metadata"]: if isinstance(item["extra"]["metadata"].get("cwe"), list): - finding.cwe = int(item["extra"]["metadata"].get("cwe")[0].partition(':')[0].partition('-')[2]) + finding.cwe = int( + item["extra"]["metadata"] + .get("cwe")[0] + .partition(":")[0] + .partition("-")[2] + ) else: - finding.cwe = int(item["extra"]["metadata"].get("cwe").partition(':')[0].partition('-')[2]) + finding.cwe = int( + item["extra"]["metadata"] + .get("cwe") + .partition(":")[0] + .partition("-")[2] + ) # manage references from metadata - if 'references' in item["extra"]["metadata"]: - finding.references = "\n".join(item["extra"]["metadata"]["references"]) + if "references" in item["extra"]["metadata"]: + finding.references = "\n".join( + item["extra"]["metadata"]["references"] + ) # manage mitigation from metadata - if 'fix' in item["extra"]: + if "fix" in item["extra"]: finding.mitigation = item["extra"]["fix"] - elif 'fix_regex' in item["extra"]: - finding.mitigation = "\n".join([ - "**You can automaticaly apply this regex:**", - "\n```\n", - json.dumps(item["extra"]["fix_regex"]), - "\n```\n", - ]) + elif "fix_regex" in item["extra"]: + finding.mitigation = "\n".join( + [ + "**You can automaticaly apply this regex:**", + "\n```\n", + json.dumps(item["extra"]["fix_regex"]), + "\n```\n", + 
] + ) dupe_key = finding.title + finding.file_path + str(finding.line) @@ -76,13 +89,13 @@ def convert_severity(self, val): raise ValueError(f"Unknown value for severity: {val}") def get_description(self, item): - description = '' + description = "" message = item["extra"]["message"] - description += '**Result message:** {}\n'.format(message) + description += "**Result message:** {}\n".format(message) snippet = item["extra"].get("lines") if snippet is not None: - description += '**Snippet:**\n```{}```\n'.format(snippet) + description += "**Snippet:**\n```{}```\n".format(snippet) return description diff --git a/dojo/tools/skf/__init__.py b/dojo/tools/skf/__init__.py index 56a56d5116..ad180af05d 100644 --- a/dojo/tools/skf/__init__.py +++ b/dojo/tools/skf/__init__.py @@ -1 +1 @@ -__author__ = 'martin.marsicano' +__author__ = "martin.marsicano" diff --git a/dojo/tools/skf/parser.py b/dojo/tools/skf/parser.py index c8d15250b1..8200075693 100644 --- a/dojo/tools/skf/parser.py +++ b/dojo/tools/skf/parser.py @@ -7,7 +7,6 @@ class ColumnMappingStrategy(object): - mapped_column = None def __init__(self): @@ -17,27 +16,29 @@ def map_column_value(self, finding, column_value): pass def process_column(self, column_name, column_value, finding): - - if column_name.lower() == self.mapped_column and column_value is not None: + if ( + column_name.lower() == self.mapped_column + and column_value is not None + ): self.map_column_value(finding, column_value) elif self.successor is not None: self.successor.process_column(column_name, column_value, finding) class DateColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'date' + self.mapped_column = "date" super(DateColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): - finding.date = datetime.strptime(column_value, '%Y-%m-%d %H:%M:%S').date() + finding.date = datetime.strptime( + column_value, "%Y-%m-%d %H:%M:%S" + ).date() class TitleColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'title' + self.mapped_column = "title" super(TitleColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -45,9 +46,8 @@ def map_column_value(self, finding, column_value): class DescriptionColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'description' + self.mapped_column = "description" super(DescriptionColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -55,9 +55,8 @@ def map_column_value(self, finding, column_value): class MitigationColumnMappingStrategy(ColumnMappingStrategy): - def __init__(self): - self.mapped_column = 'mitigation' + self.mapped_column = "mitigation" super(MitigationColumnMappingStrategy, self).__init__() def map_column_value(self, finding, column_value): @@ -65,7 +64,6 @@ def map_column_value(self, finding, column_value): class SKFParser(object): - def get_scan_types(self): return ["SKF Scan"] @@ -95,18 +93,20 @@ def read_column_names(self, column_names, row): def get_findings(self, filename, test): content = filename.read() - if type(content) is bytes: - content = content.decode('utf-8') + if isinstance(content, bytes): + content = content.decode("utf-8") column_names = dict() chain = self.create_chain() row_number = 0 - reader = csv.reader(io.StringIO(content), delimiter=',', quotechar='"', escapechar='\\') + reader = csv.reader( + io.StringIO(content), delimiter=",", quotechar='"', escapechar="\\" + ) dupes = dict() for 
row in reader: finding = Finding(test=test) - finding.severity = 'Info' + finding.severity = "Info" if row_number == 0: self.read_column_names(column_names, row) @@ -115,11 +115,21 @@ def get_findings(self, filename, test): column_number = 0 for column in row: - chain.process_column(column_names[column_number], column, finding) + chain.process_column( + column_names[column_number], column, finding + ) column_number += 1 if finding is not None: - key = hashlib.sha256(str(finding.severity + '|' + finding.title + '|' + finding.description).encode('utf-8')).hexdigest() + key = hashlib.sha256( + str( + finding.severity + + "|" + + finding.title + + "|" + + finding.description + ).encode("utf-8") + ).hexdigest() if key not in dupes: dupes[key] = finding diff --git a/dojo/tools/snyk/parser.py b/dojo/tools/snyk/parser.py index 304935ba20..0918fc7f11 100755 --- a/dojo/tools/snyk/parser.py +++ b/dojo/tools/snyk/parser.py @@ -5,7 +5,6 @@ class SnykParser(object): - def get_scan_types(self): return ["Snyk Scan"] @@ -16,7 +15,6 @@ def get_description_for_scan_types(self, scan_type): return "Snyk output file (snyk test --json > snyk.json) can be imported in JSON format." def get_findings(self, json_output, test): - reportTree = self.parse_json(json_output) if isinstance(reportTree, list): @@ -34,58 +32,72 @@ def parse_json(self, json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise ValueError("Invalid format") return tree def get_items(self, tree, test): items = {} - target_file = tree.get('displayTargetFile', None) - upgrades = tree.get('remediation', {}).get('upgrade', None) - if 'vulnerabilities' in tree: - vulnerabilityTree = tree['vulnerabilities'] + target_file = tree.get("displayTargetFile", None) + upgrades = tree.get("remediation", {}).get("upgrade", None) + if "vulnerabilities" in tree: + vulnerabilityTree = tree["vulnerabilities"] for node in vulnerabilityTree: - item = self.get_item(node, test, target_file=target_file, upgrades=upgrades) - unique_key = node['title'] + str(node['packageName'] + str( - node['version']) + str(node['from']) + str(node['id'])) + item = self.get_item( + node, test, target_file=target_file, upgrades=upgrades + ) + unique_key = node["title"] + str( + node["packageName"] + + str(node["version"]) + + str(node["from"]) + + str(node["id"]) + ) items[unique_key] = item return list(items.values()) def get_item(self, vulnerability, test, target_file=None, upgrades=None): - # vulnerable and unaffected versions can be in string format for a single vulnerable version, # or an array for multiple versions depending on the language. 
- if isinstance(vulnerability['semver']['vulnerable'], list): - vulnerable_versions = ", ".join(vulnerability['semver']['vulnerable']) + if isinstance(vulnerability["semver"]["vulnerable"], list): + vulnerable_versions = ", ".join( + vulnerability["semver"]["vulnerable"] + ) else: - vulnerable_versions = vulnerability['semver']['vulnerable'] + vulnerable_versions = vulnerability["semver"]["vulnerable"] # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss - if 'cvssScore' in vulnerability: - if vulnerability['cvssScore'] is None: - severity = vulnerability['severity'].title() - # If we're dealing with a license finding, there will be no cvssScore - elif vulnerability['cvssScore'] <= 3.9: + if "cvssScore" in vulnerability: + if vulnerability["cvssScore"] is None: + severity = vulnerability["severity"].title() + # If we're dealing with a license finding, there will be no + # cvssScore + elif vulnerability["cvssScore"] <= 3.9: severity = "Low" - elif vulnerability['cvssScore'] >= 4.0 and vulnerability['cvssScore'] <= 6.9: + elif ( + vulnerability["cvssScore"] >= 4.0 + and vulnerability["cvssScore"] <= 6.9 + ): severity = "Medium" - elif vulnerability['cvssScore'] >= 7.0 and vulnerability['cvssScore'] <= 8.9: + elif ( + vulnerability["cvssScore"] >= 7.0 + and vulnerability["cvssScore"] <= 8.9 + ): severity = "High" else: severity = "Critical" else: # Re-assign 'severity' directly - severity = vulnerability['severity'].title() + severity = vulnerability["severity"].title() # Construct "file_path" removing versions - vulnPath = '' - for index, item in enumerate(vulnerability['from']): + vulnPath = "" + for index, item in enumerate(vulnerability["from"]): if index == 0: vulnPath += "@".join(item.split("@")[0:-1]) else: @@ -93,19 +105,28 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None): # create the finding object finding = Finding( - title=vulnerability['from'][0] + ": " + vulnerability['title'], + title=vulnerability["from"][0] + ": " + vulnerability["title"], test=test, severity=severity, - severity_justification="Issue severity of: **" + severity + "** from a base " + - "CVSS score of: **" + str(vulnerability.get('cvssScore')) + "**", - description="## Component Details\n - **Vulnerable Package**: " + - vulnerability['packageName'] + "\n- **Current Version**: " + str( - vulnerability['version']) + "\n- **Vulnerable Version(s)**: " + - vulnerable_versions + "\n- **Vulnerable Path**: " + " > ".join( - vulnerability['from']) + "\n" + vulnerability['description'], + severity_justification="Issue severity of: **" + + severity + + "** from a base " + + "CVSS score of: **" + + str(vulnerability.get("cvssScore")) + + "**", + description="## Component Details\n - **Vulnerable Package**: " + + vulnerability["packageName"] + + "\n- **Current Version**: " + + str(vulnerability["version"]) + + "\n- **Vulnerable Version(s)**: " + + vulnerable_versions + + "\n- **Vulnerable Path**: " + + " > ".join(vulnerability["from"]) + + "\n" + + vulnerability["description"], mitigation="A fix (if available) will be provided in the description.", - component_name=vulnerability['packageName'], - component_version=vulnerability['version'], + component_name=vulnerability["packageName"], + component_version=vulnerability["version"], false_p=False, duplicate=False, out_of_scope=False, @@ -113,42 +134,47 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None): static_finding=True, dynamic_finding=False, file_path=vulnPath, - vuln_id_from_tool=vulnerability['id'], 
+ vuln_id_from_tool=vulnerability["id"], ) finding.unsaved_tags = [] # CVSSv3 vector - if vulnerability.get('CVSSv3'): - finding.cvssv3 = CVSS3(vulnerability['CVSSv3']).clean_vector() + if vulnerability.get("CVSSv3"): + finding.cvssv3 = CVSS3(vulnerability["CVSSv3"]).clean_vector() # manage CVE and CWE with idnitifiers - cwe_references = '' - if 'identifiers' in vulnerability: - if 'CVE' in vulnerability['identifiers']: - vulnerability_ids = vulnerability['identifiers']['CVE'] + cwe_references = "" + if "identifiers" in vulnerability: + if "CVE" in vulnerability["identifiers"]: + vulnerability_ids = vulnerability["identifiers"]["CVE"] if vulnerability_ids: finding.unsaved_vulnerability_ids = vulnerability_ids - if 'CWE' in vulnerability['identifiers']: - cwes = vulnerability['identifiers']['CWE'] + if "CWE" in vulnerability["identifiers"]: + cwes = vulnerability["identifiers"]["CWE"] if cwes: - # Per the current json format, if several CWEs, take the first one. + # Per the current json format, if several CWEs, take the + # first one. finding.cwe = int(cwes[0].split("-")[1]) - if len(vulnerability['identifiers']['CVE']) > 1: - cwe_references = ', '.join(cwes) + if len(vulnerability["identifiers"]["CVE"]) > 1: + cwe_references = ", ".join(cwes) else: finding.cwe = 1035 - references = '' - if 'id' in vulnerability: - references = "**SNYK ID**: https://app.snyk.io/vuln/{}\n\n".format(vulnerability['id']) + references = "" + if "id" in vulnerability: + references = "**SNYK ID**: https://app.snyk.io/vuln/{}\n\n".format( + vulnerability["id"] + ) if cwe_references: - references += "Several CWEs were reported: \n\n{}\n".format(cwe_references) + references += "Several CWEs were reported: \n\n{}\n".format( + cwe_references + ) # Append vuln references to references section - for item in vulnerability.get('references', []): - references += "**" + item['title'] + "**: " + item['url'] + "\n" + for item in vulnerability.get("references", []): + references += "**" + item["title"] + "**: " + item["url"] + "\n" finding.references = references @@ -160,21 +186,30 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None): # Add the remediation substring to mitigation section if (remediation_index != -1) and (references_index != -1): - finding.mitigation = finding.description[remediation_index:references_index] + finding.mitigation = finding.description[ + remediation_index:references_index + ] # Add Target file if supplied if target_file: - finding.unsaved_tags.append('target_file:{}'.format(target_file)) - finding.mitigation += '\nUpgrade Location: {}'.format(target_file) + finding.unsaved_tags.append("target_file:{}".format(target_file)) + finding.mitigation += "\nUpgrade Location: {}".format(target_file) # Add the upgrade libs list to the mitigation section if upgrades: for current_pack_version, meta_dict in upgrades.items(): - upgraded_pack = meta_dict['upgradeTo'] - tertiary_upgrade_list = meta_dict['upgrades'] - if any(lib.split('@')[0] in finding.mitigation for lib in tertiary_upgrade_list): - finding.unsaved_tags.append('upgrade_to:{}'.format(upgraded_pack)) - finding.mitigation += '\nUpgrade from {} to {} to fix this issue, as well as updating the following:\n - '.format(current_pack_version, upgraded_pack) - finding.mitigation += '\n - '.join(tertiary_upgrade_list) + upgraded_pack = meta_dict["upgradeTo"] + tertiary_upgrade_list = meta_dict["upgrades"] + if any( + lib.split("@")[0] in finding.mitigation + for lib in tertiary_upgrade_list + ): + finding.unsaved_tags.append( + 
"upgrade_to:{}".format(upgraded_pack) + ) + finding.mitigation += "\nUpgrade from {} to {} to fix this issue, as well as updating the following:\n - ".format( + current_pack_version, upgraded_pack + ) + finding.mitigation += "\n - ".join(tertiary_upgrade_list) return finding diff --git a/dojo/tools/solar_appscreener/parser.py b/dojo/tools/solar_appscreener/parser.py index cc21ee81cf..093d476fd2 100644 --- a/dojo/tools/solar_appscreener/parser.py +++ b/dojo/tools/solar_appscreener/parser.py @@ -18,15 +18,15 @@ def get_description_for_scan_types(self, scan_type): return "Solar Appscreener report file can be imported in CSV format from Detailed_Results.csv." def get_findings(self, filename, test): - if filename is None: return () content = filename.read() - if type(content) is bytes: - content = content.decode('utf-8') - reader = csv.DictReader(io.StringIO( - content), delimiter=',', quotechar='"') + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = csv.DictReader( + io.StringIO(content), delimiter=",", quotechar='"' + ) csvarray = [] for row in reader: @@ -35,14 +35,14 @@ def get_findings(self, filename, test): items = list() for row in csvarray: finding = Finding(test=test) - finding.title = row.get('Vulnerability', '') - finding.description = row.get('Description', '') - finding.mitigation = row.get('Recommendations') - finding.references = row.get('Links') - finding.severity = row.get('Severity Level', 'Info') - finding.file_path = row.get('File') - finding.sast_source_file_path = row.get('File') - finding.line = row.get('Line') + finding.title = row.get("Vulnerability", "") + finding.description = row.get("Description", "") + finding.mitigation = row.get("Recommendations") + finding.references = row.get("Links") + finding.severity = row.get("Severity Level", "Info") + finding.file_path = row.get("File") + finding.sast_source_file_path = row.get("File") + finding.line = row.get("Line") if finding.line: if not finding.line.isdigit(): diff --git a/dojo/tools/sonarqube/parser.py b/dojo/tools/sonarqube/parser.py index 24151d401e..d05c70d040 100644 --- a/dojo/tools/sonarqube/parser.py +++ b/dojo/tools/sonarqube/parser.py @@ -10,7 +10,6 @@ class SonarQubeParser(object): - mode = None def set_mode(self, mode): @@ -31,16 +30,23 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): parser = etree.HTMLParser() tree = etree.parse(filename, parser) - if self.mode not in [None, 'detailed']: - raise ValueError("Internal error: Invalid mode " + self.mode + ". Expected: one of None, 'detailed'") + if self.mode not in [None, "detailed"]: + raise ValueError( + "Internal error: Invalid mode " + + self.mode + + ". 
Expected: one of None, 'detailed'" + ) return self.get_items(tree, test, self.mode) def get_items(self, tree, test, mode): - # Check that there is at least one vulnerability (the vulnerabilities table is absent when no vuln are found) - detailTbody = tree.xpath("/html/body/div[contains(@class,'detail')]/table/tbody") + # Check that there is at least one vulnerability (the vulnerabilities + # table is absent when no vuln are found) + detailTbody = tree.xpath( + "/html/body/div[contains(@class,'detail')]/table/tbody" + ) dupes = dict() - if (len(detailTbody) == 2): + if len(detailTbody) == 2: # First is "Detail of the Detected Vulnerabilities" (not present if no vuln) # Second is "Known Security Rules" vulnerabilities_table = list(detailTbody[0].iter("tr")) @@ -57,18 +63,25 @@ def get_items(self, tree, test, mode): for vuln in vulnerabilities_table: vuln_properties = list(vuln.iter("td")) vuln_rule_name = list(vuln_properties[0].iter("a"))[0].text - vuln_severity = self.convert_sonar_severity(vuln_properties[1].text) + vuln_severity = self.convert_sonar_severity( + vuln_properties[1].text + ) vuln_file_path = vuln_properties[2].text vuln_line = vuln_properties[3].text vuln_title = vuln_properties[4].text vuln_mitigation = vuln_properties[5].text vuln_key = vuln_properties[6].text if vuln_title is None or vuln_mitigation is None: - raise ValueError("Parser ValueError: can't find a title or a mitigation for vulnerability of name " + vuln_rule_name) + raise ValueError( + "Parser ValueError: can't find a title or a mitigation for vulnerability of name " + + vuln_rule_name + ) try: vuln_details = rulesDic[vuln_rule_name] vuln_description = self.get_description(vuln_details) - vuln_references = self.get_references(vuln_rule_name, vuln_details) + vuln_references = self.get_references( + vuln_rule_name, vuln_details + ) vuln_cwe = self.get_cwe(vuln_references) except KeyError: vuln_description = "No description provided" @@ -76,34 +89,73 @@ def get_items(self, tree, test, mode): vuln_cwe = 0 if mode is None: self.process_result_file_name_aggregated( - test, dupes, vuln_title, vuln_cwe, vuln_description, vuln_file_path, vuln_line, vuln_severity, vuln_mitigation, vuln_references) + test, + dupes, + vuln_title, + vuln_cwe, + vuln_description, + vuln_file_path, + vuln_line, + vuln_severity, + vuln_mitigation, + vuln_references, + ) else: self.process_result_detailed( - test, dupes, vuln_title, vuln_cwe, vuln_description, vuln_file_path, vuln_line, vuln_severity, vuln_mitigation, vuln_references, vuln_key) + test, + dupes, + vuln_title, + vuln_cwe, + vuln_description, + vuln_file_path, + vuln_line, + vuln_severity, + vuln_mitigation, + vuln_references, + vuln_key, + ) return list(dupes.values()) # Process one vuln from the report for "SonarQube Scan detailed" # Create the finding and add it into the dupes list - def process_result_detailed(self, test, dupes, vuln_title, vuln_cwe, vuln_description, vuln_file_path, vuln_line, vuln_severity, vuln_mitigation, vuln_references, vuln_key): - # vuln_key is the unique id from tool which means that there is basically no aggregation except real duplicates - aggregateKeys = "{}{}{}{}{}".format(vuln_cwe, vuln_title, vuln_description, vuln_file_path, vuln_key) - find = Finding(title=vuln_title, - cwe=int(vuln_cwe), - description=vuln_description, - file_path=vuln_file_path, - line=vuln_line, - test=test, - severity=vuln_severity, - mitigation=vuln_mitigation, - references=vuln_references, - false_p=False, - duplicate=False, - out_of_scope=False, - mitigated=None, - 
impact="No impact provided", - static_finding=True, - dynamic_finding=False, - unique_id_from_tool=vuln_key) + def process_result_detailed( + self, + test, + dupes, + vuln_title, + vuln_cwe, + vuln_description, + vuln_file_path, + vuln_line, + vuln_severity, + vuln_mitigation, + vuln_references, + vuln_key, + ): + # vuln_key is the unique id from tool which means that there is + # basically no aggregation except real duplicates + aggregateKeys = "{}{}{}{}{}".format( + vuln_cwe, vuln_title, vuln_description, vuln_file_path, vuln_key + ) + find = Finding( + title=vuln_title, + cwe=int(vuln_cwe), + description=vuln_description, + file_path=vuln_file_path, + line=vuln_line, + test=test, + severity=vuln_severity, + mitigation=vuln_mitigation, + references=vuln_references, + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated=None, + impact="No impact provided", + static_finding=True, + dynamic_finding=False, + unique_id_from_tool=vuln_key, + ) dupes[aggregateKeys] = find # Process one vuln from the report for "SonarQube Scan" @@ -111,33 +163,58 @@ def process_result_detailed(self, test, dupes, vuln_title, vuln_cwe, vuln_descri # For aggregated findings: # - the description is enriched with each finding line number # - the mitigation (message) is concatenated with each finding's mitigation value - def process_result_file_name_aggregated(self, test, dupes, vuln_title, vuln_cwe, vuln_description, vuln_file_path, vuln_line, vuln_severity, vuln_mitigation, vuln_references): - aggregateKeys = "{}{}{}{}".format(vuln_cwe, vuln_title, vuln_description, vuln_file_path) + def process_result_file_name_aggregated( + self, + test, + dupes, + vuln_title, + vuln_cwe, + vuln_description, + vuln_file_path, + vuln_line, + vuln_severity, + vuln_mitigation, + vuln_references, + ): + aggregateKeys = "{}{}{}{}".format( + vuln_cwe, vuln_title, vuln_description, vuln_file_path + ) descriptionOneOccurence = "Line: {}".format(vuln_line) if aggregateKeys not in dupes: - find = Finding(title=vuln_title, - cwe=int(vuln_cwe), - description=vuln_description + '\n\n-----\nOccurences:\n' + descriptionOneOccurence, - file_path=vuln_file_path, - # No line number because we have aggregated different vulnerabilities that may have different line numbers - test=test, - severity=vuln_severity, - mitigation=vuln_mitigation, - references=vuln_references, - false_p=False, - duplicate=False, - out_of_scope=False, - mitigated=None, - impact="No impact provided", - static_finding=True, - dynamic_finding=False, - nb_occurences=1) + find = Finding( + title=vuln_title, + cwe=int(vuln_cwe), + description=vuln_description + + "\n\n-----\nOccurences:\n" + + descriptionOneOccurence, + file_path=vuln_file_path, + # No line number because we have aggregated different + # vulnerabilities that may have different line numbers + test=test, + severity=vuln_severity, + mitigation=vuln_mitigation, + references=vuln_references, + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated=None, + impact="No impact provided", + static_finding=True, + dynamic_finding=False, + nb_occurences=1, + ) dupes[aggregateKeys] = find else: - # We have already created a finding for this aggregate: updates the description, nb_occurences and mitigation (message field in the report which may vary for each vuln) + # We have already created a finding for this aggregate: updates the + # description, nb_occurences and mitigation (message field in the + # report which may vary for each vuln) find = dupes[aggregateKeys] - find.description = 
"{}\n{}".format(find.description, descriptionOneOccurence) - find.mitigation = "{}\n______\n{}".format(find.mitigation, vuln_mitigation) + find.description = "{}\n{}".format( + find.description, descriptionOneOccurence + ) + find.mitigation = "{}\n______\n{}".format( + find.mitigation, vuln_mitigation + ) find.nb_occurences = find.nb_occurences + 1 def convert_sonar_severity(self, sonar_severity): @@ -154,7 +231,9 @@ def convert_sonar_severity(self, sonar_severity): return "Info" def get_description(self, vuln_details): - rule_description = etree.tostring(vuln_details, pretty_print=True).decode('utf-8', errors='replace') + rule_description = etree.tostring( + vuln_details, pretty_print=True + ).decode("utf-8", errors="replace") rule_description = rule_description.split("

<h2>See", 1)[0]
         rule_description = (str(rule_description)).replace("<h2>", "**")
         rule_description = (str(rule_description)).replace("</h2>
    ", "**") diff --git a/dojo/tools/sonatype/parser.py b/dojo/tools/sonatype/parser.py index b7b88878e1..0e3934f913 100644 --- a/dojo/tools/sonatype/parser.py +++ b/dojo/tools/sonatype/parser.py @@ -25,15 +25,15 @@ def get_findings(self, json_output, test): def get_items(self, tree, test): items = {} - if 'components' in tree: - vulnerability_tree = tree['components'] + if "components" in tree: + vulnerability_tree = tree["components"] for node in vulnerability_tree: item = get_item(node, test) if item is None: continue # TODO - unique_key = node['hash'] + unique_key = node["hash"] items[unique_key] = item return list(items.values()) @@ -41,12 +41,15 @@ def get_items(self, tree, test): def get_item(vulnerability, test): # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss - if vulnerability['securityData'] is not None and len(vulnerability['securityData']['securityIssues']) >= 1: + if ( + vulnerability["securityData"] is not None + and len(vulnerability["securityData"]["securityIssues"]) >= 1 + ): # there can be nothing in the array, or securityData can be null altogether. If the latter, well, nothing much to do? # issues is an array, and there can be 2+ of them, e.g. a cve and a sonatype entry or two cves # Given the current Finding class, if a cve, will be the main. If not a cve, then CVE ref will remain null due to regex. # Others go to references. - main_finding = vulnerability['securityData']['securityIssues'][0] + main_finding = vulnerability["securityData"]["securityIssues"][0] if main_finding.get("source") == "cve": vulnerability_id = main_finding.get("reference") @@ -54,56 +57,75 @@ def get_item(vulnerability, test): # if sonatype of else, will not match Finding model today vulnerability_id = None - if main_finding['severity'] <= 3.9: + if main_finding["severity"] <= 3.9: severity = "Low" - elif main_finding['severity'] > 4.0 and main_finding['severity'] <= 6.9: + elif ( + main_finding["severity"] > 4.0 and main_finding["severity"] <= 6.9 + ): severity = "Medium" - elif main_finding['severity'] and main_finding['severity'] <= 8.9: + elif main_finding["severity"] and main_finding["severity"] <= 8.9: severity = "High" else: severity = "Critical" references = [] - if len(vulnerability['securityData']['securityIssues']) > 1: - for additional_issue in vulnerability['securityData']['securityIssues']: - references.append("{}, {}, {}, {}, {} ".format( - additional_issue.get("reference"), - additional_issue.get("status"), - additional_issue.get("severity"), - additional_issue.get("threatCategory"), - additional_issue.get("url")) + if len(vulnerability["securityData"]["securityIssues"]) > 1: + for additional_issue in vulnerability["securityData"][ + "securityIssues" + ]: + references.append( + "{}, {}, {}, {}, {} ".format( + additional_issue.get("reference"), + additional_issue.get("status"), + additional_issue.get("severity"), + additional_issue.get("threatCategory"), + additional_issue.get("url"), + ) ) - component_id = '' - if 'componentIdentifier' in vulnerability: - if vulnerability['componentIdentifier']['format'] == "maven": + component_id = "" + if "componentIdentifier" in vulnerability: + if vulnerability["componentIdentifier"]["format"] == "maven": component_id = "{} {} {}".format( - vulnerability['componentIdentifier']['coordinates']['artifactId'], - vulnerability['componentIdentifier']['coordinates']['groupId'], - vulnerability['componentIdentifier']['coordinates']['version'] + vulnerability["componentIdentifier"]["coordinates"][ + "artifactId" + ], + 
vulnerability["componentIdentifier"]["coordinates"][ + "groupId" + ], + vulnerability["componentIdentifier"]["coordinates"][ + "version" + ], ) - elif vulnerability['componentIdentifier']['format'] == "a-name": + elif vulnerability["componentIdentifier"]["format"] == "a-name": component_id = "{} {} {}".format( - vulnerability['componentIdentifier']['coordinates']['name'], - vulnerability['componentIdentifier']['coordinates']['qualifier'], - vulnerability['componentIdentifier']['coordinates']['version'] + vulnerability["componentIdentifier"]["coordinates"][ + "name" + ], + vulnerability["componentIdentifier"]["coordinates"][ + "qualifier" + ], + vulnerability["componentIdentifier"]["coordinates"][ + "version" + ], ) finding_title = "{} - {}".format( - main_finding['reference'], - component_id + main_finding["reference"], component_id ) - finding_description = "Hash {}\n\n".format(vulnerability['hash']) + finding_description = "Hash {}\n\n".format(vulnerability["hash"]) finding_description += component_id - finding_description += "\n\nPlease check the CVE details of this finding for a detailed description. The details of issues beginning with \"SONATYPE-\" can be found by contacting Sonatype, Inc. or through mechanisms they have provided in their product." - threat_category = main_finding.get("threatCategory", "CVSS vector not provided. ").title() - status = main_finding['status'] - score = main_finding.get('severity', "No CVSS score yet.") - if 'pathnames' in vulnerability: - file_path = ' '.join(vulnerability['pathnames'])[:1000] + finding_description += '\n\nPlease check the CVE details of this finding for a detailed description. The details of issues beginning with "SONATYPE-" can be found by contacting Sonatype, Inc. or through mechanisms they have provided in their product.' + threat_category = main_finding.get( + "threatCategory", "CVSS vector not provided. " + ).title() + status = main_finding["status"] + main_finding.get("severity", "No CVSS score yet.") + if "pathnames" in vulnerability: + file_path = " ".join(vulnerability["pathnames"])[:1000] else: - file_path = '' + file_path = "" # create the finding object finding = Finding( @@ -112,7 +134,9 @@ def get_item(vulnerability, test): severity=severity, description=finding_description, mitigation=status, - references="{}\n{}\n".format(main_finding['url'], "\n".join(references)), + references="{}\n{}\n".format( + main_finding["url"], "\n".join(references) + ), false_p=False, duplicate=False, out_of_scope=False, diff --git a/dojo/tools/spotbugs/parser.py b/dojo/tools/spotbugs/parser.py index 44b5dbb352..a9a0d23f03 100644 --- a/dojo/tools/spotbugs/parser.py +++ b/dojo/tools/spotbugs/parser.py @@ -21,11 +21,7 @@ def get_findings(self, filename, test): reference_patterns = dict() dupes = dict() - SEVERITY = { - '1': 'High', - '2': 'Medium', - '3': 'Low' - } + SEVERITY = {"1": "High", "2": "Medium", "3": "Low"} tree = ET.parse(filename) root = tree.getroot() @@ -34,95 +30,106 @@ def get_findings(self, filename, test): html_parser.ignore_links = False # Parse tags - for pattern in root.findall('BugPattern'): + for pattern in root.findall("BugPattern"): # Parse ...
    html content html_text = html_parser.handle( - ET.tostring( - pattern.find('Details'), - method='text' - ).decode('utf-8') + ET.tostring(pattern.find("Details"), method="text").decode( + "utf-8" + ) ) # Parse mitigation from html - mitigation = '' + mitigation = "" i = 0 for line in html_text.splitlines(): i += 1 # Break loop when references are reached - if 'Reference' in line: + if "Reference" in line: break - # Add a string before the code indicating that it's just an example, NOT the actual scanned code - if ('Vulnerable Code:' in line) or ('Insecure configuration:' in line) or ('Code at risk:' in line): - mitigation += '\n\n#### Example\n' + # Add a string before the code indicating that it's just an + # example, NOT the actual scanned code + if ( + ("Vulnerable Code:" in line) + or ("Insecure configuration:" in line) + or ("Code at risk:" in line) + ): + mitigation += "\n\n#### Example\n" # Add line to mitigation - mitigation += line + '\n' + mitigation += line + "\n" # Add mitigations to dictionary - mitigation_patterns[pattern.get('type')] = mitigation + mitigation_patterns[pattern.get("type")] = mitigation # Parse references from html - reference = '' + reference = "" # Sometimes there's a breakline in the middle of the reference, # so the splitlines method ends up breaking it in two. # We solve this problem by joining all references and adding breaklines with regex. # Start loop where the previous loop ended for line in html_text.splitlines()[i:]: # Concatenate all references in one big string - reference += line + ' ' + reference += line + " " # Add breakline between each reference # regex: turns ') [' into ')\n[' # ')': reference ends # '[': reference starts - reference = re.sub(r'(?<=\))(.*?)(?=\[)', '\n', reference) + reference = re.sub(r"(?<=\))(.*?)(?=\[)", "\n", reference) # Add references to dictionary - reference_patterns[pattern.get('type')] = reference + reference_patterns[pattern.get("type")] = reference # Parse tags - for bug in root.findall('BugInstance'): - desc = '' + for bug in root.findall("BugInstance"): + desc = "" for message in bug.itertext(): - desc += message + '\n' + desc += message + "\n" - shortmessage_extract = bug.find('ShortMessage') + shortmessage_extract = bug.find("ShortMessage") if shortmessage_extract is not None: title = shortmessage_extract.text else: - title = bug.get('type') - severity = SEVERITY[bug.get('priority')] + title = bug.get("type") + severity = SEVERITY[bug.get("priority")] description = desc finding = Finding( title=title, - cwe=int(bug.get('cweid', default=0)), + cwe=int(bug.get("cweid", default=0)), severity=severity, description=description, test=test, static_finding=True, dynamic_finding=False, - nb_occurences=1 + nb_occurences=1, ) # find the source line and file on the buginstance - source_extract = bug.find('SourceLine') + source_extract = bug.find("SourceLine") if source_extract is not None: finding.file_path = source_extract.get("sourcepath") finding.sast_source_object = source_extract.get("classname") - finding.sast_source_file_path = source_extract.get("sourcepath") - if 'start' in source_extract.attrib and source_extract.get("start").isdigit(): + finding.sast_source_file_path = source_extract.get( + "sourcepath" + ) + if ( + "start" in source_extract.attrib + and source_extract.get("start").isdigit() + ): finding.line = int(source_extract.get("start")) finding.sast_source_line = int(source_extract.get("start")) - if bug.get('type') in mitigation_patterns: - finding.mitigation = mitigation_patterns[bug.get('type')] 
- finding.references = reference_patterns[bug.get('type')] + if bug.get("type") in mitigation_patterns: + finding.mitigation = mitigation_patterns[bug.get("type")] + finding.references = reference_patterns[bug.get("type")] - if 'instanceHash' in bug.attrib: - dupe_key = bug.get('instanceHash') + if "instanceHash" in bug.attrib: + dupe_key = bug.get("instanceHash") else: - dupe_key = "|".join([ - 'no_instance_hash', - title, - description, - ]) + dupe_key = "|".join( + [ + "no_instance_hash", + title, + description, + ] + ) if dupe_key in dupes: find = dupes[dupe_key] diff --git a/dojo/tools/ssl_labs/__init__.py b/dojo/tools/ssl_labs/__init__.py index 16eb15eddb..20f78a98a5 100644 --- a/dojo/tools/ssl_labs/__init__.py +++ b/dojo/tools/ssl_labs/__init__.py @@ -1 +1 @@ -__author__ = 'Aaron Weaver' +__author__ = "Aaron Weaver" diff --git a/dojo/tools/ssl_labs/parser.py b/dojo/tools/ssl_labs/parser.py index 31e898c84b..5c99ef03e3 100644 --- a/dojo/tools/ssl_labs/parser.py +++ b/dojo/tools/ssl_labs/parser.py @@ -1,4 +1,4 @@ -__author__ = 'Aaron Weaver' +__author__ = "Aaron Weaver" import json from datetime import datetime @@ -7,7 +7,6 @@ class SslLabsParser(object): - def get_scan_types(self): return ["SSL Labs Scan"] @@ -20,8 +19,8 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): tree = filename.read() try: - data = json.loads(str(tree, 'utf-8')) - except: + data = json.loads(str(tree, "utf-8")) + except Exception: data = json.loads(tree) find_date = datetime.now() @@ -36,15 +35,10 @@ def get_findings(self, filename, test): if "endpoints" in host: ssl_endpoints = host["endpoints"] for endpoints in ssl_endpoints: - categories = '' - language = '' - mitigation = 'N/A' - impact = 'N/A' - references = '' - findingdetail = '' - title = '' - group = '' - status = '' + mitigation = "N/A" + impact = "N/A" + references = "" + title = "" port = None protocol = None ipAddress = None @@ -70,18 +64,41 @@ def get_findings(self, filename, test): cert = "" if "cert" in endpoints["details"]: cert = endpoints["details"]["cert"] - description = "%sCertifcate Subject: %s\n" % (description, cert["subject"]) - description = "%sIssuer Subject: %s\n" % (description, cert["issuerSubject"]) - description = "%sSignature Algorithm: %s\n" % (description, cert["sigAlg"]) + description = "%sCertifcate Subject: %s\n" % ( + description, + cert["subject"], + ) + description = "%sIssuer Subject: %s\n" % ( + description, + cert["issuerSubject"], + ) + description = "%sSignature Algorithm: %s\n" % ( + description, + cert["sigAlg"], + ) else: for cert in host["certs"]: - description = "%sCertifcate Subject: %s\n" % (description, cert["subject"]) - description = "%sIssuer Subject: %s\n" % (description, cert["issuerSubject"]) - description = "%sSignature Algorithm: %s\n" % (description, cert["sigAlg"]) + description = "%sCertifcate Subject: %s\n" % ( + description, + cert["subject"], + ) + description = "%sIssuer Subject: %s\n" % ( + description, + cert["issuerSubject"], + ) + description = "%sSignature Algorithm: %s\n" % ( + description, + cert["sigAlg"], + ) protocol_str = "" for protocol_data in endpoints["details"]["protocols"]: - protocol_str += protocol_data["name"] + " " + protocol_data["version"] + "\n" + protocol_str += ( + protocol_data["name"] + + " " + + protocol_data["version"] + + "\n" + ) if protocol_str: description += "\nProtocols:\n" + protocol_str @@ -95,72 +112,188 @@ def get_findings(self, filename, test): elif "suites" in endpoints["details"]: for item in 
endpoints["details"]["suites"]: for suites in item["list"]: - suite_info = suite_info + self.suite_data(suites) - except: + suite_info = suite_info + self.suite_data( + suites + ) + except Exception: suite_info = "Not provided." + "\n\n" description += suite_info description += "Additional Information:\n\n" if "serverSignature" in endpoints["details"]: - description += "serverSignature: " + endpoints["details"]["serverSignature"] + "\n" + description += ( + "serverSignature: " + + endpoints["details"]["serverSignature"] + + "\n" + ) if "prefixDelegation" in endpoints["details"]: - description += "prefixDelegation: " + str(endpoints["details"]["prefixDelegation"]) + "\n" + description += ( + "prefixDelegation: " + + str(endpoints["details"]["prefixDelegation"]) + + "\n" + ) if "nonPrefixDelegation" in endpoints["details"]: - description += "nonPrefixDelegation: " + str(endpoints["details"]["nonPrefixDelegation"]) + "\n" + description += ( + "nonPrefixDelegation: " + + str(endpoints["details"]["nonPrefixDelegation"]) + + "\n" + ) if "vulnBeast" in endpoints["details"]: - description += "vulnBeast: " + str(endpoints["details"]["vulnBeast"]) + "\n" + description += ( + "vulnBeast: " + + str(endpoints["details"]["vulnBeast"]) + + "\n" + ) if "renegSupport" in endpoints["details"]: - description += "renegSupport: " + str(endpoints["details"]["renegSupport"]) + "\n" + description += ( + "renegSupport: " + + str(endpoints["details"]["renegSupport"]) + + "\n" + ) if "stsStatus" in endpoints["details"]: - description += "stsStatus: " + endpoints["details"]["stsStatus"] + "\n" + description += ( + "stsStatus: " + + endpoints["details"]["stsStatus"] + + "\n" + ) if "stsResponseHeader" in endpoints["details"]: - description += "stsResponseHeader: " + endpoints["details"]["stsResponseHeader"] + "\n" + description += ( + "stsResponseHeader: " + + endpoints["details"]["stsResponseHeader"] + + "\n" + ) if "stsPreload" in endpoints["details"]: - description += "stsPreload: " + str(endpoints["details"]["stsPreload"]) + "\n" + description += ( + "stsPreload: " + + str(endpoints["details"]["stsPreload"]) + + "\n" + ) if "sessionResumption" in endpoints["details"]: - description += "sessionResumption: " + str(endpoints["details"]["sessionResumption"]) + "\n" + description += ( + "sessionResumption: " + + str(endpoints["details"]["sessionResumption"]) + + "\n" + ) if "compressionMethods" in endpoints["details"]: - description += "compressionMethods: " + str(endpoints["details"]["compressionMethods"]) + "\n" + description += ( + "compressionMethods: " + + str(endpoints["details"]["compressionMethods"]) + + "\n" + ) if "supportsNpn" in endpoints["details"]: - description += "supportsNpn: " + str(endpoints["details"]["supportsNpn"]) + "\n" + description += ( + "supportsNpn: " + + str(endpoints["details"]["supportsNpn"]) + + "\n" + ) if "supportsAlpn" in endpoints["details"]: - description += "supportsAlpn: " + str(endpoints["details"]["supportsAlpn"]) + "\n" + description += ( + "supportsAlpn: " + + str(endpoints["details"]["supportsAlpn"]) + + "\n" + ) if "sessionTickets" in endpoints["details"]: - description += "sessionTickets: " + str(endpoints["details"]["sessionTickets"]) + "\n" + description += ( + "sessionTickets: " + + str(endpoints["details"]["sessionTickets"]) + + "\n" + ) if "ocspStapling" in endpoints["details"]: - description += "ocspStapling: " + str(endpoints["details"]["ocspStapling"]) + "\n" + description += ( + "ocspStapling: " + + str(endpoints["details"]["ocspStapling"]) + + "\n" + ) if 
"sniRequired" in endpoints["details"]: - description += "sniRequired: " + str(endpoints["details"]["sniRequired"]) + "\n" + description += ( + "sniRequired: " + + str(endpoints["details"]["sniRequired"]) + + "\n" + ) if "httpStatusCode" in endpoints["details"]: - description += "httpStatusCode: " + str(endpoints["details"]["httpStatusCode"]) + "\n" + description += ( + "httpStatusCode: " + + str(endpoints["details"]["httpStatusCode"]) + + "\n" + ) if "supportsRc4" in endpoints["details"]: - description += "supportsRc4: " + str(endpoints["details"]["supportsRc4"]) + "\n" + description += ( + "supportsRc4: " + + str(endpoints["details"]["supportsRc4"]) + + "\n" + ) if "rc4WithModern" in endpoints["details"]: - description += "rc4WithModern: " + str(endpoints["details"]["rc4WithModern"]) + "\n" + description += ( + "rc4WithModern: " + + str(endpoints["details"]["rc4WithModern"]) + + "\n" + ) if "forwardSecrecy" in endpoints["details"]: - description += "forwardSecrecy: " + str(endpoints["details"]["forwardSecrecy"]) + "\n" + description += ( + "forwardSecrecy: " + + str(endpoints["details"]["forwardSecrecy"]) + + "\n" + ) if "protocolIntolerance" in endpoints["details"]: - description += "protocolIntolerance: " + str(endpoints["details"]["protocolIntolerance"]) + "\n" + description += ( + "protocolIntolerance: " + + str(endpoints["details"]["protocolIntolerance"]) + + "\n" + ) if "miscIntolerance" in endpoints["details"]: - description += "miscIntolerance: " + str(endpoints["details"]["miscIntolerance"]) + "\n" + description += ( + "miscIntolerance: " + + str(endpoints["details"]["miscIntolerance"]) + + "\n" + ) if "heartbleed" in endpoints["details"]: - description += "heartbleed: " + str(endpoints["details"]["heartbleed"]) + "\n" + description += ( + "heartbleed: " + + str(endpoints["details"]["heartbleed"]) + + "\n" + ) if "heartbeat" in endpoints["details"]: - description += "heartbeat: " + str(endpoints["details"]["heartbeat"]) + "\n" + description += ( + "heartbeat: " + + str(endpoints["details"]["heartbeat"]) + + "\n" + ) if "openSslCcs" in endpoints["details"]: - description += "openSslCcs: " + str(endpoints["details"]["openSslCcs"]) + "\n" + description += ( + "openSslCcs: " + + str(endpoints["details"]["openSslCcs"]) + + "\n" + ) if "openSSLLuckyMinus20" in endpoints["details"]: - description += "openSSLLuckyMinus20: " + str(endpoints["details"]["openSSLLuckyMinus20"]) + "\n" + description += ( + "openSSLLuckyMinus20: " + + str(endpoints["details"]["openSSLLuckyMinus20"]) + + "\n" + ) if "poodle" in endpoints["details"]: - description += "poodle: " + str(endpoints["details"]["poodle"]) + "\n" + description += ( + "poodle: " + str(endpoints["details"]["poodle"]) + "\n" + ) if "poodleTls" in endpoints["details"]: - description += "poodleTls: " + str(endpoints["details"]["poodleTls"]) + "\n" + description += ( + "poodleTls: " + + str(endpoints["details"]["poodleTls"]) + + "\n" + ) if "fallbackScsv" in endpoints["details"]: - description += "fallbackScsv: " + str(endpoints["details"]["fallbackScsv"]) + "\n" + description += ( + "fallbackScsv: " + + str(endpoints["details"]["fallbackScsv"]) + + "\n" + ) if "freak" in endpoints["details"]: - description += "freak: " + str(endpoints["details"]["freak"]) + "\n" + description += ( + "freak: " + str(endpoints["details"]["freak"]) + "\n" + ) if "hasSct" in endpoints["details"]: - description += "hasSct: " + str(endpoints["details"]["hasSct"]) + "\n" + description += ( + "hasSct: " + str(endpoints["details"]["hasSct"]) + "\n" + ) """ cName = "" 
@@ -174,7 +307,11 @@ def get_findings(self, filename, test): protoName = "" for protocols in endpoints["details"]["protocols"]: - protoName = "%s %s %s\n" % (protoName, protocols["name"], protocols["version"]) + protoName = "%s %s %s\n" % ( + protoName, + protocols["name"], + protocols["version"], + ) dupe_key = hostName + grade @@ -183,25 +320,33 @@ def get_findings(self, filename, test): if description is not None: find.description += description else: - find = Finding(title=title, - cwe=310, # Cryptographic Issues - test=test, - description=description, - severity=sev, - mitigation=mitigation, - impact=impact, - references=references, - date=find_date, - dynamic_finding=True) + find = Finding( + title=title, + cwe=310, # Cryptographic Issues + test=test, + description=description, + severity=sev, + mitigation=mitigation, + impact=impact, + references=references, + date=find_date, + dynamic_finding=True, + ) dupes[dupe_key] = find find.unsaved_endpoints = list() - find.unsaved_endpoints.append(Endpoint(host=hostName, port=port, protocol=protocol)) + find.unsaved_endpoints.append( + Endpoint(host=hostName, port=port, protocol=protocol) + ) if ipAddress: - find.unsaved_endpoints.append(Endpoint(host=ipAddress, port=port, protocol=protocol)) + find.unsaved_endpoints.append( + Endpoint(host=ipAddress, port=port, protocol=protocol) + ) if endpoints["details"]["httpTransactions"]: for url in endpoints["details"]["httpTransactions"]: - find.unsaved_endpoints.append(Endpoint.from_uri(url['requestUrl'])) + find.unsaved_endpoints.append( + Endpoint.from_uri(url["requestUrl"]) + ) return list(dupes.values()) @@ -225,7 +370,9 @@ def getCriticalityRating(self, rating): def suite_data(self, suites): suite_info = "" suite_info += suites["name"] + "\n" - suite_info += "Cipher Strength: " + str(suites["cipherStrength"]) + "\n" + suite_info += ( + "Cipher Strength: " + str(suites["cipherStrength"]) + "\n" + ) if "ecdhBits" in suites: suite_info += "ecdhBits: " + str(suites["ecdhBits"]) + "\n" if "ecdhStrength" in suites: diff --git a/dojo/tools/sslscan/parser.py b/dojo/tools/sslscan/parser.py index 679a13cb8e..f5166c407d 100644 --- a/dojo/tools/sslscan/parser.py +++ b/dojo/tools/sslscan/parser.py @@ -5,11 +5,10 @@ from dojo.models import Endpoint, Finding -__author__ = 'dr3dd589' +__author__ = "dr3dd589" class SslscanParser(object): - def get_scan_types(self): return ["Sslscan"] @@ -23,8 +22,10 @@ def get_findings(self, file, test): tree = ET.parse(file) # get root of tree. root = tree.getroot() - if 'document' not in root.tag: - raise NamespaceErr("This doesn't seem to be a valid sslscan xml file.") + if "document" not in root.tag: + raise NamespaceErr( + "This doesn't seem to be a valid sslscan xml file." 
+ ) dupes = dict() for ssltest in root: for target in ssltest: @@ -32,21 +33,43 @@ def get_findings(self, file, test): severity = "" description = "" severity = "Info" - host = ssltest.attrib['host'] - port = int(ssltest.attrib['port']) - if target.tag == "heartbleed" and target.attrib['vulnerable'] == '1': - title = "heartbleed" + " | " + target.attrib['sslversion'] - description = "**heartbleed** :" + "\n\n" + \ - "**sslversion** : " + target.attrib['sslversion'] + "\n" - if target.tag == "cipher" and target.attrib['strength'] not in ['acceptable', 'strong']: - title = "cipher" + " | " + target.attrib['sslversion'] - description = "**Cipher** : " + target.attrib['cipher'] + "\n\n" + \ - "**Status** : " + target.attrib['status'] + "\n\n" + \ - "**strength** : " + target.attrib['strength'] + "\n\n" + \ - "**sslversion** : " + target.attrib['sslversion'] + "\n" + host = ssltest.attrib["host"] + port = int(ssltest.attrib["port"]) + if ( + target.tag == "heartbleed" + and target.attrib["vulnerable"] == "1" + ): + title = "heartbleed" + " | " + target.attrib["sslversion"] + description = ( + "**heartbleed** :" + + "\n\n" + + "**sslversion** : " + + target.attrib["sslversion"] + + "\n" + ) + if target.tag == "cipher" and target.attrib[ + "strength" + ] not in ["acceptable", "strong"]: + title = "cipher" + " | " + target.attrib["sslversion"] + description = ( + "**Cipher** : " + + target.attrib["cipher"] + + "\n\n" + + "**Status** : " + + target.attrib["status"] + + "\n\n" + + "**strength** : " + + target.attrib["strength"] + + "\n\n" + + "**sslversion** : " + + target.attrib["sslversion"] + + "\n" + ) if title and description is not None: - dupe_key = hashlib.sha256(str(description + title).encode('utf-8')).hexdigest() + dupe_key = hashlib.sha256( + str(description + title).encode("utf-8") + ).hexdigest() if dupe_key in dupes: finding = dupes[dupe_key] if finding.references: @@ -60,16 +83,15 @@ def get_findings(self, file, test): test=test, description=description, severity=severity, - dynamic_finding=True,) + dynamic_finding=True, + ) finding.unsaved_endpoints = list() dupes[dupe_key] = finding if host: - if '://' in host: + if "://" in host: endpoint = Endpoint.from_uri(host) else: - endpoint = Endpoint( - host=host, - port=port) + endpoint = Endpoint(host=host, port=port) finding.unsaved_endpoints.append(endpoint) return dupes.values() diff --git a/dojo/tools/sslyze/parser.py b/dojo/tools/sslyze/parser.py index 8143529642..4f557d887a 100644 --- a/dojo/tools/sslyze/parser.py +++ b/dojo/tools/sslyze/parser.py @@ -17,13 +17,12 @@ def get_description_for_scan_types(self, scan_type): return "Import XML report of SSLyze version 2 scan." 
def get_findings(self, filename, test): - if filename is None: return list() - if filename.name.lower().endswith('.xml'): + if filename.name.lower().endswith(".xml"): return SSLyzeXMLParser().get_findings(filename, test) - elif filename.name.lower().endswith('.json'): + elif filename.name.lower().endswith(".json"): return SSLyzeJSONParser().get_findings(filename, test) else: - raise ValueError('Unknown File Format') + raise ValueError("Unknown File Format") diff --git a/dojo/tools/sslyze/parser_json.py b/dojo/tools/sslyze/parser_json.py index ee73120a5f..112695fb07 100644 --- a/dojo/tools/sslyze/parser_json.py +++ b/dojo/tools/sslyze/parser_json.py @@ -5,73 +5,71 @@ # FIXME discuss this list as maintenance subject # Recommended cipher suites according to German BSI as of 2020 TLS12_RECOMMENDED_CIPHERS = [ - 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256', - 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', - 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', - 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', - 'TLS_ECDHE_ECDSA_WITH_AES_128_CCM', - 'TLS_ECDHE_ECDSA_WITH_AES_256_CCM', - 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', - 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', - 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', - 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', - 'TLS_DHE_DSS_WITH_AES_128_CBC_SHA256', - 'TLS_DHE_DSS_WITH_AES_256_CBC_', - 'TLS_DHE_DSS_WITH_AES_128_GCM_SHA256', - 'TLS_DHE_DSS_WITH_AES_256_GCM_SHA384', - 'TLS_DHE_RSA_WITH_AES_128_CBC_SHA256', - 'TLS_DHE_RSA_WITH_AES_256_CBC_SHA256', - 'TLS_DHE_RSA_WITH_AES_128_GCM_SHA256', - 'TLS_DHE_RSA_WITH_AES_256_GCM_SHA384', - 'TLS_DHE_RSA_WITH_AES_128_CCM', - 'TLS_DHE_RSA_WITH_AES_256_CCM', - 'TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256', - 'TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384', - 'TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256', - 'TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384', - 'TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256', - 'TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384', - 'TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256', - 'TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384', - 'TLS_DH_DSS_WITH_AES_128_CBC_SHA256', - 'TLS_DH_DSS_WITH_AES_256_CBC_SHA256', - 'TLS_DH_DSS_WITH_AES_128_GCM_SHA256', - 'TLS_DH_DSS_WITH_AES_256_GCM_SHA384', - 'TLS_DH_RSA_WITH_AES_128_CBC_SHA256', - 'TLS_DH_RSA_WITH_AES_256_CBC_SHA256', - 'TLS_DH_RSA_WITH_AES_128_GCM_SHA256', - 'TLS_DH_RSA_WITH_AES_256_GCM_SHA384', - 'TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256', - 'TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384', - 'TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256', - 'TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384', - 'TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256', - 'TLS_DHE_PSK_WITH_AES_128_CBC_SHA256', - 'TLS_DHE_PSK_WITH_AES_256_CBC_SHA384', - 'TLS_DHE_PSK_WITH_AES_128_GCM_SHA256', - 'TLS_DHE_PSK_WITH_AES_256_GCM_SHA384', - 'TLS_DHE_PSK_WITH_AES_128_CCM', - 'TLS_DHE_PSK_WITH_AES_256_CCM', - 'TLS_RSA_PSK_WITH_AES_128_CBC_SHA256', - 'TLS_RSA_PSK_WITH_AES_256_CBC_SHA384', - 'TLS_RSA_PSK_WITH_AES_128_GCM_SHA256', - 'TLS_RSA_PSK_WITH_AES_256_GCM_SHA384' + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_CCM", + "TLS_ECDHE_ECDSA_WITH_AES_256_CCM", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256", + "TLS_DHE_DSS_WITH_AES_256_CBC_", + "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256", + "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", + 
"TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_DHE_RSA_WITH_AES_128_CCM", + "TLS_DHE_RSA_WITH_AES_256_CCM", + "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384", + "TLS_DH_DSS_WITH_AES_128_CBC_SHA256", + "TLS_DH_DSS_WITH_AES_256_CBC_SHA256", + "TLS_DH_DSS_WITH_AES_128_GCM_SHA256", + "TLS_DH_DSS_WITH_AES_256_GCM_SHA384", + "TLS_DH_RSA_WITH_AES_128_CBC_SHA256", + "TLS_DH_RSA_WITH_AES_256_CBC_SHA256", + "TLS_DH_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DH_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256", + "TLS_DHE_PSK_WITH_AES_128_CBC_SHA256", + "TLS_DHE_PSK_WITH_AES_256_CBC_SHA384", + "TLS_DHE_PSK_WITH_AES_128_GCM_SHA256", + "TLS_DHE_PSK_WITH_AES_256_GCM_SHA384", + "TLS_DHE_PSK_WITH_AES_128_CCM", + "TLS_DHE_PSK_WITH_AES_256_CCM", + "TLS_RSA_PSK_WITH_AES_128_CBC_SHA256", + "TLS_RSA_PSK_WITH_AES_256_CBC_SHA384", + "TLS_RSA_PSK_WITH_AES_128_GCM_SHA256", + "TLS_RSA_PSK_WITH_AES_256_GCM_SHA384", ] TLS13_RECOMMENDED_CIPHERS = [ - 'TLS_AES_128_GCM_SHA256', - 'TLS_AES_256_GCM_SHA384', - 'TLS_AES_128_CCM_SHA256' + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_AES_128_CCM_SHA256", ] -BSI_LINK = 'https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10' -REFERENCES = 'TLS recommendations of German BSI: ' + BSI_LINK +BSI_LINK = "https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10" +REFERENCES = "TLS recommendations of German BSI: " + BSI_LINK class SSLyzeJSONParser(object): - def get_findings(self, json_output, test): - if json_output is None: return @@ -84,10 +82,10 @@ def parse_json(self, json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise Exception("Invalid format") return tree @@ -95,10 +93,10 @@ def parse_json(self, json_output): def get_items(self, tree, test): items = [] - for node in tree['server_scan_results']: + for node in tree["server_scan_results"]: endpoint = get_endpoint(node) - if 'scan_commands_results' in node: - scr_node = node['scan_commands_results'] + if "scan_commands_results" in node: + scr_node = node["scan_commands_results"] item = get_heartbleed(scr_node, test, endpoint) if item: items.append(item) @@ -108,30 +106,68 @@ def get_items(self, tree, test): item = get_renegotiation(scr_node, test, endpoint) if item: items.append(item) - item = get_weak_protocol('ssl_2_0_cipher_suites', 'SSL 2.0', scr_node, test, endpoint) + item = get_weak_protocol( + "ssl_2_0_cipher_suites", + "SSL 2.0", + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = get_weak_protocol('ssl_3_0_cipher_suites', 'SSL 3.0', scr_node, test, endpoint) + item = get_weak_protocol( + "ssl_3_0_cipher_suites", + "SSL 3.0", + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = 
get_weak_protocol('tls_1_0_cipher_suites', 'TLS 1.0', scr_node, test, endpoint) + item = get_weak_protocol( + "tls_1_0_cipher_suites", + "TLS 1.0", + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = get_weak_protocol('tls_1_1_cipher_suites', 'TLS 1.1', scr_node, test, endpoint) + item = get_weak_protocol( + "tls_1_1_cipher_suites", + "TLS 1.1", + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = get_strong_protocol('tls_1_2_cipher_suites', 'TLS 1.2', TLS12_RECOMMENDED_CIPHERS, scr_node, test, endpoint) + item = get_strong_protocol( + "tls_1_2_cipher_suites", + "TLS 1.2", + TLS12_RECOMMENDED_CIPHERS, + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = get_strong_protocol('tls_1_3_cipher_suites', 'TLS 1.3', TLS13_RECOMMENDED_CIPHERS, scr_node, test, endpoint) + item = get_strong_protocol( + "tls_1_3_cipher_suites", + "TLS 1.3", + TLS13_RECOMMENDED_CIPHERS, + scr_node, + test, + endpoint, + ) if item: items.append(item) item = get_certificate_information(scr_node, test, endpoint) if item: items.append(item) - elif 'scan_result' in node: - scr_node = node['scan_result'] + elif "scan_result" in node: + scr_node = node["scan_result"] item = get_heartbleed(scr_node, test, endpoint) if item: items.append(item) @@ -141,22 +177,60 @@ def get_items(self, tree, test): item = get_renegotiation(scr_node, test, endpoint) if item: items.append(item) - item = get_weak_protocol('ssl_2_0_cipher_suites', 'SSL 2.0', scr_node, test, endpoint) + item = get_weak_protocol( + "ssl_2_0_cipher_suites", + "SSL 2.0", + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = get_weak_protocol('ssl_3_0_cipher_suites', 'SSL 3.0', scr_node, test, endpoint) + item = get_weak_protocol( + "ssl_3_0_cipher_suites", + "SSL 3.0", + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = get_weak_protocol('tls_1_0_cipher_suites', 'TLS 1.0', scr_node, test, endpoint) + item = get_weak_protocol( + "tls_1_0_cipher_suites", + "TLS 1.0", + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = get_weak_protocol('tls_1_1_cipher_suites', 'TLS 1.1', scr_node, test, endpoint) + item = get_weak_protocol( + "tls_1_1_cipher_suites", + "TLS 1.1", + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = get_strong_protocol('tls_1_2_cipher_suites', 'TLS 1.2', TLS12_RECOMMENDED_CIPHERS, scr_node, test, endpoint) + item = get_strong_protocol( + "tls_1_2_cipher_suites", + "TLS 1.2", + TLS12_RECOMMENDED_CIPHERS, + scr_node, + test, + endpoint, + ) if item: items.append(item) - item = get_strong_protocol('tls_1_3_cipher_suites', 'TLS 1.3', TLS13_RECOMMENDED_CIPHERS, scr_node, test, endpoint) + item = get_strong_protocol( + "tls_1_3_cipher_suites", + "TLS 1.3", + TLS13_RECOMMENDED_CIPHERS, + scr_node, + test, + endpoint, + ) if item: items.append(item) item = get_certificate_information(scr_node, test, endpoint) @@ -167,92 +241,147 @@ def get_items(self, tree, test): def get_heartbleed(node, test, endpoint): - if 'heartbleed' in node: - heartbleed = node['heartbleed'] - if heartbleed.get('status') == 'NOT_SCHEDULED': + if "heartbleed" in node: + heartbleed = node["heartbleed"] + if heartbleed.get("status") == "NOT_SCHEDULED": return None vulnerable = False - if 'is_vulnerable_to_heartbleed' in heartbleed: - vulnerable = heartbleed['is_vulnerable_to_heartbleed'] + if "is_vulnerable_to_heartbleed" in heartbleed: + vulnerable = heartbleed["is_vulnerable_to_heartbleed"] if vulnerable: - title = 'Heartbleed' - description = 
get_url(endpoint) + ' is vulnerable to heartbleed' - vulnerability_id = 'CVE-2014-0160' - return get_finding(title, description, vulnerability_id, None, test, endpoint) - elif 'result' in heartbleed: - hb_result = heartbleed['result'] - if 'is_vulnerable_to_heartbleed' in hb_result: - vulnerable = hb_result['is_vulnerable_to_heartbleed'] + title = "Heartbleed" + description = ( + get_url(endpoint) + " is vulnerable to heartbleed" + ) + vulnerability_id = "CVE-2014-0160" + return get_finding( + title, description, vulnerability_id, None, test, endpoint + ) + elif "result" in heartbleed: + hb_result = heartbleed["result"] + if "is_vulnerable_to_heartbleed" in hb_result: + vulnerable = hb_result["is_vulnerable_to_heartbleed"] if vulnerable: - title = 'Heartbleed' - description = get_url(endpoint) + ' is vulnerable to heartbleed' - vulnerability_id = 'CVE-2014-0160' - return get_finding(title, description, vulnerability_id, None, test, endpoint) + title = "Heartbleed" + description = ( + get_url(endpoint) + " is vulnerable to heartbleed" + ) + vulnerability_id = "CVE-2014-0160" + return get_finding( + title, + description, + vulnerability_id, + None, + test, + endpoint, + ) return None return None def get_ccs(node, test, endpoint): - if 'openssl_ccs_injection' in node: - ccs_injection = node['openssl_ccs_injection'] + if "openssl_ccs_injection" in node: + ccs_injection = node["openssl_ccs_injection"] vulnerable = False - if ccs_injection.get('status') == 'NOT_SCHEDULED': + if ccs_injection.get("status") == "NOT_SCHEDULED": return None - if 'is_vulnerable_to_ccs_injection' in ccs_injection: - vulnerable = ccs_injection['is_vulnerable_to_ccs_injection'] + if "is_vulnerable_to_ccs_injection" in ccs_injection: + vulnerable = ccs_injection["is_vulnerable_to_ccs_injection"] if vulnerable: - title = 'CCS injection' - description = get_url(endpoint) + ' is vulnerable to OpenSSL CCS injection' - vulnerability_id = 'CVE-2014-0224' - return get_finding(title, description, vulnerability_id, None, test, endpoint) - - elif 'result' in ccs_injection: - ccs_result = ccs_injection['result'] - if 'is_vulnerable_to_ccs_injection' in ccs_result: - vulnerable = ccs_result['is_vulnerable_to_ccs_injection'] + title = "CCS injection" + description = ( + get_url(endpoint) + + " is vulnerable to OpenSSL CCS injection" + ) + vulnerability_id = "CVE-2014-0224" + return get_finding( + title, description, vulnerability_id, None, test, endpoint + ) + + elif "result" in ccs_injection: + ccs_result = ccs_injection["result"] + if "is_vulnerable_to_ccs_injection" in ccs_result: + vulnerable = ccs_result["is_vulnerable_to_ccs_injection"] if vulnerable: - title = 'CCS injection' - description = get_url(endpoint) + ' is vulnerable to OpenSSL CCS injection' - vulnerability_id = 'CVE-2014-0224' - return get_finding(title, description, vulnerability_id, None, test, endpoint) + title = "CCS injection" + description = ( + get_url(endpoint) + + " is vulnerable to OpenSSL CCS injection" + ) + vulnerability_id = "CVE-2014-0224" + return get_finding( + title, + description, + vulnerability_id, + None, + test, + endpoint, + ) return None return None def get_renegotiation(node, test, endpoint): - if 'session_renegotiation' in node: - renegotiation = node['session_renegotiation'] - if renegotiation.get('status') == 'NOT_SCHEDULED': + if "session_renegotiation" in node: + renegotiation = node["session_renegotiation"] + if renegotiation.get("status") == "NOT_SCHEDULED": return None - if 'accepts_client_renegotiation' in renegotiation and 
'supports_secure_renegotiation' in renegotiation: + if ( + "accepts_client_renegotiation" in renegotiation + and "supports_secure_renegotiation" in renegotiation + ): vulnerable = False - title = 'Session renegotiation' - description = get_url(endpoint) + ' has problems with session renegotiation:' - vulnerable_cr = 'accepts_client_renegotiation' in renegotiation and renegotiation['accepts_client_renegotiation'] + title = "Session renegotiation" + description = ( + get_url(endpoint) + " has problems with session renegotiation:" + ) + vulnerable_cr = ( + "accepts_client_renegotiation" in renegotiation + and renegotiation["accepts_client_renegotiation"] + ) if vulnerable_cr: vulnerable = True - description += '\n - Client renegotiation is accepted' - vulnerable_sr = 'supports_secure_renegotiation' in renegotiation and not renegotiation['supports_secure_renegotiation'] + description += "\n - Client renegotiation is accepted" + vulnerable_sr = ( + "supports_secure_renegotiation" in renegotiation + and not renegotiation["supports_secure_renegotiation"] + ) if vulnerable_sr: vulnerable = True - description += '\n - Secure session renegotiation is not supported' + description += ( + "\n - Secure session renegotiation is not supported" + ) if vulnerable: - return get_finding(title, description, None, None, test, endpoint) - - elif 'result' in renegotiation: - reneg_result = renegotiation['result'] - if 'is_vulnerable_to_client_renegotiation_dos' in reneg_result: - reneg_dos = reneg_result['is_vulnerable_to_client_renegotiation_dos'] + return get_finding( + title, description, None, None, test, endpoint + ) + + elif "result" in renegotiation: + reneg_result = renegotiation["result"] + if "is_vulnerable_to_client_renegotiation_dos" in reneg_result: + reneg_dos = reneg_result[ + "is_vulnerable_to_client_renegotiation_dos" + ] if reneg_dos: - title = 'Is vulnerable to client negotiation DoS' - description = get_url(endpoint) + ' has problems with session renegotiation:' - return get_finding(title, description, None, None, test, endpoint) - if 'supports_secure_renegotiation' in reneg_result: - reneg_secure = reneg_result['supports_secure_renegotiation'] + title = "Is vulnerable to client negotiation DoS" + description = ( + get_url(endpoint) + + " has problems with session renegotiation:" + ) + return get_finding( + title, description, None, None, test, endpoint + ) + if "supports_secure_renegotiation" in reneg_result: + reneg_secure = reneg_result["supports_secure_renegotiation"] if not reneg_secure: - title = 'Does not support secure negotiations' - description = get_url(endpoint) + ' has problems with session renegotiation:' - return get_finding(title, description, None, None, test, endpoint) + title = "Does not support secure negotiations" + description = ( + get_url(endpoint) + + " has problems with session renegotiation:" + ) + return get_finding( + title, description, None, None, test, endpoint + ) return None return None @@ -260,18 +389,32 @@ def get_renegotiation(node, test, endpoint): def get_weak_protocol(cipher, text, node, test, endpoint): if cipher in node: weak_node = node[cipher] - if weak_node.get('status') == 'NOT_SCHEDULED': + if weak_node.get("status") == "NOT_SCHEDULED": return None - if 'accepted_cipher_suites' in weak_node and len(weak_node['accepted_cipher_suites']) > 0: - title = text + ' not recommended' - description = get_url(endpoint) + ' accepts ' + text + ' connections' - return get_finding(title, description, None, REFERENCES, test, endpoint) - elif 'result' in 
weak_node: - weak_node_result = weak_node['result'] - if 'accepted_cipher_suites' in weak_node_result and len(weak_node_result['accepted_cipher_suites']) > 0: - title = text + ' not recommended' - description = get_url(endpoint) + ' accepts ' + text + ' connections' - return get_finding(title, description, None, REFERENCES, test, endpoint) + if ( + "accepted_cipher_suites" in weak_node + and len(weak_node["accepted_cipher_suites"]) > 0 + ): + title = text + " not recommended" + description = ( + get_url(endpoint) + " accepts " + text + " connections" + ) + return get_finding( + title, description, None, REFERENCES, test, endpoint + ) + elif "result" in weak_node: + weak_node_result = weak_node["result"] + if ( + "accepted_cipher_suites" in weak_node_result + and len(weak_node_result["accepted_cipher_suites"]) > 0 + ): + title = text + " not recommended" + description = ( + get_url(endpoint) + " accepts " + text + " connections" + ) + return get_finding( + title, description, None, REFERENCES, test, endpoint + ) return None return None @@ -279,109 +422,159 @@ def get_weak_protocol(cipher, text, node, test, endpoint): def get_strong_protocol(cipher, text, suites, node, test, endpoint): if cipher in node: strong_node = node[cipher] - if strong_node.get('status') == 'NOT_SCHEDULED': + if strong_node.get("status") == "NOT_SCHEDULED": return None - if 'accepted_cipher_suites' in strong_node and len(strong_node['accepted_cipher_suites']) > 0: + if ( + "accepted_cipher_suites" in strong_node + and len(strong_node["accepted_cipher_suites"]) > 0 + ): unrecommended_cipher_found = False - title = 'Unrecommended cipher suites for ' + text - description = get_url(endpoint) + ' accepts unrecommended cipher suites for ' + text + ':' - for cipher_node in strong_node['accepted_cipher_suites']: - if 'cipher_suite' in cipher_node: - cs_node = cipher_node['cipher_suite'] - if 'name' in cs_node and not cs_node['name'] in suites: + title = "Unrecommended cipher suites for " + text + description = ( + get_url(endpoint) + + " accepts unrecommended cipher suites for " + + text + + ":" + ) + for cipher_node in strong_node["accepted_cipher_suites"]: + if "cipher_suite" in cipher_node: + cs_node = cipher_node["cipher_suite"] + if "name" in cs_node and not cs_node["name"] in suites: unrecommended_cipher_found = True - description += '\n - ' + cs_node['name'] + description += "\n - " + cs_node["name"] if unrecommended_cipher_found: - return get_finding(title, description, None, REFERENCES, test, endpoint) + return get_finding( + title, description, None, REFERENCES, test, endpoint + ) - elif 'result' in strong_node: - strong_node_result = strong_node['result'] + elif "result" in strong_node: + strong_node_result = strong_node["result"] unrecommended_cipher_found = False - if 'accepted_cipher_suites' in strong_node_result and len(strong_node_result['accepted_cipher_suites']) > 0: - title = 'Unrecommended cipher suites for ' + text - description = get_url(endpoint) + ' accepts unrecommended cipher suites for ' + text + ':' - for strong_node_result_cyphers in strong_node_result['accepted_cipher_suites']: - if 'cipher_suite' in strong_node_result_cyphers: - cs_node = strong_node_result_cyphers['cipher_suite'] - if 'name' in cs_node and not cs_node['name'] in suites: + if ( + "accepted_cipher_suites" in strong_node_result + and len(strong_node_result["accepted_cipher_suites"]) > 0 + ): + title = "Unrecommended cipher suites for " + text + description = ( + get_url(endpoint) + + " accepts unrecommended cipher suites for " + + 
text + + ":" + ) + for strong_node_result_cyphers in strong_node_result[ + "accepted_cipher_suites" + ]: + if "cipher_suite" in strong_node_result_cyphers: + cs_node = strong_node_result_cyphers["cipher_suite"] + if "name" in cs_node and not cs_node["name"] in suites: unrecommended_cipher_found = True - description += '\n - ' + cs_node['name'] + description += "\n - " + cs_node["name"] if unrecommended_cipher_found: - return get_finding(title, description, None, REFERENCES, test, endpoint) + return get_finding( + title, description, None, REFERENCES, test, endpoint + ) return None return None def get_certificate_information(node, test, endpoint): - if 'certificate_info' in node: - ci_node = node['certificate_info'] - if ci_node.get('status') == 'NOT_SCHEDULED': + if "certificate_info" in node: + ci_node = node["certificate_info"] + if ci_node.get("status") == "NOT_SCHEDULED": return None - if 'certificate_deployments' in ci_node: - for cd_node in ci_node['certificate_deployments']: - title = 'Problems in certificate deployments' - description = get_url(endpoint) + ' has problems in certificate deployments:' + if "certificate_deployments" in ci_node: + for cd_node in ci_node["certificate_deployments"]: + title = "Problems in certificate deployments" + description = ( + get_url(endpoint) + + " has problems in certificate deployments:" + ) vulnerable = False - if 'leaf_certificate_subject_matches_hostname' in cd_node: - if not cd_node['leaf_certificate_subject_matches_hostname']: + if "leaf_certificate_subject_matches_hostname" in cd_node: + if not cd_node[ + "leaf_certificate_subject_matches_hostname" + ]: vulnerable = True - description += '\n - Certificate subject does not match hostname' - for pvr_node in cd_node['path_validation_results']: - if 'openssl_error_string' in pvr_node and pvr_node['openssl_error_string'] is not None: + description += ( + "\n - Certificate subject does not match hostname" + ) + for pvr_node in cd_node["path_validation_results"]: + if ( + "openssl_error_string" in pvr_node + and pvr_node["openssl_error_string"] is not None + ): vulnerable = True name = None version = None - if 'trust_store' in pvr_node: - ts_node = pvr_node['trust_store'] - if 'name' in ts_node: - name = ts_node['name'] - if 'version' in ts_node: - version = ts_node['version'] - description += '\n - ' + pvr_node['openssl_error_string'] + if "trust_store" in pvr_node: + ts_node = pvr_node["trust_store"] + if "name" in ts_node: + name = ts_node["name"] + if "version" in ts_node: + version = ts_node["version"] + description += ( + "\n - " + pvr_node["openssl_error_string"] + ) if name is not None: - description += ' for trust store ' + name + description += " for trust store " + name if version is not None: - description += ', version ' + version + description += ", version " + version if vulnerable: - return get_finding(title, description, None, None, test, endpoint) - - elif 'result' in ci_node: - ci_node_result = ci_node['result'] - if 'certificate_deployments' in ci_node_result: - for cd_node in ci_node_result['certificate_deployments']: - title = 'Problems in certificate deployments' - description = get_url(endpoint) + ' has problems in certificate deployments:' + return get_finding( + title, description, None, None, test, endpoint + ) + + elif "result" in ci_node: + ci_node_result = ci_node["result"] + if "certificate_deployments" in ci_node_result: + for cd_node in ci_node_result["certificate_deployments"]: + title = "Problems in certificate deployments" + description = ( + get_url(endpoint) 
+ + " has problems in certificate deployments:" + ) vulnerable = False - if 'leaf_certificate_subject_matches_hostname' in cd_node: - if not cd_node['leaf_certificate_subject_matches_hostname']: + if "leaf_certificate_subject_matches_hostname" in cd_node: + if not cd_node[ + "leaf_certificate_subject_matches_hostname" + ]: vulnerable = True - description += '\n - Certificate subject does not match hostname' - for pvr_node in cd_node['path_validation_results']: - if 'openssl_error_string' in pvr_node and pvr_node['openssl_error_string'] is not None: + description += "\n - Certificate subject does not match hostname" + for pvr_node in cd_node["path_validation_results"]: + if ( + "openssl_error_string" in pvr_node + and pvr_node["openssl_error_string"] is not None + ): vulnerable = True name = None version = None - if 'trust_store' in pvr_node: - ts_node = pvr_node['trust_store'] - if 'name' in ts_node: - name = ts_node['name'] - if 'version' in ts_node: - version = ts_node['version'] - description += '\n - ' + pvr_node['openssl_error_string'] + if "trust_store" in pvr_node: + ts_node = pvr_node["trust_store"] + if "name" in ts_node: + name = ts_node["name"] + if "version" in ts_node: + version = ts_node["version"] + description += ( + "\n - " + pvr_node["openssl_error_string"] + ) if name is not None: - description += ' for trust store ' + name + description += " for trust store " + name if version is not None: - description += ', version ' + version + description += ", version " + version if vulnerable: - return get_finding(title, description, None, None, test, endpoint) + return get_finding( + title, description, None, None, test, endpoint + ) return None return None -def get_finding(title, description, vulnerability_id, references, test, endpoint): - title += ' (' + get_url(endpoint) + ')' - severity = 'Medium' +def get_finding( + title, description, vulnerability_id, references, test, endpoint +): + title += " (" + get_url(endpoint) + ")" + severity = "Medium" finding = Finding( title=title, test=test, @@ -389,7 +582,8 @@ def get_finding(title, description, vulnerability_id, references, test, endpoint severity=severity, references=references, dynamic_finding=False, - static_finding=True) + static_finding=True, + ) if vulnerability_id: finding.unsaved_vulnerability_ids = [vulnerability_id] if endpoint is not None: @@ -399,35 +593,33 @@ def get_finding(title, description, vulnerability_id, references, test, endpoint def get_url(endpoint): - url = 'unkown host' + url = "unkown host" if endpoint is not None: if endpoint.host is not None: url = endpoint.host if endpoint.port is not None: - url = url + ':' + str(endpoint.port) + url = url + ":" + str(endpoint.port) return url def get_endpoint(node): hostname = None - if 'server_info' in node: - si_node = node['server_info'] - if 'server_location' in si_node: - sl_node = si_node['server_location'] - if 'hostname' in sl_node: - hostname = sl_node['hostname'] - if 'port' in sl_node: - port = sl_node['port'] - - elif 'server_location' in node: - si_node = node['server_location'] - if 'hostname' in si_node: - hostname = si_node['hostname'] - if 'port' in si_node: - port = si_node['port'] + if "server_info" in node: + si_node = node["server_info"] + if "server_location" in si_node: + sl_node = si_node["server_location"] + if "hostname" in sl_node: + hostname = sl_node["hostname"] + if "port" in sl_node: + port = sl_node["port"] + + elif "server_location" in node: + si_node = node["server_location"] + if "hostname" in si_node: + hostname = 
si_node["hostname"] + if "port" in si_node: + port = si_node["port"] if hostname is not None: - return Endpoint( - host=hostname, - port=port) + return Endpoint(host=hostname, port=port) else: return None diff --git a/dojo/tools/sslyze/parser_xml.py b/dojo/tools/sslyze/parser_xml.py index 0fce70d8b4..bb6dc00457 100644 --- a/dojo/tools/sslyze/parser_xml.py +++ b/dojo/tools/sslyze/parser_xml.py @@ -5,7 +5,7 @@ from dojo.models import Endpoint, Finding -__author__ = 'dr3dd589' +__author__ = "dr3dd589" # FIXME discuss this list as maintenance subject WEAK_CIPHER_LIST = [ @@ -43,76 +43,104 @@ "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA", "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256", "TLS_RSA_WITH_IDEA_CBC_SHA", - "TLS_RSA_WITH_SEED_CBC_SHA" + "TLS_RSA_WITH_SEED_CBC_SHA", ] -PROTOCOLS = [ - "sslv2", - "sslv3", - "tlsv1", - "tlsv1_1", - "tlsv1_2", - "tlsv1_3" -] +PROTOCOLS = ["sslv2", "sslv3", "tlsv1", "tlsv1_1", "tlsv1_2", "tlsv1_3"] class SSLyzeXMLParser(object): - def get_findings(self, file, test): - tree = ET.parse(file) # get root of tree. root = tree.getroot() - if 'document' not in root.tag: - raise NamespaceErr("This doesn't seem to be a valid sslyze xml file.") + if "document" not in root.tag: + raise NamespaceErr( + "This doesn't seem to be a valid sslyze xml file." + ) - results = root.find('results') + results = root.find("results") dupes = dict() for target in results: - host = target.attrib['host'] - port = target.attrib['port'] - protocol = target.attrib['tlsWrappedProtocol'] + host = target.attrib["host"] + port = target.attrib["port"] + protocol = target.attrib["tlsWrappedProtocol"] for element in target: title = "" severity = "" description = "" severity = "Info" weak_cipher = {} - if element.tag == 'heartbleed': - heartbleed_element = element.find('openSslHeartbleed') - if 'isVulnerable' in heartbleed_element.attrib: - if heartbleed_element.attrib['isVulnerable'] == 'True': - title = element.attrib['title'] + " | " + host - description = "**heartbleed** : Vulnerable" + "\n\n" + \ - "**title** : " + element.attrib['title'] - if element.tag == 'openssl_ccs': - openssl_ccs_element = element.find('openSslCcsInjection') - if 'isVulnerable' in openssl_ccs_element.attrib: - if openssl_ccs_element.attrib['isVulnerable'] == 'True': - title = element.attrib['title'] + " | " + host - description = "**openssl_ccs** : Vulnerable" + "\n\n" + \ - "**title** : " + element.attrib['title'] - if element.tag == 'reneg': - reneg_element = element.find('sessionRenegotiation') - if 'isSecure' in reneg_element.attrib: - if reneg_element.attrib['isSecure'] == 'False': - title = element.attrib['title'] + " | " + host - description = "**Session Renegotiation** : Vulnerable" + "\n\n" + \ - "**title** : " + element.attrib['title'] - if element.tag in PROTOCOLS and element.attrib['isProtocolSupported'] == "True": + if element.tag == "heartbleed": + heartbleed_element = element.find("openSslHeartbleed") + if "isVulnerable" in heartbleed_element.attrib: + if heartbleed_element.attrib["isVulnerable"] == "True": + title = element.attrib["title"] + " | " + host + description = ( + "**heartbleed** : Vulnerable" + + "\n\n" + + "**title** : " + + element.attrib["title"] + ) + if element.tag == "openssl_ccs": + openssl_ccs_element = element.find("openSslCcsInjection") + if "isVulnerable" in openssl_ccs_element.attrib: + if ( + openssl_ccs_element.attrib["isVulnerable"] + == "True" + ): + title = element.attrib["title"] + " | " + host + description = ( + "**openssl_ccs** : Vulnerable" + + "\n\n" + + "**title** : " + + 
element.attrib["title"] + ) + if element.tag == "reneg": + reneg_element = element.find("sessionRenegotiation") + if "isSecure" in reneg_element.attrib: + if reneg_element.attrib["isSecure"] == "False": + title = element.attrib["title"] + " | " + host + description = ( + "**Session Renegotiation** : Vulnerable" + + "\n\n" + + "**title** : " + + element.attrib["title"] + ) + if ( + element.tag in PROTOCOLS + and element.attrib["isProtocolSupported"] == "True" + ): weak_cipher[element.tag] = [] for ciphers in element: - if ciphers.tag == 'preferredCipherSuite' or ciphers.tag == 'acceptedCipherSuites': + if ( + ciphers.tag == "preferredCipherSuite" + or ciphers.tag == "acceptedCipherSuites" + ): for cipher in ciphers: - if cipher.attrib['name'] in WEAK_CIPHER_LIST: - if not cipher.attrib['name'] in weak_cipher[element.tag]: - weak_cipher[element.tag].append(cipher.attrib['name']) + if cipher.attrib["name"] in WEAK_CIPHER_LIST: + if ( + not cipher.attrib["name"] + in weak_cipher[element.tag] + ): + weak_cipher[element.tag].append( + cipher.attrib["name"] + ) if len(weak_cipher[element.tag]) > 0: - title = element.tag + " | " + "Weak Ciphers" + " | " + host - description = "**Protocol** : " + element.tag + "\n\n" + \ - "**Weak Ciphers** : " + ",\n\n".join(weak_cipher[element.tag]) + title = ( + element.tag + " | " + "Weak Ciphers" + " | " + host + ) + description = ( + "**Protocol** : " + + element.tag + + "\n\n" + + "**Weak Ciphers** : " + + ",\n\n".join(weak_cipher[element.tag]) + ) if title and description is not None: - dupe_key = hashlib.md5(str(description + title).encode('utf-8')).hexdigest() + dupe_key = hashlib.md5( + str(description + title).encode("utf-8") + ).hexdigest() if dupe_key in dupes: finding = dupes[dupe_key] if finding.references: @@ -126,13 +154,15 @@ def get_findings(self, file, test): test=test, description=description, severity=severity, - dynamic_finding=True,) + dynamic_finding=True, + ) finding.unsaved_endpoints = list() dupes[dupe_key] = finding if host is not None: - finding.unsaved_endpoints.append(Endpoint( - host=host, - port=port, - protocol=protocol)) + finding.unsaved_endpoints.append( + Endpoint( + host=host, port=port, protocol=protocol + ) + ) return dupes.values() diff --git a/dojo/tools/stackhawk/parser.py b/dojo/tools/stackhawk/parser.py index f25f902577..a8bb4aa09b 100644 --- a/dojo/tools/stackhawk/parser.py +++ b/dojo/tools/stackhawk/parser.py @@ -6,12 +6,12 @@ class StackHawkScanMetadata: def __init__(self, completed_scan): - self.date = completed_scan['scan']['startedTimestamp'] - self.component_name = completed_scan['scan']['application'] - self.component_version = completed_scan['scan']['env'] + self.date = completed_scan["scan"]["startedTimestamp"] + self.component_name = completed_scan["scan"]["application"] + self.component_version = completed_scan["scan"]["env"] self.static_finding = False self.dynamic_finding = True - self.service = completed_scan['scan']['application'] + self.service = completed_scan["scan"]["application"] class StackHawkParser(object): @@ -36,58 +36,75 @@ def get_findings(self, json_output, test): return findings - def __extract_findings(self, completed_scan, metadata: StackHawkScanMetadata, test): + def __extract_findings( + self, completed_scan, metadata: StackHawkScanMetadata, test + ): findings = {} - if 'findings' in completed_scan: - raw_findings = completed_scan['findings'] + if "findings" in completed_scan: + raw_findings = completed_scan["findings"] for raw_finding in raw_findings: - key = raw_finding['pluginId'] 
+ key = raw_finding["pluginId"] if key not in findings: - finding = self.__extract_finding(raw_finding, metadata, test) + finding = self.__extract_finding( + raw_finding, metadata, test + ) findings[key] = finding # Update the test description these scan results are linked to. - test.description = 'View scan details here: ' + self.__hyperlink(completed_scan['scan']['scanURL']) + test.description = "View scan details here: " + self.__hyperlink( + completed_scan["scan"]["scanURL"] + ) return list(findings.values()) - def __extract_finding(self, raw_finding, metadata: StackHawkScanMetadata, test) -> Finding: - + def __extract_finding( + self, raw_finding, metadata: StackHawkScanMetadata, test + ) -> Finding: steps_to_reproduce = "Use a specific message link and click 'Validate' to see the cURL!\n\n" - host = raw_finding['host'] + host = raw_finding["host"] endpoints = [] - paths = raw_finding['paths'] + paths = raw_finding["paths"] for path in paths: - steps_to_reproduce += '**' + path['path'] + '**' +\ - self.__endpoint_status(path['status']) +\ - '\n' + self.__hyperlink(path['pathURL']) + '\n' - endpoint = Endpoint.from_uri(host + path['path']) + steps_to_reproduce += ( + "**" + + path["path"] + + "**" + + self.__endpoint_status(path["status"]) + + "\n" + + self.__hyperlink(path["pathURL"]) + + "\n" + ) + endpoint = Endpoint.from_uri(host + path["path"]) endpoints.append(endpoint) - are_all_endpoints_risk_accepted = self.__are_all_endpoints_in_status(paths, 'RISK_ACCEPTED') - are_all_endpoints_false_positive = self.__are_all_endpoints_in_status(paths, 'FALSE_POSITIVE') + are_all_endpoints_risk_accepted = self.__are_all_endpoints_in_status( + paths, "RISK_ACCEPTED" + ) + are_all_endpoints_false_positive = self.__are_all_endpoints_in_status( + paths, "FALSE_POSITIVE" + ) finding = Finding( test=test, - title=raw_finding['pluginName'], + title=raw_finding["pluginName"], date=parse_datetime(metadata.date), - severity=raw_finding['severity'], - description="View this finding in the StackHawk platform at:\n" + - self.__hyperlink(raw_finding['findingURL']), + severity=raw_finding["severity"], + description="View this finding in the StackHawk platform at:\n" + + self.__hyperlink(raw_finding["findingURL"]), steps_to_reproduce=steps_to_reproduce, component_name=metadata.component_name, component_version=metadata.component_version, static_finding=metadata.static_finding, dynamic_finding=metadata.dynamic_finding, - vuln_id_from_tool=raw_finding['pluginId'], - nb_occurences=raw_finding['totalCount'], + vuln_id_from_tool=raw_finding["pluginId"], + nb_occurences=raw_finding["totalCount"], service=metadata.service, false_p=are_all_endpoints_false_positive, - risk_accepted=are_all_endpoints_risk_accepted + risk_accepted=are_all_endpoints_risk_accepted, ) finding.unsaved_endpoints.extend(endpoints) @@ -97,34 +114,39 @@ def __extract_finding(self, raw_finding, metadata: StackHawkScanMetadata, test) def __parse_json(json_output): report = json.load(json_output) - if 'scanCompleted' not in report or 'service' not in report or report['service'] != 'StackHawk': + if ( + "scanCompleted" not in report + or "service" not in report + or report["service"] != "StackHawk" + ): # By verifying the json data, we can now make certain assumptions. # Specifically, that the attributes accessed when parsing the finding will always exist. # See our documentation for more details on this data: # https://docs.stackhawk.com/workflow-integrations/webhook.html#scan-completed - raise ValueError(" Unexpected JSON format provided. 
" - "Need help? " - "Check out the StackHawk Docs at " - "https://docs.stackhawk.com/workflow-integrations/defect-dojo.html" - ) + raise ValueError( + " Unexpected JSON format provided. " + "Need help? " + "Check out the StackHawk Docs at " + "https://docs.stackhawk.com/workflow-integrations/defect-dojo.html" + ) - return report['scanCompleted'] + return report["scanCompleted"] @staticmethod def __hyperlink(link: str) -> str: - return '[' + link + '](' + link + ')' + return "[" + link + "](" + link + ")" @staticmethod def __endpoint_status(status: str) -> str: - if status == 'NEW': - return '** - New**' - elif status == 'RISK_ACCEPTED': + if status == "NEW": + return "** - New**" + elif status == "RISK_ACCEPTED": return '** - Marked "Risk Accepted"**' - elif status == 'FALSE_POSITIVE': + elif status == "FALSE_POSITIVE": return '** - Marked "False Positive"**' else: return "" @staticmethod def __are_all_endpoints_in_status(paths, check_status: str) -> bool: - return all(item['status'] == check_status for item in paths) + return all(item["status"] == check_status for item in paths) diff --git a/dojo/tools/talisman/parser.py b/dojo/tools/talisman/parser.py index ce9e10d216..8b07e52d88 100644 --- a/dojo/tools/talisman/parser.py +++ b/dojo/tools/talisman/parser.py @@ -70,9 +70,13 @@ def get_findings(self, filename, test): ) key = hashlib.md5( - (title + message + file_path + description + severity).encode( - "utf-8" - ) + ( + title + + message + + file_path + + description + + severity + ).encode("utf-8") ).hexdigest() if key not in dupes: diff --git a/dojo/tools/tenable/csv_format.py b/dojo/tools/tenable/csv_format.py index 4c019aabba..a2e2b72d08 100644 --- a/dojo/tools/tenable/csv_format.py +++ b/dojo/tools/tenable/csv_format.py @@ -50,7 +50,9 @@ def _convert_severity(self, severity_value): def _format_cve(self, val): if val is None or val == "": return None - cve_match = re.findall(r"CVE-[0-9]+-[0-9]+", val.upper(), re.IGNORECASE) + cve_match = re.findall( + r"CVE-[0-9]+-[0-9]+", val.upper(), re.IGNORECASE + ) if cve_match: return cve_match return None @@ -64,7 +66,7 @@ def _format_cpe(self, val): def get_findings(self, filename: str, test: Test): # Read the CSV content = filename.read() - if type(content) is bytes: + if isinstance(content, bytes): content = content.decode("utf-8") csv.field_size_limit(int(sys.maxsize / 10)) # the request/resp are big reader = csv.DictReader(io.StringIO(content)) @@ -92,8 +94,15 @@ def get_findings(self, filename: str, test: Test): impact = row.get("Description", "N/A") references = row.get("See Also", "N/A") # Determine if the current row has already been processed - dupe_key = severity + title + row.get('Host', 'No host') + str(row.get('Port', 'No port')) + row.get('Synopsis', 'No synopsis') - # Finding has not been detected in the current report. Proceed with parsing + dupe_key = ( + severity + + title + + row.get("Host", "No host") + + str(row.get("Port", "No port")) + + row.get("Synopsis", "No synopsis") + ) + # Finding has not been detected in the current report. 
Proceed with + # parsing if dupe_key not in dupes: # Create the finding object find = Finding( @@ -103,16 +112,18 @@ def get_findings(self, filename: str, test: Test): severity=severity, mitigation=mitigation, impact=impact, - references=references + references=references, ) # manage CVSS vector (only v3.x for now) cvss_vector = row.get("CVSS V3 Vector", "") if cvss_vector != "": - find.cvssv3 = CVSS3("CVSS:3.0/" + str(cvss_vector)).clean_vector(output_prefix=True) + find.cvssv3 = CVSS3( + "CVSS:3.0/" + str(cvss_vector) + ).clean_vector(output_prefix=True) # Add CVSS score if present - cvssv3 = row.get('CVSSv3', "") + cvssv3 = row.get("CVSSv3", "") if cvssv3 != "": find.cvssv3_score = cvssv3 # manage CPE data @@ -120,19 +131,31 @@ def get_findings(self, filename: str, test: Test): if detected_cpe: # FIXME support more than one CPE in Nessus CSV parser if len(detected_cpe) > 1: - LOGGER.debug("more than one CPE for a finding. NOT supported by Nessus CSV parser") + LOGGER.debug( + "more than one CPE for a finding. NOT supported by Nessus CSV parser" + ) cpe_decoded = CPE(detected_cpe[0]) - find.component_name = cpe_decoded.get_product()[0] if len(cpe_decoded.get_product()) > 0 else None - find.component_version = cpe_decoded.get_version()[0] if len(cpe_decoded.get_version()) > 0 else None + find.component_name = ( + cpe_decoded.get_product()[0] + if len(cpe_decoded.get_product()) > 0 + else None + ) + find.component_version = ( + cpe_decoded.get_version()[0] + if len(cpe_decoded.get_version()) > 0 + else None + ) find.unsaved_endpoints = [] find.unsaved_vulnerability_ids = [] dupes[dupe_key] = find else: - # This is a duplicate. Update the description of the original finding + # This is a duplicate. Update the description of the original + # finding find = dupes[dupe_key] - # Determine if there is more details to be included in the description + # Determine if there is more details to be included in the + # description plugin_output = str(row.get("Plugin Output", "")) if plugin_output != "": find.description += f"\n\n{plugin_output}" @@ -156,13 +179,10 @@ def get_findings(self, filename: str, test: Test): if isinstance(port, str) and port in ["", "0"]: port = None # Update the endpoints - if '://' in host: + if "://" in host: endpoint = Endpoint.from_uri(host) else: - endpoint = Endpoint( - protocol=protocol, - host=host, - port=port) + endpoint = Endpoint(protocol=protocol, host=host, port=port) # Add the list to be processed later find.unsaved_endpoints.append(endpoint) diff --git a/dojo/tools/tenable/parser.py b/dojo/tools/tenable/parser.py index fe11c12cfe..0b54e9ea2d 100644 --- a/dojo/tools/tenable/parser.py +++ b/dojo/tools/tenable/parser.py @@ -10,12 +10,18 @@ def get_label_for_scan_types(self, scan_type): return "Tenable Scan" def get_description_for_scan_types(self, scan_type): - return "Reports can be imported as CSV or .nessus (XML) report formats." + return ( + "Reports can be imported as CSV or .nessus (XML) report formats." + ) def get_findings(self, filename, test): - if filename.name.lower().endswith(".xml") or filename.name.lower().endswith(".nessus"): + if filename.name.lower().endswith( + ".xml" + ) or filename.name.lower().endswith(".nessus"): return TenableXMLParser().get_findings(filename, test) elif filename.name.lower().endswith(".csv"): return TenableCSVParser().get_findings(filename, test) else: - raise ValueError("Filename extension not recognized. Use .xml, .nessus or .csv") + raise ValueError( + "Filename extension not recognized. 
Use .xml, .nessus or .csv" + ) diff --git a/dojo/tools/tenable/xml_format.py b/dojo/tools/tenable/xml_format.py index f1038f1b4c..aa8b17c9b1 100644 --- a/dojo/tools/tenable/xml_format.py +++ b/dojo/tools/tenable/xml_format.py @@ -45,11 +45,11 @@ def get_findings(self, filename: str, test: Test) -> list: nscan = ElementTree.parse(filename) root = nscan.getroot() - if 'NessusClientData_v2' not in root.tag: + if "NessusClientData_v2" not in root.tag: raise ValueError( - 'This version of Nessus report is not supported. ' - 'Please make sure the export is ' - 'formatted using the NessusClientData_v2 schema.' + "This version of Nessus report is not supported. " + "Please make sure the export is " + "formatted using the NessusClientData_v2 schema." ) dupes = {} @@ -57,7 +57,9 @@ def get_findings(self, filename: str, test: Test) -> list: for host in report.iter("ReportHost"): ip = host.attrib.get("name") fqdn = None - fqdn_element_text = self.safely_get_element_text(host.find('.//HostProperties/tag[@name="host-fqdn"]')) + fqdn_element_text = self.safely_get_element_text( + host.find('.//HostProperties/tag[@name="host-fqdn"]') + ) if fqdn_element_text is not None: fqdn = fqdn_element_text @@ -76,15 +78,23 @@ def get_findings(self, filename: str, test: Test) -> list: if protocol == "www": protocol = "http" if protocol not in SCHEME_PORT_MAP: - protocol = re.sub(r"[^A-Za-z0-9\-\+]+", "", item.attrib.get("protocol", protocol)) + protocol = re.sub( + r"[^A-Za-z0-9\-\+]+", + "", + item.attrib.get("protocol", protocol), + ) # Set the description with a few different fields description = "" plugin_output = None - synopsis_element_text = self.safely_get_element_text(item.find("synopsis")) + synopsis_element_text = self.safely_get_element_text( + item.find("synopsis") + ) if synopsis_element_text is not None: description = f"{synopsis_element_text}\n\n" - plugin_output_element_text = self.safely_get_element_text(item.find("plugin_output")) + plugin_output_element_text = self.safely_get_element_text( + item.find("plugin_output") + ) if plugin_output_element_text is not None: plugin_output = f"Plugin Output: {ip}{str(f':{port}' if port is not None else '')}" plugin_output += f"\n```\n{str(plugin_output_element_text)}\n```\n\n" @@ -96,31 +106,53 @@ def get_findings(self, filename: str, test: Test) -> list: # Build up the impact impact = "" - description_element_text = self.safely_get_element_text(item.find("description")) + description_element_text = self.safely_get_element_text( + item.find("description") + ) if description_element_text is not None: impact = description_element_text + "\n\n" - cvss_element_text = self.safely_get_element_text(item.find("cvss")) + cvss_element_text = self.safely_get_element_text( + item.find("cvss") + ) if cvss_element_text is not None: impact += f"CVSS Score: {cvss_element_text}\n" - cvssv3_element_text = self.safely_get_element_text(item.find("cvssv3")) + cvssv3_element_text = self.safely_get_element_text( + item.find("cvssv3") + ) if cvssv3_element_text is not None: impact += f"CVSSv3 Score: {cvssv3_element_text}\n" - cvss_vector_element_text = self.safely_get_element_text(item.find("cvss_vector")) + cvss_vector_element_text = self.safely_get_element_text( + item.find("cvss_vector") + ) if cvss_vector_element_text is not None: impact += f"CVSS Vector: {cvss_vector_element_text}\n" - cvssv3_vector_element_text = self.safely_get_element_text(item.find("cvss3_vector")) + cvssv3_vector_element_text = self.safely_get_element_text( + item.find("cvss3_vector") + ) if 
cvssv3_vector_element_text is not None: - impact += f"CVSSv3 Vector: {cvssv3_vector_element_text}\n" - cvss_base_score_element_text = self.safely_get_element_text(item.find("cvss_base_score")) + impact += ( + f"CVSSv3 Vector: {cvssv3_vector_element_text}\n" + ) + cvss_base_score_element_text = ( + self.safely_get_element_text( + item.find("cvss_base_score") + ) + ) if cvss_base_score_element_text is not None: impact += f"CVSS Base Score: {cvss_base_score_element_text}\n" - cvss_temporal_score_element_text = self.safely_get_element_text(item.find("cvss_temporal_score")) + cvss_temporal_score_element_text = ( + self.safely_get_element_text( + item.find("cvss_temporal_score") + ) + ) if cvss_temporal_score_element_text is not None: impact += f"CVSS Temporal Score: {cvss_temporal_score_element_text}\n" # Set the mitigation mitigation = "N/A" - mitigation_element_text = self.safely_get_element_text(item.find("solution")) + mitigation_element_text = self.safely_get_element_text( + item.find("solution") + ) if mitigation_element_text is not None: mitigation = mitigation_element_text @@ -138,28 +170,41 @@ def get_findings(self, filename: str, test: Test) -> list: references += xref_text + "\n" vulnerability_id = None - cve_element_text = self.safely_get_element_text(item.find("cve")) + cve_element_text = self.safely_get_element_text( + item.find("cve") + ) if cve_element_text is not None: vulnerability_id = cve_element_text cwe = None - cwe_element_text = self.safely_get_element_text(item.find("cwe")) + cwe_element_text = self.safely_get_element_text( + item.find("cwe") + ) if cwe_element_text is not None: cwe = cwe_element_text cvssv3 = None - cvssv3_element_text = self.safely_get_element_text(item.find("cvss3_vector")) + cvssv3_element_text = self.safely_get_element_text( + item.find("cvss3_vector") + ) if cvssv3_element_text is not None: if "CVSS:3.0/" not in cvssv3_element_text: - cvssv3_element_text = f"CVSS:3.0/{cvssv3_element_text}" - cvssv3 = CVSS3(cvssv3_element_text).clean_vector(output_prefix=True) + cvssv3_element_text = ( + f"CVSS:3.0/{cvssv3_element_text}" + ) + cvssv3 = CVSS3(cvssv3_element_text).clean_vector( + output_prefix=True + ) cvssv3_score = None - cvssv3_score_element_text = self.safely_get_element_text(item.find("cvssv3")) + cvssv3_score_element_text = self.safely_get_element_text( + item.find("cvssv3") + ) if cvssv3_score_element_text is not None: cvssv3_score = cvssv3_score_element_text - # Determine the current entry has already been parsed in this report + # Determine the current entry has already been parsed in + # this report dupe_key = severity + title if dupe_key not in dupes: find = Finding( @@ -172,7 +217,7 @@ def get_findings(self, filename: str, test: Test) -> list: references=references, cwe=cwe, cvssv3=cvssv3, - cvssv3_score=cvssv3_score + cvssv3_score=cvssv3_score, ) find.unsaved_endpoints = [] find.unsaved_vulnerability_ids = [] @@ -191,9 +236,11 @@ def get_findings(self, filename: str, test: Test) -> list: elif protocol == "general": endpoint = Endpoint(host=fqdn if fqdn else ip) else: - endpoint = Endpoint(protocol=protocol, - host=fqdn if fqdn else ip, - port=port) + endpoint = Endpoint( + protocol=protocol, + host=fqdn if fqdn else ip, + port=port, + ) find.unsaved_endpoints.append(endpoint) return list(dupes.values()) diff --git a/dojo/tools/terrascan/parser.py b/dojo/tools/terrascan/parser.py index 98d7e662d3..15e9e06813 100644 --- a/dojo/tools/terrascan/parser.py +++ b/dojo/tools/terrascan/parser.py @@ -27,26 +27,33 @@ def 
get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): data = json.load(filename) dupes = {} - if 'results' not in data and 'violations' not in data.get('results'): + if "results" not in data and "violations" not in data.get("results"): raise ValueError("missing mandatory attribute 'results'") - if data.get('results').get('violations') is None: + if data.get("results").get("violations") is None: return list() - for item in data.get('results').get('violations'): - rule_name = item.get('rule_name') - description = item.get('description') - if item.get('severity') in self.SEVERITY: - severity = self.SEVERITY[item.get('severity')] + for item in data.get("results").get("violations"): + rule_name = item.get("rule_name") + description = item.get("description") + if item.get("severity") in self.SEVERITY: + severity = self.SEVERITY[item.get("severity")] else: severity = "Info" - rule_id = item.get('rule_id') - category = item.get('category') - resource_name = item.get('resource_name') - resource_type = item.get('resource_type') - file = item.get('file') - line = item.get('line') + rule_id = item.get("rule_id") + category = item.get("category") + resource_name = item.get("resource_name") + resource_type = item.get("resource_type") + file = item.get("file") + line = item.get("line") dupe_key = hashlib.sha256( - (rule_id + rule_name + resource_name + resource_type + file + str(line)).encode('utf-8') + ( + rule_id + + rule_name + + resource_name + + resource_type + + file + + str(line) + ).encode("utf-8") ).hexdigest() if dupe_key in dupes: diff --git a/dojo/tools/testssl/parser.py b/dojo/tools/testssl/parser.py index 91bc876184..0a03239c44 100644 --- a/dojo/tools/testssl/parser.py +++ b/dojo/tools/testssl/parser.py @@ -6,7 +6,6 @@ class TestsslParser(object): - def get_scan_types(self): return ["Testssl Scan"] @@ -18,33 +17,46 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): content = filename.read() - if type(content) is bytes: - content = content.decode('utf-8') - reader = csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"') + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = csv.DictReader( + io.StringIO(content), delimiter=",", quotechar='"' + ) dupes = dict() for row in reader: # filter 'OK' # possible values: LOW|MEDIUM|HIGH|CRITICAL + WARN|OK|INFO - if row['severity'] in ['OK']: + if row["severity"] in ["OK"]: continue - if row['id'] in ['rating_spec', 'rating_doc', 'protocol_support_score', 'protocol_support_score_weighted', 'key_exchange_score', 'key_exchange_score_weighted', 'cipher_strength_score', 'cipher_strength_score_weighted', 'final_score', 'overall_grade']: + if row["id"] in [ + "rating_spec", + "rating_doc", + "protocol_support_score", + "protocol_support_score_weighted", + "key_exchange_score", + "key_exchange_score_weighted", + "cipher_strength_score", + "cipher_strength_score_weighted", + "final_score", + "overall_grade", + ]: continue - if 'grade_cap_reason_' in row['id']: + if "grade_cap_reason_" in row["id"]: continue # convert severity - severity = row['severity'].lower().capitalize() - if severity == 'Warn' or severity == 'Fatal': - severity = 'Info' + severity = row["severity"].lower().capitalize() + if severity == "Warn" or severity == "Fatal": + severity = "Info" # detect CVEs - cves = row['cve'].split(' ') + cves = row["cve"].split(" ") if len(cves) == 0: cves = [None] for vulnerability in cves: finding = Finding( - title=row['id'], - 
description=row['finding'], + title=row["id"], + description=row["finding"], severity=severity, nb_occurences=1, ) @@ -52,25 +64,39 @@ def get_findings(self, filename, test): if vulnerability: finding.unsaved_vulnerability_ids = [vulnerability] # manage CWE - if '-' in row['cwe']: - finding.cwe = int(row['cwe'].split('-')[1].strip()) + if "-" in row["cwe"]: + finding.cwe = int(row["cwe"].split("-")[1].strip()) # manage endpoint - finding.unsaved_endpoints = [Endpoint(host=row['fqdn/ip'].split("/")[0])] - if row.get('port') and row['port'].isdigit(): - finding.unsaved_endpoints[0].port = int(row['port']) + finding.unsaved_endpoints = [ + Endpoint(host=row["fqdn/ip"].split("/")[0]) + ] + if row.get("port") and row["port"].isdigit(): + finding.unsaved_endpoints[0].port = int(row["port"]) # internal de-duplication - dupe_key = hashlib.sha256("|".join([ - finding.description, - finding.title, - str(vulnerability) - ]).encode('utf-8')).hexdigest() + dupe_key = hashlib.sha256( + "|".join( + [ + finding.description, + finding.title, + str(vulnerability), + ] + ).encode("utf-8") + ).hexdigest() if dupe_key in dupes: - dupes[dupe_key].unsaved_endpoints.extend(finding.unsaved_endpoints) + dupes[dupe_key].unsaved_endpoints.extend( + finding.unsaved_endpoints + ) if dupes[dupe_key].unsaved_vulnerability_ids: - dupes[dupe_key].unsaved_vulnerability_ids.extend(finding.unsaved_vulnerability_ids) + dupes[dupe_key].unsaved_vulnerability_ids.extend( + finding.unsaved_vulnerability_ids + ) else: - dupes[dupe_key].unsaved_vulnerability_ids = finding.unsaved_vulnerability_ids + dupes[ + dupe_key + ].unsaved_vulnerability_ids = ( + finding.unsaved_vulnerability_ids + ) dupes[dupe_key].nb_occurences += finding.nb_occurences else: dupes[dupe_key] = finding diff --git a/dojo/tools/tfsec/parser.py b/dojo/tools/tfsec/parser.py index 47a6f0455d..fd6751cc53 100644 --- a/dojo/tools/tfsec/parser.py +++ b/dojo/tools/tfsec/parser.py @@ -31,33 +31,43 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): data = json.load(filename) dupes = {} - if 'results' not in data: - raise ValueError("Incorrect TFSec scan, missing attribute 'results'") - if data.get('results') is None: + if "results" not in data: + raise ValueError( + "Incorrect TFSec scan, missing attribute 'results'" + ) + if data.get("results") is None: return list() - for item in data.get('results'): - if item.get('passed', None): + for item in data.get("results"): + if item.get("passed", None): continue - rule_id = item.get('rule_id') - rule_description = item.get('rule_description') - rule_provider = item.get('rule_provider') - file = item.get('location').get('filename') - start_line = item.get('location').get('start_line') - end_line = item.get('location').get('end_line') - description = '\n'.join(["Rule ID: " + rule_id, item.get('description')]) - impact = item.get('impact') - resolution = item.get('resolution') - if item.get('links', None) is not None: - references = '\n'.join(item.get('links')) + rule_id = item.get("rule_id") + rule_description = item.get("rule_description") + rule_provider = item.get("rule_provider") + file = item.get("location").get("filename") + start_line = item.get("location").get("start_line") + end_line = item.get("location").get("end_line") + description = "\n".join( + ["Rule ID: " + rule_id, item.get("description")] + ) + impact = item.get("impact") + resolution = item.get("resolution") + if item.get("links", None) is not None: + references = "\n".join(item.get("links")) else: - references = 
item.get('link', None) - if item.get('severity').upper() in self.SEVERITY: - severity = self.SEVERITY[item.get('severity').upper()] + references = item.get("link", None) + if item.get("severity").upper() in self.SEVERITY: + severity = self.SEVERITY[item.get("severity").upper()] else: severity = "Low" dupe_key = hashlib.sha256( - (rule_provider + rule_id + file + str(start_line) + str(end_line)).encode('utf-8') + ( + rule_provider + + rule_id + + file + + str(start_line) + + str(end_line) + ).encode("utf-8") ).hexdigest() if dupe_key in dupes: diff --git a/dojo/tools/trivy/parser.py b/dojo/tools/trivy/parser.py index 40b08aa38e..e0dfc5c20b 100644 --- a/dojo/tools/trivy/parser.py +++ b/dojo/tools/trivy/parser.py @@ -45,7 +45,6 @@ class TrivyParser: - def get_scan_types(self): return ["Trivy Scan"] @@ -56,12 +55,11 @@ def get_description_for_scan_types(self, scan_type): return "Import trivy JSON scan report." def get_findings(self, scan_file, test): - scan_data = scan_file.read() try: - data = json.loads(str(scan_data, 'utf-8')) - except: + data = json.loads(str(scan_data, "utf-8")) + except Exception: data = json.loads(scan_data) # Legacy format is empty @@ -71,96 +69,107 @@ def get_findings(self, scan_file, test): elif isinstance(data, list): return self.get_result_items(test, data) else: - schema_version = data.get('SchemaVersion', None) - cluster_name = data.get('ClusterName') + schema_version = data.get("SchemaVersion", None) + cluster_name = data.get("ClusterName") if schema_version == 2: - results = data.get('Results', []) + results = data.get("Results", []) return self.get_result_items(test, results) elif cluster_name: findings = list() - vulnerabilities = data.get('Vulnerabilities', []) + vulnerabilities = data.get("Vulnerabilities", []) for service in vulnerabilities: - namespace = service.get('Namespace') - kind = service.get('Kind') - name = service.get('Name') - service_name = '' + namespace = service.get("Namespace") + kind = service.get("Kind") + name = service.get("Name") + service_name = "" if namespace: - service_name = f'{namespace} / ' + service_name = f"{namespace} / " if kind: - service_name += f'{kind} / ' + service_name += f"{kind} / " if name: - service_name += f'{name} / ' + service_name += f"{name} / " if len(service_name) >= 3: service_name = service_name[:-3] - findings += self.get_result_items(test, service.get('Results', []), service_name) - misconfigurations = data.get('Misconfigurations', []) + findings += self.get_result_items( + test, service.get("Results", []), service_name + ) + misconfigurations = data.get("Misconfigurations", []) for service in misconfigurations: - namespace = service.get('Namespace') - kind = service.get('Kind') - name = service.get('Name') - service_name = '' + namespace = service.get("Namespace") + kind = service.get("Kind") + name = service.get("Name") + service_name = "" if namespace: - service_name = f'{namespace} / ' + service_name = f"{namespace} / " if kind: - service_name += f'{kind} / ' + service_name += f"{kind} / " if name: - service_name += f'{name} / ' + service_name += f"{name} / " if len(service_name) >= 3: service_name = service_name[:-3] - findings += self.get_result_items(test, service.get('Results', []), service_name) + findings += self.get_result_items( + test, service.get("Results", []), service_name + ) return findings else: - raise ValueError('Schema of Trivy json report is not supported') + raise ValueError( + "Schema of Trivy json report is not supported" + ) def get_result_items(self, test, results, 
service_name=None): items = list() for target_data in results: - if not isinstance(target_data, dict) or 'Target' not in target_data: + if ( + not isinstance(target_data, dict) + or "Target" not in target_data + ): continue - target = target_data['Target'] + target = target_data["Target"] - target_target = target_data.get('Target') - target_class = target_data.get('Class') - target_type = target_data.get('Type') + target_target = target_data.get("Target") + target_class = target_data.get("Class") + target_type = target_data.get("Type") - vulnerabilities = target_data.get('Vulnerabilities', []) or [] + vulnerabilities = target_data.get("Vulnerabilities", []) or [] for vuln in vulnerabilities: if not isinstance(vuln, dict): continue try: - vuln_id = vuln.get('VulnerabilityID', '0') - package_name = vuln['PkgName'] - severity = TRIVY_SEVERITIES[vuln['Severity']] - file_path = vuln.get('PkgPath') + vuln_id = vuln.get("VulnerabilityID", "0") + package_name = vuln["PkgName"] + severity = TRIVY_SEVERITIES[vuln["Severity"]] + file_path = vuln.get("PkgPath") except KeyError as exc: - logger.warning('skip vulnerability due %r', exc) + logger.warning("skip vulnerability due %r", exc) continue - package_version = vuln.get('InstalledVersion', '') - references = '\n'.join(vuln.get('References', [])) - mitigation = vuln.get('FixedVersion', '') - if len(vuln.get('CweIDs', [])) > 0: - cwe = int(vuln['CweIDs'][0].split("-")[1]) + package_version = vuln.get("InstalledVersion", "") + references = "\n".join(vuln.get("References", [])) + mitigation = vuln.get("FixedVersion", "") + if len(vuln.get("CweIDs", [])) > 0: + cwe = int(vuln["CweIDs"][0].split("-")[1]) else: cwe = 0 - type = target_data.get('Type', '') - title = ' '.join([ - vuln_id, - package_name, - package_version, - ]) + type = target_data.get("Type", "") + title = " ".join( + [ + vuln_id, + package_name, + package_version, + ] + ) description = DESCRIPTION_TEMPLATE.format( - title=vuln.get('Title', ''), + title=vuln.get("Title", ""), target=target, type=type, fixed_version=mitigation, - description_text=vuln.get('Description', ''), + description_text=vuln.get("Description", ""), ) - cvss = vuln.get('CVSS', None) + cvss = vuln.get("CVSS", None) cvssv3 = None if cvss is not None: - nvd = cvss.get('nvd', None) + nvd = cvss.get("nvd", None) if nvd is not None: - cvssv3 = nvd.get('V3Vector', None) + cvssv3 = nvd.get("V3Vector", None) finding = Finding( test=test, title=title, @@ -184,19 +193,19 @@ def get_result_items(self, test, results, service_name=None): items.append(finding) - misconfigurations = target_data.get('Misconfigurations', []) + misconfigurations = target_data.get("Misconfigurations", []) for misconfiguration in misconfigurations: - misc_type = misconfiguration.get('Type') - misc_id = misconfiguration.get('ID') - misc_title = misconfiguration.get('Title') - misc_description = misconfiguration.get('Description') - misc_message = misconfiguration.get('Message') - misc_resolution = misconfiguration.get('Resolution') - misc_severity = misconfiguration.get('Severity') - misc_primary_url = misconfiguration.get('PrimaryURL') - misc_references = misconfiguration.get('References', []) - - title = f'{misc_id} - {misc_title}' + misc_type = misconfiguration.get("Type") + misc_id = misconfiguration.get("ID") + misc_title = misconfiguration.get("Title") + misc_description = misconfiguration.get("Description") + misc_message = misconfiguration.get("Message") + misc_resolution = misconfiguration.get("Resolution") + misc_severity = 
misconfiguration.get("Severity") + misc_primary_url = misconfiguration.get("PrimaryURL") + misc_references = misconfiguration.get("References", []) + + title = f"{misc_id} - {misc_title}" description = MISC_DESCRIPTION_TEMPLATE.format( target=target_target, type=misc_type, @@ -206,13 +215,13 @@ def get_result_items(self, test, results, service_name=None): severity = TRIVY_SEVERITIES[misc_severity] references = None if misc_primary_url: - references = f'{misc_primary_url}\n' + references = f"{misc_primary_url}\n" if misc_primary_url in misc_references: misc_references.remove(misc_primary_url) if references: - references += '\n'.join(misc_references) + references += "\n".join(misc_references) else: - references = '\n'.join(misc_references) + references = "\n".join(misc_references) finding = Finding( test=test, @@ -228,15 +237,15 @@ def get_result_items(self, test, results, service_name=None): ) items.append(finding) - secrets = target_data.get('Secrets', []) + secrets = target_data.get("Secrets", []) for secret in secrets: - secret_category = secret.get('Category') - secret_title = secret.get('Title') - secret_severity = secret.get('Severity') - secret_match = secret.get('Match') - secret_start_line = secret.get('StartLine') + secret_category = secret.get("Category") + secret_title = secret.get("Title") + secret_severity = secret.get("Severity") + secret_match = secret.get("Match") + secret_start_line = secret.get("StartLine") - title = f'Secret detected in {target_target} - {secret_title}' + title = f"Secret detected in {target_target} - {secret_title}" description = SECRET_DESCRIPTION_TEMPLATE.format( title=secret_title, category=secret_category, @@ -258,17 +267,17 @@ def get_result_items(self, test, results, service_name=None): ) items.append(finding) - licenses = target_data.get('Licenses', []) + licenses = target_data.get("Licenses", []) for license in licenses: - license_severity = license.get('Severity') - license_category = license.get('Category') - license_pkgname = license.get('PkgName') - license_filepath = license.get('FilePath') - license_name = license.get('Name') - license_confidence = license.get('Confidence') - license_link = license.get('Link') - - title = f'License detected in {target_target} - {license_name}' + license_severity = license.get("Severity") + license_category = license.get("Category") + license_pkgname = license.get("PkgName") + license_filepath = license.get("FilePath") + license_name = license.get("Name") + license_confidence = license.get("Confidence") + license_link = license.get("Link") + + title = f"License detected in {target_target} - {license_name}" description = LICENSE_DESCRIPTION_TEMPLATE.format( title=license_name, category=license_category, diff --git a/dojo/tools/trivy_operator/parser.py b/dojo/tools/trivy_operator/parser.py index 99c1677b3f..4e1cadda7a 100644 --- a/dojo/tools/trivy_operator/parser.py +++ b/dojo/tools/trivy_operator/parser.py @@ -28,7 +28,6 @@ class TrivyOperatorParser: - def get_scan_types(self): return ["Trivy Operator Scan"] @@ -42,50 +41,53 @@ def get_findings(self, scan_file, test): scan_data = scan_file.read() try: - data = json.loads(str(scan_data, 'utf-8')) - except: + data = json.loads(str(scan_data, "utf-8")) + except Exception: data = json.loads(scan_data) if data is None: return list() - metadata = data.get('metadata', None) + metadata = data.get("metadata", None) if metadata is None: return list() - labels = metadata.get('labels', None) + labels = metadata.get("labels", None) if labels is None: return list() - 
resource_namespace = labels.get('trivy-operator.resource.namespace', '') - resource_kind = labels.get('trivy-operator.resource.kind', '') - resource_name = labels.get('trivy-operator.resource.name', '') - container_name = labels.get('trivy-operator.container.name', '') - service = '/'.join([resource_namespace, resource_kind, resource_name]) - if container_name != '': - service = '/'.join([service, container_name]) - - report = data.get('report', None) + resource_namespace = labels.get( + "trivy-operator.resource.namespace", "" + ) + resource_kind = labels.get("trivy-operator.resource.kind", "") + resource_name = labels.get("trivy-operator.resource.name", "") + container_name = labels.get("trivy-operator.container.name", "") + service = "/".join([resource_namespace, resource_kind, resource_name]) + if container_name != "": + service = "/".join([service, container_name]) + + report = data.get("report", None) if report is None: return list() findings = list() - vulnerabilities = report.get('vulnerabilities', None) + vulnerabilities = report.get("vulnerabilities", None) if vulnerabilities is not None: for vulnerability in vulnerabilities: - vuln_id = vulnerability.get('vulnerabilityID', '0') - severity = TRIVY_SEVERITIES[vulnerability.get('severity')] - references = vulnerability.get('primaryLink') - mitigation = vulnerability.get('fixedVersion') - package_name = vulnerability.get('resource') - package_version = vulnerability.get('installedVersion') - cvssv3_score = vulnerability.get('score') + vuln_id = vulnerability.get("vulnerabilityID", "0") + severity = TRIVY_SEVERITIES[vulnerability.get("severity")] + references = vulnerability.get("primaryLink") + mitigation = vulnerability.get("fixedVersion") + package_name = vulnerability.get("resource") + package_version = vulnerability.get("installedVersion") + cvssv3_score = vulnerability.get("score") description = DESCRIPTION_TEMPLATE.format( - title=vulnerability.get('title'), - fixed_version=mitigation + title=vulnerability.get("title"), fixed_version=mitigation + ) + title = " ".join( + [ + vuln_id, + package_name, + package_version, + ] ) - title = ' '.join([ - vuln_id, - package_name, - package_version, - ]) finding = Finding( test=test, title=title, @@ -98,22 +100,26 @@ def get_findings(self, scan_file, test): description=description, static_finding=True, dynamic_finding=False, - service=service) + service=service, + ) if vuln_id: finding.unsaved_vulnerability_ids = [vuln_id] findings.append(finding) - checks = report.get('checks', None) + checks = report.get("checks", None) if checks is not None: for check in checks: - check_title = check.get('title') - check_severity = TRIVY_SEVERITIES[check.get('severity')] - check_id = check.get('checkID', '0') - check_references = '' + check_title = check.get("title") + check_severity = TRIVY_SEVERITIES[check.get("severity")] + check_id = check.get("checkID", "0") + check_references = "" if check_id != 0: - check_references = "https://avd.aquasec.com/misconfig/kubernetes/" + check_id.lower() - check_description = check.get('description', '') - title = f'{check_id} - {check_title}' + check_references = ( + "https://avd.aquasec.com/misconfig/kubernetes/" + + check_id.lower() + ) + check_description = check.get("description", "") + title = f"{check_id} - {check_title}" finding = Finding( test=test, title=title, @@ -122,22 +128,23 @@ def get_findings(self, scan_file, test): description=check_description, static_finding=True, dynamic_finding=False, - service=service) + service=service, + ) if check_id: 
finding.unsaved_vulnerability_ids = [check_id] findings.append(finding) - secrets = report.get('secrets', None) + secrets = report.get("secrets", None) if secrets is not None: for secret in secrets: - secret_title = secret.get('title') - secret_category = secret.get('category') - secret_match = secret.get('match', '') - secret_severity = TRIVY_SEVERITIES[secret.get('severity')] - secret_rule_id = secret.get('ruleID', '0') - secret_target = secret.get('target', '') - secret_references = secret.get('ruleID', '') - title = f'Secret detected in {secret_target} - {secret_title}' + secret_title = secret.get("title") + secret_category = secret.get("category") + secret_match = secret.get("match", "") + secret_severity = TRIVY_SEVERITIES[secret.get("severity")] + secret_rule_id = secret.get("ruleID", "0") + secret_target = secret.get("target", "") + secret_references = secret.get("ruleID", "") + title = f"Secret detected in {secret_target} - {secret_title}" secret_description = SECRET_DESCRIPTION_TEMPLATE.format( title=secret_title, category=secret_category, @@ -153,7 +160,8 @@ def get_findings(self, scan_file, test): file_path=secret_target, static_finding=True, dynamic_finding=False, - service=service) + service=service, + ) if secret_rule_id: finding.unsaved_vulnerability_ids = [secret_rule_id] findings.append(finding) diff --git a/dojo/tools/trufflehog/parser.py b/dojo/tools/trufflehog/parser.py index d825e1e70b..367b6ace54 100644 --- a/dojo/tools/trufflehog/parser.py +++ b/dojo/tools/trufflehog/parser.py @@ -5,7 +5,6 @@ class TruffleHogParser(object): - def get_scan_types(self): return ["Trufflehog Scan"] @@ -21,13 +20,13 @@ def get_findings(self, filename, test): if len(dict_strs) == 0: return [] try: - json_data = json.loads(str(dict_strs[0], 'utf-8')) - except: + json_data = json.loads(str(dict_strs[0], "utf-8")) + except Exception: json_data = json.loads(dict_strs[0]) - if 'SourceMetadata' in json_data: + if "SourceMetadata" in json_data: return self.get_findings_v3(dict_strs, test) - elif 'path' in json_data: + elif "path" in json_data: return self.get_findings_v2(dict_strs, test) else: return [] @@ -36,8 +35,8 @@ def get_findings_v2(self, data, test): dupes = {} for line in data: try: - json_data = json.loads(str(line, 'utf-8')) - except: + json_data = json.loads(str(line, "utf-8")) + except Exception: json_data = json.loads(line) file = json_data.get("path") @@ -45,8 +44,12 @@ def get_findings_v2(self, data, test): titleText = f"Hard Coded {reason} in: {file}" commit = json_data.get("commit") description = "**Commit:** " + str(commit).split("\n")[0] + "\n" - description += "```\n" + str(commit).replace('```', '\\`\\`\\`') + "\n```\n" - description += "**Commit Hash:** " + json_data.get("commitHash") + "\n" + description += ( + "```\n" + str(commit).replace("```", "\\`\\`\\`") + "\n```\n" + ) + description += ( + "**Commit Hash:** " + json_data.get("commitHash") + "\n" + ) description += "**Commit Date:** " + json_data.get("date") + "\n" description += "**Branch:** " + json_data.get("branch") + "\n" description += "**Reason:** " + json_data.get("reason") + "\n" @@ -60,9 +63,13 @@ def get_findings_v2(self, data, test): elif reason == "Generic Secret": severity = "Medium" - strings_found = "".join(string + "\n" for string in json_data.get("stringsFound")) + strings_found = "".join( + string + "\n" for string in json_data.get("stringsFound") + ) dupe_key = hashlib.md5((file + reason).encode("utf-8")).hexdigest() - description += "\n**Strings Found:**\n```" + strings_found + "```\n" + 
description += ( + "\n**Strings Found:**\n```" + strings_found + "```\n" + ) if dupe_key in dupes: finding = dupes[dupe_key] @@ -83,10 +90,11 @@ def get_findings_v2(self, data, test): references="N/A", file_path=file, line=0, # setting it to a fake value to activate deduplication - url='N/A', + url="N/A", dynamic_finding=False, static_finding=True, - nb_occurences=1) + nb_occurences=1, + ) dupes[dupe_key] = finding @@ -96,11 +104,11 @@ def get_findings_v3(self, data, test): dupes = {} for line in data: try: - json_data = json.loads(str(line, 'utf-8')) - except: + json_data = json.loads(str(line, "utf-8")) + except Exception: json_data = json.loads(line) - metadata = json_data.get('SourceMetadata', {}).get('Data', {}) + metadata = json_data.get("SourceMetadata", {}).get("Data", {}) # Get the source of the data source = {} source_data = {} @@ -140,18 +148,26 @@ def get_findings_v3(self, data, test): description += f"**Structured Data:**\n{self.walk_dict(structured_data)}\n" if extra_data: - description += f"**Extra Data:**\n{self.walk_dict(extra_data)}\n" + description += ( + f"**Extra Data:**\n{self.walk_dict(extra_data)}\n" + ) severity = "Critical" if not verified: - if "Oauth" in detector_name or "AWS" in detector_name or "Heroku" in detector_name: + if ( + "Oauth" in detector_name + or "AWS" in detector_name + or "Heroku" in detector_name + ): severity = "Critical" elif detector_name == "PrivateKey": severity = "High" elif detector_name == "Generic Secret": severity = "Medium" - dupe_key = hashlib.md5((file + detector_name).encode("utf-8")).hexdigest() + dupe_key = hashlib.md5( + (file + detector_name).encode("utf-8") + ).hexdigest() if dupe_key in dupes: finding = dupes[dupe_key] @@ -172,10 +188,11 @@ def get_findings_v3(self, data, test): references="N/A", file_path=file, line=line_number, # setting it to a fake value to activate deduplication - url='N/A', + url="N/A", dynamic_finding=False, static_finding=True, - nb_occurences=1) + nb_occurences=1, + ) dupes[dupe_key] = finding @@ -184,11 +201,13 @@ def get_findings_v3(self, data, test): def walk_dict(self, obj, tab_count=1): return_string = "" if obj: - tab_string = tab_count * '\t' + tab_string = tab_count * "\t" if isinstance(obj, dict): for key, value in obj.items(): if isinstance(value, dict): - return_string += self.walk_dict(value, tab_count=(tab_count + 1)) + return_string += self.walk_dict( + value, tab_count=(tab_count + 1) + ) continue else: return_string += f"{tab_string}{key}: {value}\n" diff --git a/dojo/tools/trufflehog3/parser.py b/dojo/tools/trufflehog3/parser.py index 1df57bc7bd..f723da3ff6 100644 --- a/dojo/tools/trufflehog3/parser.py +++ b/dojo/tools/trufflehog3/parser.py @@ -5,7 +5,6 @@ class TruffleHog3Parser(object): - def get_scan_types(self): return ["Trufflehog3 Scan"] @@ -21,12 +20,12 @@ def get_findings(self, filename, test): dupes = dict() for json_data in data: - if json_data.get('reason'): + if json_data.get("reason"): self.get_finding_legacy(json_data, test, dupes) - elif json_data.get('rule'): + elif json_data.get("rule"): self.get_finding_current(json_data, test, dupes) else: - raise ValueError('Format is not recognized for Trufflehog3') + raise ValueError("Format is not recognized for Trufflehog3") return list(dupes.values()) @@ -37,9 +36,17 @@ def get_finding_legacy(self, json_data, test, dupes): titleText = "Hard Coded " + reason + " in: " + file description = "" - description = "**Commit:** " + str(json_data.get("commit")).split("\n")[0] + "\n" - description += "\n```\n" + 
str(json_data.get("commit")).replace('```', '\\`\\`\\`') + "\n```\n" - description += "**Commit Hash:** " + str(json_data.get("commitHash")) + "\n" + description = ( + "**Commit:** " + str(json_data.get("commit")).split("\n")[0] + "\n" + ) + description += ( + "\n```\n" + + str(json_data.get("commit")).replace("```", "\\`\\`\\`") + + "\n```\n" + ) + description += ( + "**Commit Hash:** " + str(json_data.get("commitHash")) + "\n" + ) description += "**Commit Date:** " + json_data["date"] + "\n" description += "**Branch:** " + str(json_data.get("branch")) + "\n" description += "**Reason:** " + json_data["reason"] + "\n" @@ -58,7 +65,9 @@ def get_finding_legacy(self, json_data, test, dupes): strings_found += string + "\n" dupe_key = hashlib.md5((file + reason).encode("utf-8")).hexdigest() - description += "\n**Strings Found:**\n```\n" + strings_found + "\n```\n" + description += ( + "\n**Strings Found:**\n```\n" + strings_found + "\n```\n" + ) if dupe_key in dupes: finding = dupes[dupe_key] @@ -68,80 +77,93 @@ def get_finding_legacy(self, json_data, test, dupes): else: dupes[dupe_key] = True - finding = Finding(title=titleText, - test=test, - cwe=798, - description=description, - severity=severity, - mitigation="Secrets and passwords should be stored in a secure vault and/or secure storage.", - impact="This weakness can lead to the exposure of resources or functionality to unintended actors, possibly providing attackers with sensitive information or even execute arbitrary code.", - file_path=file, - line=0, # setting it to a fake value to activate deduplication - dynamic_finding=False, - static_finding=True, - nb_occurences=1) + finding = Finding( + title=titleText, + test=test, + cwe=798, + description=description, + severity=severity, + mitigation="Secrets and passwords should be stored in a secure vault and/or secure storage.", + impact="This weakness can lead to the exposure of resources or functionality to unintended actors, possibly providing attackers with sensitive information or even execute arbitrary code.", + file_path=file, + line=0, # setting it to a fake value to activate deduplication + dynamic_finding=False, + static_finding=True, + nb_occurences=1, + ) dupes[dupe_key] = finding def get_finding_current(self, json_data, test, dupes): - message = json_data['rule'].get('message') - severity = json_data['rule'].get('severity') + message = json_data["rule"].get("message") + severity = json_data["rule"].get("severity") if severity: severity = severity.capitalize() - file = json_data.get('path') - line = json_data.get('line') + file = json_data.get("path") + line = json_data.get("line") if line: line = int(line) else: line = 0 - secret = json_data.get('secret') - context = json_data.get('context') - id = json_data.get('id') - branch = json_data.get('branch') - commit_message = json_data.get('message') + secret = json_data.get("secret") + context = json_data.get("context") + json_data.get("id") + branch = json_data.get("branch") + commit_message = json_data.get("message") # Author will not be used because of GDPR # author = json_data.get('author') - commit = json_data.get('commit') - date = json_data.get('date') + commit = json_data.get("commit") + date = json_data.get("date") - title = f'{message} found in {file}' + title = f"{message} found in {file}" - description = f'**Secret:** {secret}\n' + description = f"**Secret:** {secret}\n" if context: - description += '**Context:**\n' + description += "**Context:**\n" for key in context: - description += f' {key}: {context[key]}\n' + 
description += f" {key}: {context[key]}\n" if branch: - description += f'**Branch:** {branch}\n' + description += f"**Branch:** {branch}\n" if commit_message: if len(commit_message.split("\n")) > 1: - description += "**Commit message:** " + "\n```\n" + commit_message.replace('```', '\\`\\`\\`') + "\n```\n" + description += ( + "**Commit message:** " + + "\n```\n" + + commit_message.replace("```", "\\`\\`\\`") + + "\n```\n" + ) else: - description += f'**Commit message:** {commit_message}\n' + description += f"**Commit message:** {commit_message}\n" if commit: - description += f'**Commit hash:** {commit}\n' + description += f"**Commit hash:** {commit}\n" if date: - description += f'**Commit date:** {date}\n' - if description[-1] == '\n': + description += f"**Commit date:** {date}\n" + if description[-1] == "\n": description = description[:-1] - dupe_key = hashlib.md5((title + secret + severity + str(line)).encode("utf-8")).hexdigest() + dupe_key = hashlib.md5( + (title + secret + severity + str(line)).encode("utf-8") + ).hexdigest() if dupe_key in dupes: finding = dupes[dupe_key] - finding.description = finding.description + '\n\n***\n\n' + description + finding.description = ( + finding.description + "\n\n***\n\n" + description + ) finding.nb_occurences += 1 dupes[dupe_key] = finding else: - finding = Finding(title=title, - test=test, - cwe=798, - description=description, - severity=severity, - mitigation="Secrets and passwords should be stored in a secure vault or secure storage.", - impact="This weakness can lead to the exposure of resources or functionality to unintended actors, possibly providing attackers with sensitive information or even execute arbitrary code.", - file_path=file, - line=line, - dynamic_finding=False, - static_finding=True, - nb_occurences=1) + finding = Finding( + title=title, + test=test, + cwe=798, + description=description, + severity=severity, + mitigation="Secrets and passwords should be stored in a secure vault or secure storage.", + impact="This weakness can lead to the exposure of resources or functionality to unintended actors, possibly providing attackers with sensitive information or even execute arbitrary code.", + file_path=file, + line=line, + dynamic_finding=False, + static_finding=True, + nb_occurences=1, + ) dupes[dupe_key] = finding diff --git a/dojo/tools/trustwave/__init__.py b/dojo/tools/trustwave/__init__.py index 7c26aa40bd..518a4fc58f 100755 --- a/dojo/tools/trustwave/__init__.py +++ b/dojo/tools/trustwave/__init__.py @@ -1,3 +1,4 @@ -# Based on the generic csv so we are able to import Trustwave scans exported in CSV +# Based on the generic csv so we are able to import Trustwave scans +# exported in CSV -__author__ = 'ekelson' +__author__ = "ekelson" diff --git a/dojo/tools/trustwave/parser.py b/dojo/tools/trustwave/parser.py index 3d2cf6b75a..ae6cd859ca 100644 --- a/dojo/tools/trustwave/parser.py +++ b/dojo/tools/trustwave/parser.py @@ -6,7 +6,6 @@ class TrustwaveParser(object): - def get_scan_types(self): return ["Trustwave Scan (CSV)"] @@ -17,18 +16,19 @@ def get_description_for_scan_types(self, scan_type): return "CSV output of Trustwave vulnerability scan." 
def get_findings(self, filename, test): - content = filename.read() - if type(content) is bytes: - content = content.decode('utf-8') - reader = csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"') + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = csv.DictReader( + io.StringIO(content), delimiter=",", quotechar='"' + ) severity_mapping = { - 'I': 'Info', - 'L': 'Low', - 'M': 'Medium', - 'H': 'High', - 'C': 'Critical', + "I": "Info", + "L": "Low", + "M": "Medium", + "H": "High", + "C": "Critical", } dupes = {} @@ -37,31 +37,33 @@ def get_findings(self, filename, test): test=test, nb_occurences=1, ) - host = row.get('Domain') - if host is None or host == '': - host = row.get('IP') + host = row.get("Domain") + if host is None or host == "": + host = row.get("IP") finding.unsaved_endpoints = [Endpoint(host=host)] - if row.get('Port') is not None and not "" == row.get('Port'): - finding.unsaved_endpoints[0].port = int(row['Port']) - if row.get('Protocol') is not None and not "" == row.get('Protocol'): - finding.unsaved_endpoints[0].protocol = row['Protocol'] - finding.title = row['Vulnerability Name'] - finding.description = row['Description'] - finding.references = row.get('Evidence') - finding.mitigation = row.get('Remediation') + if row.get("Port") is not None and not "" == row.get("Port"): + finding.unsaved_endpoints[0].port = int(row["Port"]) + if row.get("Protocol") is not None and not "" == row.get( + "Protocol" + ): + finding.unsaved_endpoints[0].protocol = row["Protocol"] + finding.title = row["Vulnerability Name"] + finding.description = row["Description"] + finding.references = row.get("Evidence") + finding.mitigation = row.get("Remediation") # manage severity - if row['Severity'] in severity_mapping: - finding.severity = severity_mapping[row['Severity']] + if row["Severity"] in severity_mapping: + finding.severity = severity_mapping[row["Severity"]] else: - finding.severity = 'Low' - finding.unsaved_vulnerability_ids = [row.get('CVE')] + finding.severity = "Low" + finding.unsaved_vulnerability_ids = [row.get("CVE")] - dupes_key = hashlib.sha256("|".join([ - finding.severity, - finding.title, - finding.description - ]).encode()).hexdigest() + dupes_key = hashlib.sha256( + "|".join( + [finding.severity, finding.title, finding.description] + ).encode() + ).hexdigest() if dupes_key in dupes: dupes[dupes_key].nb_occurences += 1 diff --git a/dojo/tools/trustwave_fusion_api/parser.py b/dojo/tools/trustwave_fusion_api/parser.py index f8a5946df6..14701d2c9a 100644 --- a/dojo/tools/trustwave_fusion_api/parser.py +++ b/dojo/tools/trustwave_fusion_api/parser.py @@ -17,7 +17,9 @@ def get_label_for_scan_types(self, scan_type): return "Trustwave Fusion API Scan" def get_description_for_scan_types(self, scan_type): - return "Trustwave Fusion API report file can be imported in JSON format" + return ( + "Trustwave Fusion API report file can be imported in JSON format" + ) def get_findings(self, file, test): tree = json.load(file) @@ -28,13 +30,15 @@ def get_findings(self, file, test): item = get_item(node, test) item_key = hashlib.sha256( - "|".join([item.severity, item.title, - item.description]).encode() + "|".join( + [item.severity, item.title, item.description] + ).encode() ).hexdigest() if item_key in items: items[item_key].unsaved_endpoints.extend( - item.unsaved_endpoints) + item.unsaved_endpoints + ) items[item_key].nb_occurences += 1 else: items[item_key] = item @@ -68,23 +72,27 @@ def get_item(vuln, test): if "url" in location and location["url"] 
and location["url"] != "None": endpoint = Endpoint.from_uri(location["url"]) # fallback to using old way of creating endpoints - elif "domain" in location and location["domain"] and location["domain"] != "None": + elif ( + "domain" in location + and location["domain"] + and location["domain"] != "None" + ): endpoint = Endpoint(host=str(location["domain"])) else: # no domain, use ip instead if "ip" in location and location["ip"] and location["ip"] != "None": endpoint = Endpoint(host=str(location["ip"])) # check for protocol if ( - "applicationProtocol" in location and - location["applicationProtocol"] and - location["applicationProtocol"] != "None" + "applicationProtocol" in location + and location["applicationProtocol"] + and location["applicationProtocol"] != "None" ): endpoint.protocol = location["applicationProtocol"] # check for port if ( - "port" in location and - location["port"] in location and - location["port"] != "None" + "port" in location + and location["port"] in location + and location["port"] != "None" ): endpoint.port = location["port"] finding.unsaved_endpoints = [endpoint] # assigning endpoint @@ -112,17 +120,19 @@ def get_item(vuln, test): # Component Name and Version if ( - "applicationCpe" in location and - location["applicationCpe"] and - location["applicationCpe"] != "None" + "applicationCpe" in location + and location["applicationCpe"] + and location["applicationCpe"] != "None" ): cpe = CPE(location["applicationCpe"]) - component_name = cpe.get_vendor()[0] + ":" if len( - cpe.get_vendor()) > 0 else "" + component_name = ( + cpe.get_vendor()[0] + ":" if len(cpe.get_vendor()) > 0 else "" + ) - component_name += cpe.get_product()[0] if len( - cpe.get_product()) > 0 else "" + component_name += ( + cpe.get_product()[0] if len(cpe.get_product()) > 0 else "" + ) finding.component_name = component_name if component_name else None finding.component_version = ( diff --git a/dojo/tools/twistlock/parser.py b/dojo/tools/twistlock/parser.py index dc7b926076..2c8a3e335d 100644 --- a/dojo/tools/twistlock/parser.py +++ b/dojo/tools/twistlock/parser.py @@ -11,22 +11,27 @@ class TwistlockCSVParser(object): - def parse_issue(self, row, test): if not row: return None - data_vulnerability_id = row.get('CVE ID', '') - data_package_version = row.get('Package Version', '') - data_fix_status = row.get('Fix Status', '') - data_package_name = row.get('Packages', '') - data_id = row.get('Id', '') - data_severity = row.get('Severity', '') - data_cvss = row.get('CVSS', '') - data_description = description_column = row.get('Description', '') + data_vulnerability_id = row.get("CVE ID", "") + data_package_version = row.get("Package Version", "") + data_fix_status = row.get("Fix Status", "") + data_package_name = row.get("Packages", "") + row.get("Id", "") + data_severity = row.get("Severity", "") + data_cvss = row.get("CVSS", "") + data_description = description_column = row.get("Description", "") if data_vulnerability_id and data_package_name: - title = data_vulnerability_id + ": " + data_package_name + " - " + data_package_version + title = ( + data_vulnerability_id + + ": " + + data_package_name + + " - " + + data_package_version + ) elif data_package_name and data_package_version: title = data_package_name + " - " + data_package_version else: @@ -36,18 +41,26 @@ def parse_issue(self, row, test): title=textwrap.shorten(title, width=255, placeholder="..."), test=test, severity=convert_severity(data_severity), - description=data_description + "
<br><br>Vulnerable Package: " +
-            data_package_name + "<br><br>Current Version: " + str(
-            data_package_version) + "<br><br>",
+            description=data_description
+            + "<br><br>Vulnerable Package: "
+            + data_package_name
+            + "<br><br>Current Version: "
+            + str(data_package_version)
+            + "<br><br>
    ", mitigation=data_fix_status, - component_name=textwrap.shorten(data_package_name, width=200, placeholder="..."), + component_name=textwrap.shorten( + data_package_name, width=200, placeholder="..." + ), component_version=data_package_version, false_p=False, duplicate=False, out_of_scope=False, mitigated=None, - severity_justification="(CVSS v3 base score: {})".format(data_cvss), - impact=data_severity) + severity_justification="(CVSS v3 base score: {})".format( + data_cvss + ), + impact=data_severity, + ) finding.description = finding.description.strip() if data_vulnerability_id: finding.unsaved_vulnerability_ids = [data_vulnerability_id] @@ -59,13 +72,23 @@ def parse(self, filename, test): return content = filename.read() dupes = dict() - if type(content) is bytes: - content = content.decode('utf-8') - reader = csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"') + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = csv.DictReader( + io.StringIO(content), delimiter=",", quotechar='"' + ) for row in reader: finding = self.parse_issue(row, test) if finding is not None: - key = hashlib.md5((finding.severity + '|' + finding.title + '|' + finding.description).encode('utf-8')).hexdigest() + key = hashlib.md5( + ( + finding.severity + + "|" + + finding.title + + "|" + + finding.description + ).encode("utf-8") + ).hexdigest() if key not in dupes: dupes[key] = finding return list(dupes.values()) @@ -83,74 +106,106 @@ def parse_json(self, json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise ValueError("Invalid format") return tree def get_items(self, tree, test): items = {} - if 'results' in tree: - vulnerabilityTree = tree['results'][0].get('vulnerabilities', []) + if "results" in tree: + vulnerabilityTree = tree["results"][0].get("vulnerabilities", []) for node in vulnerabilityTree: item = get_item(node, test) - unique_key = node['id'] + str(node['packageName'] + str( - node['packageVersion']) + str(node['severity'])) + unique_key = node["id"] + str( + node["packageName"] + + str(node["packageVersion"]) + + str(node["severity"]) + ) items[unique_key] = item return list(items.values()) def get_item(vulnerability, test): - severity = convert_severity(vulnerability['severity']) if 'severity' in vulnerability else "Info" - vector = vulnerability['vector'] if 'vector' in vulnerability else "CVSS vector not provided. " - status = vulnerability['status'] if 'status' in vulnerability else "There seems to be no fix yet. Please check description field." - cvss = vulnerability['cvss'] if 'cvss' in vulnerability else "No CVSS score yet." - riskFactors = vulnerability['riskFactors'] if 'riskFactors' in vulnerability else "No risk factors." + severity = ( + convert_severity(vulnerability["severity"]) + if "severity" in vulnerability + else "Info" + ) + vector = ( + vulnerability["vector"] + if "vector" in vulnerability + else "CVSS vector not provided. " + ) + status = ( + vulnerability["status"] + if "status" in vulnerability + else "There seems to be no fix yet. Please check description field." + ) + cvss = ( + vulnerability["cvss"] + if "cvss" in vulnerability + else "No CVSS score yet." + ) + riskFactors = ( + vulnerability["riskFactors"] + if "riskFactors" in vulnerability + else "No risk factors." 
+ ) # create the finding object finding = Finding( - title=vulnerability['id'] + ": " + vulnerability['packageName'] + " - " + vulnerability['packageVersion'], + title=vulnerability["id"] + + ": " + + vulnerability["packageName"] + + " - " + + vulnerability["packageVersion"], test=test, severity=severity, - description=vulnerability['description'] + "
<br><br>Vulnerable Package: " +
-        vulnerability['packageName'] + "<br><br>Current Version: " + str(
-            vulnerability['packageVersion']) + "<br><br>",
+        description=vulnerability["description"]
+        + "<br><br>Vulnerable Package: "
+        + vulnerability["packageName"]
+        + "<br><br>Current Version: "
+        + str(vulnerability["packageVersion"])
+        + "<br><br>
    ", mitigation=status.title(), - references=vulnerability.get('link'), - component_name=vulnerability['packageName'], - component_version=vulnerability['packageVersion'], + references=vulnerability.get("link"), + component_name=vulnerability["packageName"], + component_version=vulnerability["packageVersion"], false_p=False, duplicate=False, out_of_scope=False, mitigated=None, - severity_justification="{} (CVSS v3 base score: {})\n\n{}".format(vector, cvss, riskFactors), - impact=severity) - finding.unsaved_vulnerability_ids = [vulnerability['id']] + severity_justification="{} (CVSS v3 base score: {})\n\n{}".format( + vector, cvss, riskFactors + ), + impact=severity, + ) + finding.unsaved_vulnerability_ids = [vulnerability["id"]] finding.description = finding.description.strip() return finding def convert_severity(severity): - if severity.lower() == 'important': + if severity.lower() == "important": return "High" - elif severity.lower() == 'moderate': + elif severity.lower() == "moderate": return "Medium" - elif severity.lower() == 'information': + elif severity.lower() == "information": return "Info" - elif severity.lower() == 'informational': + elif severity.lower() == "informational": return "Info" - elif severity == '': + elif severity == "": return "Info" else: return severity.title() class TwistlockParser(object): - def get_scan_types(self): return ["Twistlock Image Scan"] @@ -161,13 +216,12 @@ def get_description_for_scan_types(self, scan_type): return "JSON output of twistcli image scan or CSV." def get_findings(self, filename, test): - if filename is None: return list() - if filename.name.lower().endswith('.json'): + if filename.name.lower().endswith(".json"): return TwistlockJsonParser().parse(filename, test) - elif filename.name.lower().endswith('.csv'): + elif filename.name.lower().endswith(".csv"): return TwistlockCSVParser().parse(filename, test) else: - raise ValueError('Unknown File Format') + raise ValueError("Unknown File Format") diff --git a/dojo/tools/vcg/parser.py b/dojo/tools/vcg/parser.py index 80a0048c68..cabdbd2997 100644 --- a/dojo/tools/vcg/parser.py +++ b/dojo/tools/vcg/parser.py @@ -8,62 +8,62 @@ class VCGFinding(object): - def get_finding_severity(self): return self.priority_mapping[self.priority] def get_finding_detail(self): - finding_detail = '' + finding_detail = "" if self.severity is not None: - finding_detail = 'Severity: ' + self.severity + '\n' + finding_detail = "Severity: " + self.severity + "\n" if self.description is not None: - finding_detail += 'Description: ' + self.description + '\n' + finding_detail += "Description: " + self.description + "\n" if self.filename is not None: - finding_detail += 'FileName: ' + self.filename + '\n' + finding_detail += "FileName: " + self.filename + "\n" if self.line is not None: - finding_detail += 'Line: ' + self.line + '\n' + finding_detail += "Line: " + self.line + "\n" if self.code_line is not None: - finding_detail += 'CodeLine: ' + self.code_line + '\n' + finding_detail += "CodeLine: " + self.code_line + "\n" return finding_detail def to_finding(self, test): - return Finding( - title=self.title, - test=test, - description=self.get_finding_detail(), - severity=self.get_finding_severity(), + title=self.title, + test=test, + description=self.get_finding_detail(), + severity=self.get_finding_severity(), ) def __init__(self): self.priority = 6 - self.title = '' - self.severity = '' - self.description = '' - self.filename = '' - self.line = '' - self.code_line = '' + self.title = "" + self.severity = "" + 
self.description = "" + self.filename = "" + self.line = "" + self.code_line = "" self.priority_mapping = dict() - self.priority_mapping[1] = 'Critical' - self.priority_mapping[2] = 'High' - self.priority_mapping[3] = 'Medium' - self.priority_mapping[4] = 'Low' - self.priority_mapping[5] = 'Low' - self.priority_mapping[6] = 'Info' - self.priority_mapping[7] = 'Info' + self.priority_mapping[1] = "Critical" + self.priority_mapping[2] = "High" + self.priority_mapping[3] = "Medium" + self.priority_mapping[4] = "Low" + self.priority_mapping[5] = "Low" + self.priority_mapping[6] = "Info" + self.priority_mapping[7] = "Info" class VCGXmlParser(object): - @staticmethod def get_field_from_xml(issue, field): - if issue.find(field) is not None and issue.find(field).text is not None: + if ( + issue.find(field) is not None + and issue.find(field).text is not None + ): return issue.find(field).text else: return None @@ -72,31 +72,35 @@ def __init__(self): pass def parse_issue(self, issue, test): - if issue is None: return None data = VCGFinding() - if self.get_field_from_xml(issue, 'Priority') is None: + if self.get_field_from_xml(issue, "Priority") is None: data.priority = 6 else: - data.priority = int(float(self.get_field_from_xml(issue, 'Priority'))) - - data.title = '' if self.get_field_from_xml(issue, 'Title') is None else self.get_field_from_xml(issue, 'Title') - data.severity = self.get_field_from_xml(issue, 'Severity') - data.description = self.get_field_from_xml(issue, 'Description') - data.filename = self.get_field_from_xml(issue, 'FileName') + data.priority = int( + float(self.get_field_from_xml(issue, "Priority")) + ) + + data.title = ( + "" + if self.get_field_from_xml(issue, "Title") is None + else self.get_field_from_xml(issue, "Title") + ) + data.severity = self.get_field_from_xml(issue, "Severity") + data.description = self.get_field_from_xml(issue, "Description") + data.filename = self.get_field_from_xml(issue, "FileName") # data.file_path = self.get_field_from_xml(issue, 'FileName') - data.line = self.get_field_from_xml(issue, 'Line') - data.code_line = self.get_field_from_xml(issue, 'CodeLine') + data.line = self.get_field_from_xml(issue, "Line") + data.code_line = self.get_field_from_xml(issue, "CodeLine") # data.line = self.get_field_from_xml(issue, 'CodeLine') finding = data.to_finding(test) return finding def parse(self, content, test): - dupes = dict() if content is None: @@ -104,11 +108,19 @@ def parse(self, content, test): vcgscan = ElementTree.fromstring(content) - for issue in vcgscan.findall('CodeIssue'): + for issue in vcgscan.findall("CodeIssue"): finding = self.parse_issue(issue, test) if finding is not None: - key = hashlib.md5((finding.severity + '|' + finding.title + '|' + finding.description).encode('utf-8')).hexdigest() + key = hashlib.md5( + ( + finding.severity + + "|" + + finding.title + + "|" + + finding.description + ).encode("utf-8") + ).hexdigest() if key not in dupes: dupes[key] = finding @@ -117,7 +129,6 @@ def parse(self, content, test): class VCGCsvParser(object): - @staticmethod def get_field_from_row(row, column): if row[column] is not None: @@ -126,7 +137,6 @@ def get_field_from_row(row, column): return None def parse_issue(self, row, test): - if not row: return None @@ -141,14 +151,16 @@ def parse_issue(self, row, test): data = VCGFinding() if self.get_field_from_row(row, title_column) is None: - data.title = '' + data.title = "" else: data.title = self.get_field_from_row(row, title_column) if self.get_field_from_row(row, priority_column) is None: 
data.priority = 6 else: - data.priority = int(float(self.get_field_from_row(row, priority_column))) + data.priority = int( + float(self.get_field_from_row(row, priority_column)) + ) data.severity = self.get_field_from_row(row, severity_column) data.description = self.get_field_from_row(row, description_column) @@ -161,14 +173,22 @@ def parse_issue(self, row, test): def parse(self, content, test): dupes = dict() - if type(content) is bytes: - content = content.decode('utf-8') - reader = csv.reader(io.StringIO(content), delimiter=',', quotechar='"') + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = csv.reader(io.StringIO(content), delimiter=",", quotechar='"') for row in reader: finding = self.parse_issue(row, test) if finding is not None: - key = hashlib.md5((finding.severity + '|' + finding.title + '|' + finding.description).encode('utf-8')).hexdigest() + key = hashlib.md5( + ( + finding.severity + + "|" + + finding.title + + "|" + + finding.description + ).encode("utf-8") + ).hexdigest() if key not in dupes: dupes[key] = finding @@ -192,15 +212,14 @@ def get_description_for_scan_types(self, scan_type): return "VCG output can be imported in CSV or Xml formats." def get_findings(self, filename, test): - if filename is None: return list() content = filename.read() - if filename.name.lower().endswith('.xml'): + if filename.name.lower().endswith(".xml"): return list(VCGXmlParser().parse(content, test).values()) - elif filename.name.lower().endswith('.csv'): + elif filename.name.lower().endswith(".csv"): return list(VCGCsvParser().parse(content, test).values()) else: - raise ValueError('Unknown File Format') + raise ValueError("Unknown File Format") diff --git a/dojo/tools/veracode/__init__.py b/dojo/tools/veracode/__init__.py index 369f2551a3..69e743a006 100644 --- a/dojo/tools/veracode/__init__.py +++ b/dojo/tools/veracode/__init__.py @@ -1 +1 @@ -__author__ = 'jay7958' +__author__ = "jay7958" diff --git a/dojo/tools/veracode/parser.py b/dojo/tools/veracode/parser.py index b2b377c2bc..a6ee3a38a2 100644 --- a/dojo/tools/veracode/parser.py +++ b/dojo/tools/veracode/parser.py @@ -7,7 +7,7 @@ from dojo.models import Finding from dojo.models import Endpoint -XML_NAMESPACE = {'x': 'https://www.veracode.com/schema/reports/export/1.0'} +XML_NAMESPACE = {"x": "https://www.veracode.com/schema/reports/export/1.0"} class VeracodeParser(object): @@ -18,11 +18,11 @@ class VeracodeParser(object): """ vc_severity_mapping = { - 1: 'Info', - 2: 'Low', - 3: 'Medium', - 4: 'High', - 5: 'Critical' + 1: "Info", + 2: "Low", + 3: "Medium", + 4: "High", + 5: "Critical", } def get_scan_types(self): @@ -37,63 +37,106 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): root = ElementTree.parse(filename).getroot() - app_id = root.attrib['app_id'] - report_date = datetime.strptime(root.attrib['last_update_time'], '%Y-%m-%d %H:%M:%S %Z') + app_id = root.attrib["app_id"] + report_date = datetime.strptime( + root.attrib["last_update_time"], "%Y-%m-%d %H:%M:%S %Z" + ) dupes = dict() # Get SAST findings - # This assumes `` only exists within the `` nodes. - for category_node in root.findall('x:severity/x:category', namespaces=XML_NAMESPACE): - + # This assumes `` only exists within the `` + # nodes. + for category_node in root.findall( + "x:severity/x:category", namespaces=XML_NAMESPACE + ): # Mitigation text. 
- mitigation_text = '' - mitigation_text += category_node.find('x:recommendations/x:para', namespaces=XML_NAMESPACE).get('text') + "\n\n" + mitigation_text = "" + mitigation_text += ( + category_node.find( + "x:recommendations/x:para", namespaces=XML_NAMESPACE + ).get("text") + + "\n\n" + ) # Bullet list of recommendations: - mitigation_text += ''.join(list(map( - lambda x: ' * ' + x.get('text') + '\n', - category_node.findall('x:recommendations/x:para/x:bulletitem', namespaces=XML_NAMESPACE)))) - - for flaw_node in category_node.findall('x:cwe/x:staticflaws/x:flaw', namespaces=XML_NAMESPACE): - dupe_key = flaw_node.attrib['issueid'] + mitigation_text += "".join( + list( + map( + lambda x: " * " + x.get("text") + "\n", + category_node.findall( + "x:recommendations/x:para/x:bulletitem", + namespaces=XML_NAMESPACE, + ), + ) + ) + ) + + for flaw_node in category_node.findall( + "x:cwe/x:staticflaws/x:flaw", namespaces=XML_NAMESPACE + ): + dupe_key = flaw_node.attrib["issueid"] # Only process if we didn't do that before. if dupe_key not in dupes: # Add to list. - dupes[dupe_key] = self.__xml_static_flaw_to_finding(app_id, flaw_node, mitigation_text, test) + dupes[dupe_key] = self.__xml_static_flaw_to_finding( + app_id, flaw_node, mitigation_text, test + ) - for flaw_node in category_node.findall('x:cwe/x:dynamicflaws/x:flaw', namespaces=XML_NAMESPACE): - dupe_key = flaw_node.attrib['issueid'] + for flaw_node in category_node.findall( + "x:cwe/x:dynamicflaws/x:flaw", namespaces=XML_NAMESPACE + ): + dupe_key = flaw_node.attrib["issueid"] if dupe_key not in dupes: - dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding(app_id, flaw_node, mitigation_text, test) + dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding( + app_id, flaw_node, mitigation_text, test + ) # Get SCA findings - for component in root.findall('x:software_composition_analysis/x:vulnerable_components' - '/x:component', namespaces=XML_NAMESPACE): - _library = component.attrib['library'] - if 'library_id' in component.attrib and component.attrib['library_id'].startswith("maven:"): - # Set the library name from the maven component if it's available to align with CycloneDX + Veracode SCA - split_library_id = component.attrib['library_id'].split(":") + for component in root.findall( + "x:software_composition_analysis/x:vulnerable_components" + "/x:component", + namespaces=XML_NAMESPACE, + ): + _library = component.attrib["library"] + if "library_id" in component.attrib and component.attrib[ + "library_id" + ].startswith("maven:"): + # Set the library name from the maven component if it's + # available to align with CycloneDX + Veracode SCA + split_library_id = component.attrib["library_id"].split(":") if len(split_library_id) > 2: _library = split_library_id[2] - _vendor = component.attrib['vendor'] - _version = component.attrib['version'] - - for vulnerability in component.findall('x:vulnerabilities/x:vulnerability', namespaces=XML_NAMESPACE): - # We don't have a Id for SCA findings so just generate a random one - dupes[str(uuid.uuid4())] = self.__xml_sca_flaw_to_finding(test, report_date, _vendor, _library, _version, vulnerability) + _vendor = component.attrib["vendor"] + _version = component.attrib["version"] + + for vulnerability in component.findall( + "x:vulnerabilities/x:vulnerability", namespaces=XML_NAMESPACE + ): + # We don't have a Id for SCA findings so just generate a random + # one + dupes[str(uuid.uuid4())] = self.__xml_sca_flaw_to_finding( + test, + report_date, + _vendor, + _library, + _version, + vulnerability, + ) 
return list(dupes.values()) @classmethod def __xml_flaw_to_unique_id(cls, app_id, xml_node): - issue_id = xml_node.attrib['issueid'] - return 'app-' + app_id + '_issue-' + issue_id + issue_id = xml_node.attrib["issueid"] + return "app-" + app_id + "_issue-" + issue_id @classmethod def __xml_flaw_to_severity(cls, xml_node): - return cls.vc_severity_mapping.get(int(xml_node.attrib['severity']), 'Info') + return cls.vc_severity_mapping.get( + int(xml_node.attrib["severity"]), "Info" + ) @classmethod def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): @@ -103,48 +146,66 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): finding.mitigation = mitigation_text finding.static_finding = True finding.dynamic_finding = False - finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id(app_id, xml_node) + finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id( + app_id, xml_node + ) # Report values finding.severity = cls.__xml_flaw_to_severity(xml_node) - finding.cwe = int(xml_node.attrib['cweid']) - finding.title = xml_node.attrib['categoryname'] - finding.impact = 'CIA Impact: ' + xml_node.attrib['cia_impact'].upper() + finding.cwe = int(xml_node.attrib["cweid"]) + finding.title = xml_node.attrib["categoryname"] + finding.impact = "CIA Impact: " + xml_node.attrib["cia_impact"].upper() # Note that DD's legacy dedupe hashing uses the description field, - # so for compatibility, description field should contain very static info. - _description = xml_node.attrib['description'].replace('. ', '.\n') + # so for compatibility, description field should contain very static + # info. + _description = xml_node.attrib["description"].replace(". ", ".\n") finding.description = _description - _references = 'None' - if 'References:' in _description: - _references = _description[_description.index( - 'References:') + 13:].replace(') ', ')\n') - finding.references = _references \ - + "\n\nVulnerable Module: " + xml_node.attrib['module'] \ - + "\nType: " + xml_node.attrib['type'] \ - + "\nVeracode issue ID: " + xml_node.attrib['issueid'] + _references = "None" + if "References:" in _description: + _references = _description[ + _description.index("References:") + 13: + ].replace(") ", ")\n") + finding.references = ( + _references + + "\n\nVulnerable Module: " + + xml_node.attrib["module"] + + "\nType: " + + xml_node.attrib["type"] + + "\nVeracode issue ID: " + + xml_node.attrib["issueid"] + ) _date_found = test.target_start - if 'date_first_occurrence' in xml_node.attrib: + if "date_first_occurrence" in xml_node.attrib: _date_found = datetime.strptime( - xml_node.attrib['date_first_occurrence'], - '%Y-%m-%d %H:%M:%S %Z') + xml_node.attrib["date_first_occurrence"], + "%Y-%m-%d %H:%M:%S %Z", + ) finding.date = _date_found _is_mitigated = False _mitigated_date = None - if ('mitigation_status' in xml_node.attrib and - xml_node.attrib["mitigation_status"].lower() == "accepted"): - if ('remediation_status' in xml_node.attrib and - xml_node.attrib["remediation_status"].lower() == "fixed"): + if ( + "mitigation_status" in xml_node.attrib + and xml_node.attrib["mitigation_status"].lower() == "accepted" + ): + if ( + "remediation_status" in xml_node.attrib + and xml_node.attrib["remediation_status"].lower() == "fixed" + ): _is_mitigated = True else: # This happens if any mitigation (including 'Potential false positive') # was accepted in VC. 
- for mitigation in xml_node.findall("x:mitigations/x:mitigation", namespaces=XML_NAMESPACE): + for mitigation in xml_node.findall( + "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE + ): _is_mitigated = True - _mitigated_date = datetime.strptime(mitigation.attrib['date'], '%Y-%m-%d %H:%M:%S %Z') + _mitigated_date = datetime.strptime( + mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z" + ) finding.is_mitigated = _is_mitigated finding.mitigated = _mitigated_date finding.active = not _is_mitigated @@ -155,45 +216,62 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): # level, not on the finding-level. _false_positive = False if _is_mitigated: - _remediation_status = xml_node.attrib['remediation_status'].lower() - if "false positive" in _remediation_status or "falsepositive" in _remediation_status: + _remediation_status = xml_node.attrib["remediation_status"].lower() + if ( + "false positive" in _remediation_status + or "falsepositive" in _remediation_status + ): _false_positive = True finding.false_p = _false_positive return finding @classmethod - def __xml_static_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): - finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) + def __xml_static_flaw_to_finding( + cls, app_id, xml_node, mitigation_text, test + ): + finding = cls.__xml_flaw_to_finding( + app_id, xml_node, mitigation_text, test + ) finding.static_finding = True finding.dynamic_finding = False - _line_number = xml_node.attrib['line'] - _functionrelativelocation = xml_node.attrib['functionrelativelocation'] - if (_line_number is not None and _line_number.isdigit() and - _functionrelativelocation is not None and _functionrelativelocation.isdigit()): + _line_number = xml_node.attrib["line"] + _functionrelativelocation = xml_node.attrib["functionrelativelocation"] + if ( + _line_number is not None + and _line_number.isdigit() + and _functionrelativelocation is not None + and _functionrelativelocation.isdigit() + ): finding.line = int(_line_number) + int(_functionrelativelocation) finding.sast_source_line = finding.line - _source_file = xml_node.attrib.get('sourcefile') - _sourcefilepath = xml_node.attrib.get('sourcefilepath') + _source_file = xml_node.attrib.get("sourcefile") + _sourcefilepath = xml_node.attrib.get("sourcefilepath") finding.file_path = _sourcefilepath + _source_file finding.sast_source_file_path = _sourcefilepath + _source_file - _sast_source_obj = xml_node.attrib.get('functionprototype') - finding.sast_source_object = _sast_source_obj if _sast_source_obj else None + _sast_source_obj = xml_node.attrib.get("functionprototype") + finding.sast_source_object = ( + _sast_source_obj if _sast_source_obj else None + ) finding.unsaved_tags = ["sast"] return finding @classmethod - def __xml_dynamic_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): - finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) + def __xml_dynamic_flaw_to_finding( + cls, app_id, xml_node, mitigation_text, test + ): + finding = cls.__xml_flaw_to_finding( + app_id, xml_node, mitigation_text, test + ) finding.static_finding = False finding.dynamic_finding = True - url_host = xml_node.attrib.get('url') + url_host = xml_node.attrib.get("url") finding.unsaved_endpoints = [Endpoint.from_uri(url_host)] finding.unsaved_tags = ["dast"] @@ -210,7 +288,9 @@ def _get_cwe(val): return None @classmethod - def __xml_sca_flaw_to_finding(cls, test, report_date, vendor, library, version, xml_node): + def 
__xml_sca_flaw_to_finding( + cls, test, report_date, vendor, library, version, xml_node + ): # Defaults finding = Finding() finding.test = test @@ -218,12 +298,14 @@ def __xml_sca_flaw_to_finding(cls, test, report_date, vendor, library, version, finding.dynamic_finding = False # Report values - cvss_score = float(xml_node.attrib['cvss_score']) + cvss_score = float(xml_node.attrib["cvss_score"]) finding.cvssv3_score = cvss_score finding.severity = cls.__xml_flaw_to_severity(xml_node) - finding.unsaved_vulnerability_ids = [xml_node.attrib['cve_id']] - finding.cwe = cls._get_cwe(xml_node.attrib['cwe_id']) - finding.title = "Vulnerable component: {0}:{1}".format(library, version) + finding.unsaved_vulnerability_ids = [xml_node.attrib["cve_id"]] + finding.cwe = cls._get_cwe(xml_node.attrib["cwe_id"]) + finding.title = "Vulnerable component: {0}:{1}".format( + library, version + ) finding.component_name = library finding.component_version = version @@ -231,30 +313,40 @@ def __xml_sca_flaw_to_finding(cls, test, report_date, vendor, library, version, # overwrite old matching SCA findings. finding.date = report_date - _description = 'This library has known vulnerabilities.\n' - _description += \ - "**CVE:** {0} ({1})\n" \ - "CVS Score: {2} ({3})\n" \ - "Summary: \n>{4}" \ - "\n\n-----\n\n".format( - xml_node.attrib['cve_id'], - xml_node.attrib.get('first_found_date'), - xml_node.attrib['cvss_score'], - cls.vc_severity_mapping.get(int(xml_node.attrib['severity']), 'Info'), - xml_node.attrib['cve_summary']) + _description = "This library has known vulnerabilities.\n" + _description += ( + "**CVE:** {0} ({1})\n" + "CVS Score: {2} ({3})\n" + "Summary: \n>{4}" + "\n\n-----\n\n".format( + xml_node.attrib["cve_id"], + xml_node.attrib.get("first_found_date"), + xml_node.attrib["cvss_score"], + cls.vc_severity_mapping.get( + int(xml_node.attrib["severity"]), "Info" + ), + xml_node.attrib["cve_summary"], + ) + ) finding.description = _description finding.unsaved_tags = ["sca"] _is_mitigated = False _mitigated_date = None - if ('mitigation' in xml_node.attrib and - xml_node.attrib["mitigation"].lower() == "true"): + if ( + "mitigation" in xml_node.attrib + and xml_node.attrib["mitigation"].lower() == "true" + ): # This happens if any mitigation (including 'Potential false positive') # was accepted in VC. 
- for mitigation in xml_node.findall("x:mitigations/x:mitigation", namespaces=XML_NAMESPACE): + for mitigation in xml_node.findall( + "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE + ): _is_mitigated = True - _mitigated_date = datetime.strptime(mitigation.attrib['date'], '%Y-%m-%d %H:%M:%S %Z') + _mitigated_date = datetime.strptime( + mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z" + ) finding.is_mitigated = _is_mitigated finding.mitigated = _mitigated_date finding.active = not _is_mitigated diff --git a/dojo/tools/veracode_sca/parser.py b/dojo/tools/veracode_sca/parser.py index 7f0f1c697d..91aa433c37 100644 --- a/dojo/tools/veracode_sca/parser.py +++ b/dojo/tools/veracode_sca/parser.py @@ -12,13 +12,12 @@ class VeracodeScaParser(object): - vc_severity_mapping = { - 1: 'Info', - 2: 'Low', - 3: 'Medium', - 4: 'High', - 5: 'Critical' + 1: "Info", + 2: "Low", + 3: "Medium", + 4: "High", + 5: "Critical", } def get_scan_types(self): @@ -49,7 +48,7 @@ def _get_findings_json(self, file, test): return findings for issue in embedded.get("issues", []): - if issue.get('issue_type') != 'vulnerability': + if issue.get("issue_type") != "vulnerability": continue date = parser.parse(issue.get("created_date")) @@ -61,31 +60,36 @@ def _get_findings_json(self, file, test): vulnerability = issue.get("vulnerability") vuln_id = vulnerability.get("cve") - if vuln_id and not (vuln_id.startswith("cve") or vuln_id.startswith("CVE")): + if vuln_id and not ( + vuln_id.startswith("cve") or vuln_id.startswith("CVE") + ): vuln_id = "CVE-" + vuln_id cvss_score = issue.get("severity") if vulnerability.get("cvss3_score"): cvss_score = vulnerability.get("cvss3_score") severity = self.__cvss_to_severity(cvss_score) - description = \ - "Project name: {0}\n" \ - "Title: \n>{1}" \ + description = ( + "Project name: {0}\n" + "Title: \n>{1}" "\n\n-----\n\n".format( - issue.get("project_name"), - vulnerability.get('title')) - - finding = Finding(test=test, - title=f"{component_name}:{component_version} | {vuln_id}", - description=description, - severity=severity, - component_name=component_name, - component_version=component_version, - static_finding=True, - dynamic_finding=False, - unique_id_from_tool=issue.get("id"), - date=date, - nb_occurences=1) + issue.get("project_name"), vulnerability.get("title") + ) + ) + + finding = Finding( + test=test, + title=f"{component_name}:{component_version} | {vuln_id}", + description=description, + severity=severity, + component_name=component_name, + component_version=component_version, + static_finding=True, + dynamic_finding=False, + unique_id_from_tool=issue.get("id"), + date=date, + nb_occurences=1, + ) if vuln_id: finding.unsaved_vulnerability_ids = [vuln_id] @@ -106,10 +110,19 @@ def _get_findings_json(self, file, test): if cwe.isdigit(): finding.cwe = int(cwe) - finding.references = "\n\n" + issue.get("_links").get("html").get("href") - status = issue.get('issue_status') - if (issue.get('Ignored') and issue.get('Ignored').capitalize() == 'True' or - status and (status.capitalize() == 'Resolved' or status.capitalize() == 'Fixed')): + finding.references = "\n\n" + issue.get("_links").get("html").get( + "href" + ) + status = issue.get("issue_status") + if ( + issue.get("Ignored") + and issue.get("Ignored").capitalize() == "True" + or status + and ( + status.capitalize() == "Resolved" + or status.capitalize() == "Fixed" + ) + ): finding.is_mitigated = True finding.mitigated = timezone.now() finding.active = False @@ -120,9 +133,11 @@ def _get_findings_json(self, file, test): def 
get_findings_csv(self, file, test): content = file.read() - if type(content) is bytes: - content = content.decode('utf-8') - reader = csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"') + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = csv.DictReader( + io.StringIO(content), delimiter=",", quotechar='"' + ) csvarray = [] for row in reader: @@ -130,49 +145,60 @@ def get_findings_csv(self, file, test): findings = [] for row in csvarray: - if row.get('Issue type') != 'Vulnerability': + if row.get("Issue type") != "Vulnerability": continue - issueId = row.get('Issue ID', None) + issueId = row.get("Issue ID", None) if not issueId: # Workaround for possible encoding issue issueId = list(row.values())[0] - library = row.get('Library', None) - if row.get('Package manager') == 'MAVEN' and row.get('Coordinate 2'): - library = row.get('Coordinate 2') - version = row.get('Version in use', None) - vuln_id = row.get('CVE', None) - if vuln_id and not (vuln_id.startswith("cve") or vuln_id.startswith("CVE")): + library = row.get("Library", None) + if row.get("Package manager") == "MAVEN" and row.get( + "Coordinate 2" + ): + library = row.get("Coordinate 2") + version = row.get("Version in use", None) + vuln_id = row.get("CVE", None) + if vuln_id and not ( + vuln_id.startswith("cve") or vuln_id.startswith("CVE") + ): vuln_id = "CVE-" + vuln_id - severity = self.fix_severity(row.get('Severity', None)) - cvss_score = float(row.get('CVSS score', 0)) - date = datetime.strptime(row.get('Issue opened: Scan date'), '%d %b %Y %H:%M%p %Z') - description = \ - "Project name: {0}\n" \ - "Title: \n>{1}" \ - "\n\n-----\n\n".format( - row.get('Project'), - row.get('Title')) - - finding = Finding(test=test, - title=f"{library}:{version} | {vuln_id}", - description=description, - severity=severity, - component_name=library, - component_version=version, - static_finding=True, - dynamic_finding=False, - unique_id_from_tool=issueId, - date=date, - nb_occurences=1) + severity = self.fix_severity(row.get("Severity", None)) + cvss_score = float(row.get("CVSS score", 0)) + date = datetime.strptime( + row.get("Issue opened: Scan date"), "%d %b %Y %H:%M%p %Z" + ) + description = ( + "Project name: {0}\n" + "Title: \n>{1}" + "\n\n-----\n\n".format(row.get("Project"), row.get("Title")) + ) + + finding = Finding( + test=test, + title=f"{library}:{version} | {vuln_id}", + description=description, + severity=severity, + component_name=library, + component_version=version, + static_finding=True, + dynamic_finding=False, + unique_id_from_tool=issueId, + date=date, + nb_occurences=1, + ) finding.unsaved_vulnerability_ids = [vuln_id] if cvss_score: finding.cvssv3_score = cvss_score - if (row.get('Ignored') and row.get('Ignored').capitalize() == 'True' or - row.get('Status') and row.get('Status').capitalize() == 'Resolved'): + if ( + row.get("Ignored") + and row.get("Ignored").capitalize() == "True" + or row.get("Status") + and row.get("Status").capitalize() == "Resolved" + ): finding.is_mitigated = True finding.mitigated = timezone.now() finding.active = False diff --git a/dojo/tools/wapiti/parser.py b/dojo/tools/wapiti/parser.py index b94686b611..85925de990 100644 --- a/dojo/tools/wapiti/parser.py +++ b/dojo/tools/wapiti/parser.py @@ -30,38 +30,42 @@ def get_findings(self, file, test): # get root of tree. 
root = tree.getroot() # check if it is - if 'report' not in root.tag: - raise ValueError("This doesn't seem to be a valid Wapiti XML file.") + if "report" not in root.tag: + raise ValueError( + "This doesn't seem to be a valid Wapiti XML file." + ) severity_mapping = { - '4': 'Critical', - '3': 'High', - '2': 'Medium', - '1': 'Low', - '0': 'Info', + "4": "Critical", + "3": "High", + "2": "Medium", + "1": "Low", + "0": "Info", } url = root.findtext('report_infos/info[@name="target"]') dupes = dict() - for vulnerability in root.findall('vulnerabilities/vulnerability'): - category = vulnerability.attrib['name'] - description = vulnerability.findtext('description') - mitigation = vulnerability.findtext('solution') + for vulnerability in root.findall("vulnerabilities/vulnerability"): + category = vulnerability.attrib["name"] + description = vulnerability.findtext("description") + mitigation = vulnerability.findtext("solution") # manage references cwe = None references = [] - for reference in vulnerability.findall('references/reference'): - reference_title = reference.findtext('title') + for reference in vulnerability.findall("references/reference"): + reference_title = reference.findtext("title") if reference_title.startswith("CWE"): cwe = self.get_cwe(reference_title) - references.append(f"* [{reference_title}]({reference.findtext('url')})") + references.append( + f"* [{reference_title}]({reference.findtext('url')})" + ) references = "\n".join(references) - for entry in vulnerability.findall('entries/entry'): - title = category + ": " + entry.findtext('info') + for entry in vulnerability.findall("entries/entry"): + title = category + ": " + entry.findtext("info") # get numerical severity. - num_severity = entry.findtext('level') + num_severity = entry.findtext("level") if num_severity in severity_mapping: severity = severity_mapping[num_severity] else: @@ -81,10 +85,14 @@ def get_findings(self, file, test): finding.cwe = cwe finding.unsaved_endpoints = [Endpoint.from_uri(url)] - finding.unsaved_req_resp = [{"req": entry.findtext('http_request'), "resp": ""}] + finding.unsaved_req_resp = [ + {"req": entry.findtext("http_request"), "resp": ""} + ] # make dupe hash key - dupe_key = hashlib.sha256(str(description + title + severity).encode('utf-8')).hexdigest() + dupe_key = hashlib.sha256( + str(description + title + severity).encode("utf-8") + ).hexdigest() # check if dupes are present. 
if dupe_key in dupes: find = dupes[dupe_key] diff --git a/dojo/tools/wazuh/parser.py b/dojo/tools/wazuh/parser.py index 748b0b7306..b1ea19d836 100644 --- a/dojo/tools/wazuh/parser.py +++ b/dojo/tools/wazuh/parser.py @@ -32,30 +32,35 @@ def get_findings(self, filename, test): return list() for item in vulnerability: - if item['condition'] != "Package unfixed" and item['severity'] != "Untriaged": - id = item.get('cve') - package_name = item.get('name') - package_version = item.get('version') - description = item.get('condition') - if item.get('severity') == "Untriaged": + if ( + item["condition"] != "Package unfixed" + and item["severity"] != "Untriaged" + ): + id = item.get("cve") + package_name = item.get("name") + package_version = item.get("version") + description = item.get("condition") + if item.get("severity") == "Untriaged": severity = "Info" else: - severity = item.get('severity') - if item.get('status') == "VALID": + severity = item.get("severity") + if item.get("status") == "VALID": active = True else: active = False - links = item.get('external_references') - title = item.get('title') + " (version: " + package_version + ")" - severity = item.get('severity', 'info').capitalize() + links = item.get("external_references") + title = ( + item.get("title") + " (version: " + package_version + ")" + ) + severity = item.get("severity", "info").capitalize() if links: - references = '' + references = "" for link in links: - references += f'{link}\n' + references += f"{link}\n" else: references = None - if id and id.startswith('CVE'): + if id and id.startswith("CVE"): vulnerability_id = id else: vulnerability_id = None diff --git a/dojo/tools/wfuzz/parser.py b/dojo/tools/wfuzz/parser.py index 02a504f33f..271b7d208c 100644 --- a/dojo/tools/wfuzz/parser.py +++ b/dojo/tools/wfuzz/parser.py @@ -29,13 +29,11 @@ def get_description_for_scan_types(self, scan_type): return "Import WFuzz findings in JSON format." def get_findings(self, filename, test): - data = json.load(filename) dupes = {} for item in data: url = hyperlink.parse(item["url"]) - payload = item["payload"] return_code = str(item["code"]) severity = self.SEVERITY[return_code] description = f"The URL {url.to_text()} must not be exposed\n Please review your configuration\n" diff --git a/dojo/tools/whispers/parser.py b/dojo/tools/whispers/parser.py index a1648303bc..3d083cae8a 100644 --- a/dojo/tools/whispers/parser.py +++ b/dojo/tools/whispers/parser.py @@ -50,19 +50,22 @@ def get_findings(self, file, test): "Replace hardcoded secret with a placeholder (ie: ENV-VAR). " "Invalidate the leaked secret and generate a new one. " "Supply the new secret through a placeholder to avoid disclosing " - "sensitive information in code."), - references=Endpoint.from_uri("https://cwe.mitre.org/data/definitions/798.html"), + "sensitive information in code." 
+ ), + references=Endpoint.from_uri( + "https://cwe.mitre.org/data/definitions/798.html" + ), cwe=798, severity=self.SEVERITY_MAP.get( - vuln.get("severity"), - "Info"), + vuln.get("severity"), "Info" + ), file_path=vuln.get("file"), - line=int( - vuln.get("line")), + line=int(vuln.get("line")), vuln_id_from_tool=vuln.get("message"), static_finding=True, dynamic_finding=False, test=test, - )) + ) + ) return findings diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py index 900617755f..82596b33b8 100644 --- a/dojo/tools/whitehat_sentinel/parser.py +++ b/dojo/tools/whitehat_sentinel/parser.py @@ -25,18 +25,26 @@ def get_description_for_scan_types(self, scan_type): return "WhiteHat Sentinel output from api/vuln/query_site can be imported in JSON format." def get_findings(self, file, test): - findings_collection = json.load(file) if not findings_collection.keys(): return list() - # Make sure the findings key exists in the dictionary and that it is not null or an empty list - if 'collection' not in findings_collection.keys() or not findings_collection['collection']: - raise ValueError('collection key not present or there were not findings present.') - - # Convert a WhiteHat Vuln with Attack Vectors to a list of DefectDojo findings - dojo_findings = self._convert_whitehat_sentinel_vulns_to_dojo_finding(findings_collection['collection'], test) + # Make sure the findings key exists in the dictionary and that it is + # not null or an empty list + if ( + "collection" not in findings_collection.keys() + or not findings_collection["collection"] + ): + raise ValueError( + "collection key not present or there were not findings present." + ) + + # Convert a WhiteHat Vuln with Attack Vectors to a list of DefectDojo + # findings + dojo_findings = self._convert_whitehat_sentinel_vulns_to_dojo_finding( + findings_collection["collection"], test + ) # # Loop through each vuln from WhiteHat # for whitehat_vuln in findings_collection['collection']: @@ -45,14 +53,24 @@ def get_findings(self, file, test): # dojo_findings.append(dojo_finding) return dojo_findings - def _convert_whitehat_severity_id_to_dojo_severity(self, whitehat_severity_id: int) -> Union[str, None]: + def _convert_whitehat_severity_id_to_dojo_severity( + self, whitehat_severity_id: int + ) -> Union[str, None]: """ Converts a WhiteHat Sentinel numerical severity to a DefectDojo severity. Args: whitehat_severity_id: The WhiteHat Severity ID (called risk_id in the API) Returns: A DefectDojo severity if a mapping can be found; otherwise a null value is returned """ - severities = ['Informational', 'Informational', 'Low', 'Medium', 'High', 'Critical', 'Critical'] + severities = [ + "Informational", + "Informational", + "Low", + "Medium", + "High", + "Critical", + "Critical", + ] try: return severities[int(whitehat_severity_id)] @@ -67,8 +85,8 @@ def _parse_cwe_from_tags(self, whitehat_sentinel_tags) -> str: Returns: The first CWE ID in the list, if it exists """ for tag in whitehat_sentinel_tags: - if tag.startswith('CWE-'): - return tag.split('-')[1] + if tag.startswith("CWE-"): + return tag.split("-")[1] def _parse_description(self, whitehat_sentinel_description: dict): """ @@ -78,19 +96,26 @@ def _parse_description(self, whitehat_sentinel_description: dict): Returns: A dict with description and reference link """ - description_ref = {'description': '', 'reference_link': ''} + description_ref = {"description": "", "reference_link": ""} # The references section is always between

    or tags - reference_heading_regex = '<.+>References<.+>' + reference_heading_regex = "<.+>References<.+>" - description_chunks = re.split(reference_heading_regex, whitehat_sentinel_description['description']) + description_chunks = re.split( + reference_heading_regex, + whitehat_sentinel_description["description"], + ) description = description_chunks[0] - description_ref['description'] = self.__remove_paragraph_tags(description) + description_ref["description"] = self.__remove_paragraph_tags( + description + ) if len(description_chunks) > 1: - description_ref['reference_link'] = self.__get_href_url(description_chunks[1]) + description_ref["reference_link"] = self.__get_href_url( + description_chunks[1] + ) return description_ref @@ -103,15 +128,17 @@ def _parse_solution(self, whitehat_sentinel_vuln_solution): Returns: """ - solution_html = whitehat_sentinel_vuln_solution['solution'] + solution_html = whitehat_sentinel_vuln_solution["solution"] - solution_text = re.sub(r'<.+>', '', solution_html) + solution_text = re.sub(r"<.+>", "", solution_html) - solution_text = solution_text.split('References')[0] + solution_text = solution_text.split("References")[0] - if whitehat_sentinel_vuln_solution.get('solution_prepend'): - solution_text = f"{solution_text}" \ - f"\n {whitehat_sentinel_vuln_solution.get('solution_prepend')}" + if whitehat_sentinel_vuln_solution.get("solution_prepend"): + solution_text = ( + f"{solution_text}" + f"\n {whitehat_sentinel_vuln_solution.get('solution_prepend')}" + ) return solution_text @@ -123,10 +150,10 @@ def __get_href_url(self, text_to_search): Returns: """ - links = '' + links = "" for match in re.findall(r'(', text_to_search): - links = f'{match[1]}\n{links}' + links = f"{match[1]}\n{links}" return links def __remove_paragraph_tags(self, html_string): @@ -137,9 +164,11 @@ def __remove_paragraph_tags(self, html_string): Returns: The original string stipped of paragraph tags """ - return re.sub(r'
<p>|</p>', '', html_string)
+        return re.sub(r"<p>|</p>
    ", "", html_string) - def _convert_attack_vectors_to_endpoints(self, attack_vectors: List[dict]) -> List['Endpoint']: + def _convert_attack_vectors_to_endpoints( + self, attack_vectors: List[dict] + ) -> List["Endpoint"]: """ Takes a list of Attack Vectors dictionaries from the WhiteHat vuln API and converts them to Defect Dojo Endpoints @@ -152,11 +181,15 @@ def _convert_attack_vectors_to_endpoints(self, attack_vectors: List[dict]) -> Li # This should be in the Endpoint class should it not? for attack_vector in attack_vectors: - endpoints_list.append(Endpoint.from_uri(attack_vector['request']['url'])) + endpoints_list.append( + Endpoint.from_uri(attack_vector["request"]["url"]) + ) return endpoints_list - def _convert_whitehat_sentinel_vulns_to_dojo_finding(self, whitehat_sentinel_vulns: [dict], test: str): + def _convert_whitehat_sentinel_vulns_to_dojo_finding( + self, whitehat_sentinel_vulns: [dict], test: str + ): """ Converts a WhiteHat Sentinel vuln to a DefectDojo finding @@ -168,26 +201,38 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(self, whitehat_sentinel_vul dupes = dict() for whitehat_vuln in whitehat_sentinel_vulns: - - date_created = whitehat_vuln['found'].split('T')[0] - mitigated_ts = whitehat_vuln.get('closed'.split('T')[0], None) - cwe = self._parse_cwe_from_tags(whitehat_vuln['attack_vectors'][0].get('scanner_tags', [])) - description_ref = self._parse_description(whitehat_vuln['description']) - description = description_ref['description'] - references = f"https://source.whitehatsec.com/asset-management/site" \ - f"-summary/{whitehat_vuln['site']}/findings/{whitehat_vuln['id']}" \ - f"\n{description_ref['reference_link']}" - steps = whitehat_vuln['description'].get('description_prepend', '') - solution = self._parse_solution(whitehat_vuln['solution']) - risk_id = whitehat_vuln.get('custom_risk') if whitehat_vuln.get( - 'custom_risk') else whitehat_vuln.get('risk') - severity = self._convert_whitehat_severity_id_to_dojo_severity(risk_id) - false_positive = whitehat_vuln.get('status') == 'invalid' - - active = whitehat_vuln.get('status') in ('open') + date_created = whitehat_vuln["found"].split("T")[0] + mitigated_ts = whitehat_vuln.get("closed".split("T")[0], None) + cwe = self._parse_cwe_from_tags( + whitehat_vuln["attack_vectors"][0].get("scanner_tags", []) + ) + description_ref = self._parse_description( + whitehat_vuln["description"] + ) + description = description_ref["description"] + references = ( + f"https://source.whitehatsec.com/asset-management/site" + f"-summary/{whitehat_vuln['site']}/findings/{whitehat_vuln['id']}" + f"\n{description_ref['reference_link']}" + ) + steps = whitehat_vuln["description"].get("description_prepend", "") + solution = self._parse_solution(whitehat_vuln["solution"]) + risk_id = ( + whitehat_vuln.get("custom_risk") + if whitehat_vuln.get("custom_risk") + else whitehat_vuln.get("risk") + ) + severity = self._convert_whitehat_severity_id_to_dojo_severity( + risk_id + ) + false_positive = whitehat_vuln.get("status") == "invalid" + + active = whitehat_vuln.get("status") in ("open") is_mitigated = not active - dupe_key = hashlib.md5(whitehat_vuln["id"].encode("utf-8")).hexdigest() + dupe_key = hashlib.md5( + whitehat_vuln["id"].encode("utf-8") + ).hexdigest() if dupe_key in dupes: finding = dupes[dupe_key] @@ -196,28 +241,31 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(self, whitehat_sentinel_vul else: dupes[dupe_key] = True - finding = Finding(title=whitehat_vuln['class'], - test=test, - cwe=cwe, - active=active, 
- verified=True, - description=description, - steps_to_reproduce=steps, - mitigation=solution, - references=references, - severity=severity, - false_p=false_positive, - date=date_created, - is_mitigated=is_mitigated, - mitigated=mitigated_ts, - last_reviewed=whitehat_vuln.get('lastRetested', None), - dynamic_finding=True, - created=date_created, - unique_id_from_tool=whitehat_vuln['id'] - ) + finding = Finding( + title=whitehat_vuln["class"], + test=test, + cwe=cwe, + active=active, + verified=True, + description=description, + steps_to_reproduce=steps, + mitigation=solution, + references=references, + severity=severity, + false_p=false_positive, + date=date_created, + is_mitigated=is_mitigated, + mitigated=mitigated_ts, + last_reviewed=whitehat_vuln.get("lastRetested", None), + dynamic_finding=True, + created=date_created, + unique_id_from_tool=whitehat_vuln["id"], + ) # Get Endpoints from Attack Vectors - endpoints = self._convert_attack_vectors_to_endpoints(whitehat_vuln['attack_vectors']) + endpoints = self._convert_attack_vectors_to_endpoints( + whitehat_vuln["attack_vectors"] + ) finding.unsaved_endpoints = endpoints dupes[dupe_key] = finding diff --git a/dojo/tools/whitesource/parser.py b/dojo/tools/whitesource/parser.py index bd5356ccac..6730d781ac 100644 --- a/dojo/tools/whitesource/parser.py +++ b/dojo/tools/whitesource/parser.py @@ -4,13 +4,12 @@ from dojo.models import Finding -__author__ = 'dr3dd589' +__author__ = "dr3dd589" logger = logging.getLogger(__name__) class WhitesourceParser(object): - def get_scan_types(self): return ["Whitesource Scan"] @@ -26,82 +25,101 @@ def get_findings(self, file, test): data = file.read() try: - content = json.loads(str(data, 'utf-8')) - except: + content = json.loads(str(data, "utf-8")) + except Exception: content = json.loads(data) def _build_common_output(node, lib_name=None): # project only available in manual export # name --> CVE in manual, library name in pipeline - project = "" cve = None component_name = None component_version = None - if 'library' in node: - project = node.get('project') - description = "**Description** : " + node.get('description', "") + "\n\n" + \ - "**Library Name** : " + node['library'].get('name', "") + "\n\n" + \ - "**Library Filename** : " + node['library'].get('filename', "") + "\n\n" + \ - "**Library Description** : " + node['library'].get('description', "") + "\n\n" + \ - "**Library Type** : " + node['library'].get('type', "") + "\n" - lib_name = node['library'].get('filename') - component_name = node['library'].get('artifactId') - component_version = node['library'].get('version') + if "library" in node: + node.get("project") + description = ( + "**Description** : " + + node.get("description", "") + + "\n\n" + + "**Library Name** : " + + node["library"].get("name", "") + + "\n\n" + + "**Library Filename** : " + + node["library"].get("filename", "") + + "\n\n" + + "**Library Description** : " + + node["library"].get("description", "") + + "\n\n" + + "**Library Type** : " + + node["library"].get("type", "") + + "\n" + ) + lib_name = node["library"].get("filename") + component_name = node["library"].get("artifactId") + component_version = node["library"].get("version") else: - description = node.get('description') + description = node.get("description") - cve = node.get('name') + cve = node.get("name") if cve is None: title = "CVE-None | " + lib_name else: title = cve + " | " + lib_name - # cvss2 by default in CLI, but cvss3 in UI. Adapting to have homogeneous behavior. 
- if 'cvss3_severity' in node: - cvss_sev = node.get('cvss3_severity') + # cvss2 by default in CLI, but cvss3 in UI. Adapting to have + # homogeneous behavior. + if "cvss3_severity" in node: + cvss_sev = node.get("cvss3_severity") else: - cvss_sev = node.get('severity') + cvss_sev = node.get("severity") severity = cvss_sev.lower().capitalize() - cvss3_score = node.get('cvss3_score', "N/A") - cvss3_vector = node.get('scoreMetadataVector', "N/A") - severity_justification = "CVSS v3 score: {} ({})".format(cvss3_score, cvss3_vector) + cvss3_score = node.get("cvss3_score", "N/A") + cvss3_vector = node.get("scoreMetadataVector", "N/A") + severity_justification = "CVSS v3 score: {} ({})".format( + cvss3_score, cvss3_vector + ) cwe = 1035 # default OWASP a9 until the report actually has them mitigation = "N/A" - if 'topFix' in node: + if "topFix" in node: try: - topfix_node = node.get('topFix') - mitigation = "**Resolution** ({}): {}\n" \ - .format( - topfix_node.get('date'), - topfix_node.get('fixResolution') - ) - except Exception as e: + topfix_node = node.get("topFix") + mitigation = "**Resolution** ({}): {}\n".format( + topfix_node.get("date"), + topfix_node.get("fixResolution"), + ) + except Exception: logger.exception("Error handling topFix node.") filepaths = [] - if 'sourceFiles' in node: + if "sourceFiles" in node: try: - sourceFiles_node = node.get('sourceFiles') + sourceFiles_node = node.get("sourceFiles") for sfile in sourceFiles_node: - filepaths.append(sfile.get('localPath')) - except Exception as e: - logger.exception("Error handling local paths for vulnerability.") - - return {'title': title, - 'description': description, - 'severity': severity, - 'mitigation': mitigation, - 'cve': cve, - 'cwe': cwe, - 'severity_justification': severity_justification, - 'file_path': ", ".join(filepaths), - 'component_name': component_name, - 'component_version': component_version - } + filepaths.append(sfile.get("localPath")) + except Exception: + logger.exception( + "Error handling local paths for vulnerability." 
+ ) + + return { + "title": title, + "description": description, + "severity": severity, + "mitigation": mitigation, + "cve": cve, + "cwe": cwe, + "severity_justification": severity_justification, + "file_path": ", ".join(filepaths), + "component_name": component_name, + "component_version": component_version, + } def _dedup_and_create_finding(dupes, vuln): - dupe_key = hashlib.md5(vuln.get('description').encode('utf-8') + vuln.get('title').encode('utf-8')).hexdigest() + dupe_key = hashlib.md5( + vuln.get("description").encode("utf-8") + + vuln.get("title").encode("utf-8") + ).hexdigest() if dupe_key in dupes: finding = dupes[dupe_key] @@ -111,20 +129,22 @@ def _dedup_and_create_finding(dupes, vuln): else: dupes[dupe_key] = True - finding = Finding(title=vuln.get('title'), - test=test, - description=vuln.get('description'), - severity=vuln.get('severity'), - cwe=vuln.get('cwe'), - mitigation=vuln.get('mitigation'), - references=vuln.get('references'), - file_path=vuln.get('file_path'), - component_name=vuln.get('component_name'), - component_version=vuln.get('component_version'), - severity_justification=vuln.get('severity_justification'), - dynamic_finding=True) - if vuln.get('cve'): - finding.unsaved_vulnerability_ids = [vuln.get('cve')] + finding = Finding( + title=vuln.get("title"), + test=test, + description=vuln.get("description"), + severity=vuln.get("severity"), + cwe=vuln.get("cwe"), + mitigation=vuln.get("mitigation"), + references=vuln.get("references"), + file_path=vuln.get("file_path"), + component_name=vuln.get("component_name"), + component_version=vuln.get("component_version"), + severity_justification=vuln.get("severity_justification"), + dynamic_finding=True, + ) + if vuln.get("cve"): + finding.unsaved_vulnerability_ids = [vuln.get("cve")] dupes[dupe_key] = finding output = [] @@ -132,17 +152,22 @@ def _dedup_and_create_finding(dupes, vuln): # we are likely dealing with a report generated from CLI with -generateScanReport, # which will output vulnerabilities as an array of a library # In this scenario, build up a an array - tree_libs = content.get('libraries') + tree_libs = content.get("libraries") for lib_node in tree_libs: # get the overall lib info here, before going into vulns - if 'vulnerabilities' in lib_node and len(lib_node.get('vulnerabilities')) > 0: - for vuln in lib_node.get('vulnerabilities'): - output.append(_build_common_output(vuln, lib_node.get('name'))) + if ( + "vulnerabilities" in lib_node + and len(lib_node.get("vulnerabilities")) > 0 + ): + for vuln in lib_node.get("vulnerabilities"): + output.append( + _build_common_output(vuln, lib_node.get("name")) + ) elif "vulnerabilities" in content: # likely a manual json export for vulnerabilities only for a project. # Vulns are standalone, and library is a property. 
- tree_node = content['vulnerabilities'] + tree_node = content["vulnerabilities"] for node in tree_node: output.append(_build_common_output(node)) diff --git a/dojo/tools/wpscan/parser.py b/dojo/tools/wpscan/parser.py index 9c0ac761da..1792de7700 100644 --- a/dojo/tools/wpscan/parser.py +++ b/dojo/tools/wpscan/parser.py @@ -18,13 +18,21 @@ def get_description_for_scan_types(self, scan_type): return "Import JSON report" def get_vulnerabilities( - self, report_date, vulnerabilities, dupes, node=None, plugin=None, detection_confidence=None + self, + report_date, + vulnerabilities, + dupes, + node=None, + plugin=None, + detection_confidence=None, ): for vul in vulnerabilities: description = "\n".join(["**Title:** `" + vul["title"] + "`\n"]) if node and "location" in node: - description += "**Location:** `" + "".join(node["location"]) + "`\n" + description += ( + "**Location:** `" + "".join(node["location"]) + "`\n" + ) if plugin: description += "**Plugin:** `" + "".join(plugin) + "`\n" @@ -37,7 +45,9 @@ def get_vulnerabilities( references=self.generate_references(vul["references"]), dynamic_finding=True, static_finding=False, - scanner_confidence=self._get_scanner_confidence(detection_confidence), + scanner_confidence=self._get_scanner_confidence( + detection_confidence + ), unique_id_from_tool=vul["references"]["wpvulndb"][0], nb_occurences=1, ) @@ -57,10 +67,14 @@ def get_vulnerabilities( if "cve" in vul["references"]: finding.unsaved_vulnerability_ids = list() for vulnerability_id in vul["references"]["cve"]: - finding.unsaved_vulnerability_ids.append(f"CVE-{vulnerability_id}") + finding.unsaved_vulnerability_ids.append( + f"CVE-{vulnerability_id}" + ) # internal de-duplication - dupe_key = hashlib.sha256(str(finding.unique_id_from_tool).encode("utf-8")).hexdigest() + dupe_key = hashlib.sha256( + str(finding.unique_id_from_tool).encode("utf-8") + ).hexdigest() if dupe_key in dupes: find = dupes[dupe_key] if finding.references: @@ -91,7 +105,10 @@ def get_findings(self, file, test): # manage Wordpress version findings if "version" in tree and tree["version"]: - if "vulnerabilities" in tree["version"] and tree["version"]["vulnerabilities"]: + if ( + "vulnerabilities" in tree["version"] + and tree["version"]["vulnerabilities"] + ): self.get_vulnerabilities( report_date, tree["version"]["vulnerabilities"], @@ -103,7 +120,9 @@ def get_findings(self, file, test): # manage interesting interesting_findings for interesting_finding in tree.get("interesting_findings", []): - references = self.generate_references(interesting_finding["references"]) + references = self.generate_references( + interesting_finding["references"] + ) description = "\n".join( [ "**Type:** `" + interesting_finding.get("type") + "`\n", @@ -111,14 +130,20 @@ def get_findings(self, file, test): ] ) if interesting_finding["interesting_entries"]: - description += "**Details:** `" + " ".join(interesting_finding["interesting_entries"]) + "`\n" + description += ( + "**Details:** `" + + " ".join(interesting_finding["interesting_entries"]) + + "`\n" + ) finding = Finding( title=f"Interesting finding: {interesting_finding.get('to_s')}", description=description, severity="Info", dynamic_finding=True, static_finding=False, - scanner_confidence=self._get_scanner_confidence(interesting_finding.get("confidence")), + scanner_confidence=self._get_scanner_confidence( + interesting_finding.get("confidence") + ), ) # manage endpoint endpoint = Endpoint.from_uri(interesting_finding["url"]) @@ -130,7 +155,11 @@ def get_findings(self, file, test): # 
internal de-duplication dupe_key = hashlib.sha256( - str("interesting_findings" + finding.title + interesting_finding["url"]).encode("utf-8") + str( + "interesting_findings" + + finding.title + + interesting_finding["url"] + ).encode("utf-8") ).hexdigest() if dupe_key in dupes: find = dupes[dupe_key] diff --git a/dojo/tools/xanitizer/__init__.py b/dojo/tools/xanitizer/__init__.py index e7a2f04c71..d6053bef86 100644 --- a/dojo/tools/xanitizer/__init__.py +++ b/dojo/tools/xanitizer/__init__.py @@ -1 +1 @@ -__author__ = 'jankuehl' +__author__ = "jankuehl" diff --git a/dojo/tools/xanitizer/parser.py b/dojo/tools/xanitizer/parser.py index 11d9769f0c..791aec06ef 100644 --- a/dojo/tools/xanitizer/parser.py +++ b/dojo/tools/xanitizer/parser.py @@ -1,4 +1,4 @@ -__author__ = 'jankuehl' +__author__ = "jankuehl" import re @@ -8,7 +8,6 @@ class XanitizerParser(object): - def get_scan_types(self): return ["Xanitizer Scan"] @@ -35,28 +34,32 @@ def parse_xml(self, filename): raise ValueError(se) root = tree.getroot() - if 'XanitizerFindingsList' not in root.tag: - raise ValueError("'{}' is not a valid Xanitizer findings list report XML file.".format(filename)) + if "XanitizerFindingsList" not in root.tag: + raise ValueError( + "'{}' is not a valid Xanitizer findings list report XML file.".format( + filename + ) + ) return root def get_findings_internal(self, root, test): items = list() - globalDate = root.get('timeStamp', default=None) + globalDate = root.get("timeStamp", default=None) if globalDate is not None: # only date no time globalDate = globalDate[:10] - for finding in root.findall('finding'): - line = finding.find('line').text + for finding in root.findall("finding"): + line = finding.find("line").text if line and int(line) <= 0: line = None date = globalDate - if finding.find('date') is not None: + if finding.find("date") is not None: # only date no time - date = finding.find('date').text[:10] + date = finding.find("date").text[:10] description = self.generate_description(finding) @@ -69,7 +72,8 @@ def get_findings_internal(self, root, test): file_path=self.generate_file_path(finding), line=line, date=date, - static_finding=True) + static_finding=True, + ) vulnerability_id = self.find_cve(description) if vulnerability_id: dojofinding.unsaved_vulnerability_ids = [vulnerability_id] @@ -79,90 +83,110 @@ def get_findings_internal(self, root, test): return items def generate_title(self, finding, line): - title = finding.find('problemType').text + title = finding.find("problemType").text - pckg = finding.find('package') - cl = finding.find('class') - file = finding.find('file') + pckg = finding.find("package") + cl = finding.find("class") + file = finding.find("file") if pckg is not None and cl is not None: if line: - title = '{} ({}.{}:{})'.format(title, pckg.text, cl.text, line) + title = "{} ({}.{}:{})".format(title, pckg.text, cl.text, line) else: - title = '{} ({}.{})'.format(title, pckg.text, cl.text) + title = "{} ({}.{})".format(title, pckg.text, cl.text) else: if line: - title = '{} ({}:{})'.format(title, file.text, line) + title = "{} ({}:{})".format(title, file.text, line) else: - title = '{} ({})'.format(title, file.text) + title = "{} ({})".format(title, file.text) return title def generate_description(self, finding): - description = '**Description:**\n{}'.format(finding.find('description').text) - - if finding.find('startNode') is not None: - startnode = finding.find('startNode') - endnode = finding.find('endNode') - description = '{}\n-----\n'.format(description) - description = 
'{}\n**Starting at:** {} - **Line** {}'.format(description, startnode.get('classFQN'), startnode.get('lineNo')) + description = "**Description:**\n{}".format( + finding.find("description").text + ) + + if finding.find("startNode") is not None: + startnode = finding.find("startNode") + endnode = finding.find("endNode") + description = "{}\n-----\n".format(description) + description = "{}\n**Starting at:** {} - **Line** {}".format( + description, startnode.get("classFQN"), startnode.get("lineNo") + ) description = self.add_code(startnode, False, description) - description = '{}\n\n**Ending at:** {} - **Line** {}'.format(description, endnode.get('classFQN'), endnode.get('lineNo')) + description = "{}\n\n**Ending at:** {} - **Line** {}".format( + description, endnode.get("classFQN"), endnode.get("lineNo") + ) description = self.add_code(endnode, True, description) - elif finding.find('node') is not None: - node = finding.find('node') - description = '{}\n-----\n'.format(description) - line = node.get('lineNo') - location = node.get('classFQN') + elif finding.find("node") is not None: + node = finding.find("node") + description = "{}\n-----\n".format(description) + line = node.get("lineNo") + location = node.get("classFQN") if location is None: - location = node.get('relativePath') + location = node.get("relativePath") if line is not None and int(line) > 0: - description = '{}\n**Finding at:** {} - **Line** {}'.format(description, location, line) + description = "{}\n**Finding at:** {} - **Line** {}".format( + description, location, line + ) else: - description = '{}\n**Finding at:** {}'.format(description, location) + description = "{}\n**Finding at:** {}".format( + description, location + ) description = self.add_code(node, True, description) return description def add_code(self, node, showline, description): - codelines = node.findall('code') + codelines = node.findall("code") if codelines is None or len(codelines) == 0: return description if showline or len(codelines) == 1: for code in codelines: - if code.get('finding') == 'true': - description = '{}\n**Finding Line:** {}'.format(description, code.text) + if code.get("finding") == "true": + description = "{}\n**Finding Line:** {}".format( + description, code.text + ) if len(codelines) > 1: - description = '{}\n**Code Excerpt:** '.format(description) + description = "{}\n**Code Excerpt:** ".format(description) for code in codelines: if code.text: - description = '{}\n{}: {}'.format(description, code.get('lineNo'), code.text) + description = "{}\n{}: {}".format( + description, code.get("lineNo"), code.text + ) else: - description = '{}\n{}: '.format(description, code.get('lineNo')) + description = "{}\n{}: ".format( + description, code.get("lineNo") + ) return description def generate_file_path(self, finding): - file_path = None - - if finding.find('endNode') is not None and finding.find('endNode').get('relativePath'): - return finding.find('endNode').get('relativePath') - elif finding.find('node') is not None and finding.find('node').get('relativePath'): - return finding.find('node').get('relativePath') - - pckg = finding.find('package') - file = finding.find('file') + pass + + if finding.find("endNode") is not None and finding.find("endNode").get( + "relativePath" + ): + return finding.find("endNode").get("relativePath") + elif finding.find("node") is not None and finding.find("node").get( + "relativePath" + ): + return finding.find("node").get("relativePath") + + pckg = finding.find("package") + file = finding.find("file") if pckg is not 
None: - return '{}/{}'.format(pckg.text.replace('.', '/'), file.text) + return "{}/{}".format(pckg.text.replace(".", "/"), file.text) return file.text def resolve_cwe(self, finding): - if finding.find('cweNumber') is not None: - cwe = finding.find('cweNumber').text - if len(cwe) > 4 and cwe[:4] == 'CWE-': + if finding.find("cweNumber") is not None: + cwe = finding.find("cweNumber").text + if len(cwe) > 4 and cwe[:4] == "CWE-": # remove leading 'CWE-' and ',' '.' return cwe[4:].replace(",", "").replace(".", "") @@ -170,7 +194,7 @@ def resolve_cwe(self, finding): def find_cve(self, description): # copy from models.py - match = re.search(r'CVE-\d{4}-\d{4,7}', description) + match = re.search(r"CVE-\d{4}-\d{4,7}", description) if match: return match.group() @@ -178,18 +202,18 @@ def find_cve(self, description): return None def resolve_severity(self, finding): - if finding.find('rating') is None or not finding.find('rating').text: - return 'Info' + if finding.find("rating") is None or not finding.find("rating").text: + return "Info" - rating = float(finding.find('rating').text) + rating = float(finding.find("rating").text) if rating == 0: - return 'Info' + return "Info" if rating < 4: - return 'Low' + return "Low" if rating < 7: - return 'Medium' + return "Medium" if rating < 9: - return 'High' + return "High" - return 'Critical' + return "Critical" diff --git a/dojo/tools/yarn_audit/parser.py b/dojo/tools/yarn_audit/parser.py index e163ba33d5..325049dd51 100644 --- a/dojo/tools/yarn_audit/parser.py +++ b/dojo/tools/yarn_audit/parser.py @@ -5,7 +5,6 @@ class YarnAuditParser(object): - def get_scan_types(self): return ["Yarn Audit Scan"] @@ -24,67 +23,88 @@ def get_findings(self, json_output, test): def get_items(self, tree, test): items = {} for element in tree: - if element.get('type') == 'auditAdvisory': - node = element.get('data').get('advisory') + if element.get("type") == "auditAdvisory": + node = element.get("data").get("advisory") item = get_item(node, test) - unique_key = str(node.get('id')) + str(node.get('module_name')) + unique_key = str(node.get("id")) + str(node.get("module_name")) items[unique_key] = item - elif element.get('type') == 'error': - error = element.get('data') - raise ValueError('yarn audit report contains errors: %s', error) + elif element.get("type") == "error": + error = element.get("data") + raise ValueError( + "yarn audit report contains errors: %s", error + ) return list(items.values()) def get_item(item_node, test): - - if item_node['severity'] == 'low': - severity = 'Low' - elif item_node['severity'] == 'moderate': - severity = 'Medium' - elif item_node['severity'] == 'high': - severity = 'High' - elif item_node['severity'] == 'critical': - severity = 'Critical' + if item_node["severity"] == "low": + severity = "Low" + elif item_node["severity"] == "moderate": + severity = "Medium" + elif item_node["severity"] == "high": + severity = "High" + elif item_node["severity"] == "critical": + severity = "Critical" else: - severity = 'Info' - - paths = '' - for finding in item_node['findings']: - paths += "\n - " + str(finding['version']) + ":" + str(','.join(finding['paths'][:25])) - if len(finding['paths']) > 25: + severity = "Info" + + paths = "" + for finding in item_node["findings"]: + paths += ( + "\n - " + + str(finding["version"]) + + ":" + + str(",".join(finding["paths"][:25])) + ) + if len(finding["paths"]) > 25: paths += "\n - ..... 
(list of paths truncated after 25 paths)" cwe = get_npm_cwe(item_node) - dojo_finding = Finding(title=item_node['title'] + " - " + "(" + item_node['module_name'] + ", " + item_node['vulnerable_versions'] + ")", - test=test, - severity=severity, - file_path=item_node['findings'][0]['paths'][0], - description=item_node['url'] + "\n" + - item_node['overview'] + "\n Vulnerable Module: " + - item_node['module_name'] + "\n Vulnerable Versions: " + - str(item_node['vulnerable_versions']) + "\n Patched Version: " + - str(item_node['patched_versions']) + "\n Vulnerable Paths: " + - str(paths) + "\n CWE: " + - str(item_node['cwe']) + "\n Access: " + - str(item_node['access']), - cwe=cwe, - mitigation=item_node['recommendation'], - references=item_node['url'], - component_name=item_node['module_name'], - component_version=item_node['findings'][0]['version'], - false_p=False, - duplicate=False, - out_of_scope=False, - mitigated=None, - impact="No impact provided", - static_finding=True, - dynamic_finding=False) - - if len(item_node['cves']) > 0: + dojo_finding = Finding( + title=item_node["title"] + + " - " + + "(" + + item_node["module_name"] + + ", " + + item_node["vulnerable_versions"] + + ")", + test=test, + severity=severity, + file_path=item_node["findings"][0]["paths"][0], + description=item_node["url"] + + "\n" + + item_node["overview"] + + "\n Vulnerable Module: " + + item_node["module_name"] + + "\n Vulnerable Versions: " + + str(item_node["vulnerable_versions"]) + + "\n Patched Version: " + + str(item_node["patched_versions"]) + + "\n Vulnerable Paths: " + + str(paths) + + "\n CWE: " + + str(item_node["cwe"]) + + "\n Access: " + + str(item_node["access"]), + cwe=cwe, + mitigation=item_node["recommendation"], + references=item_node["url"], + component_name=item_node["module_name"], + component_version=item_node["findings"][0]["version"], + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated=None, + impact="No impact provided", + static_finding=True, + dynamic_finding=False, + ) + + if len(item_node["cves"]) > 0: dojo_finding.unsaved_vulnerability_ids = list() - for vulnerability_id in item_node['cves']: + for vulnerability_id in item_node["cves"]: dojo_finding.unsaved_vulnerability_ids.append(vulnerability_id) return dojo_finding diff --git a/dojo/tools/zap/parser.py b/dojo/tools/zap/parser.py index ff1fd41709..c62362cab5 100755 --- a/dojo/tools/zap/parser.py +++ b/dojo/tools/zap/parser.py @@ -34,15 +34,22 @@ def get_findings(self, file, test): test=test, title=item.findtext("alert"), description=html2text(item.findtext("desc")), - severity=self.MAPPING_SEVERITY.get(item.findtext("riskcode")), - scanner_confidence=self.MAPPING_CONFIDENCE.get(item.findtext("riskcode")), + severity=self.MAPPING_SEVERITY.get( + item.findtext("riskcode") + ), + scanner_confidence=self.MAPPING_CONFIDENCE.get( + item.findtext("riskcode") + ), mitigation=html2text(item.findtext("solution")), references=html2text(item.findtext("reference")), dynamic_finding=True, static_finding=False, vuln_id_from_tool=item.findtext("pluginid"), ) - if item.findtext("cweid") is not None and item.findtext("cweid").isdigit(): + if ( + item.findtext("cweid") is not None + and item.findtext("cweid").isdigit() + ): finding.cwe = int(item.findtext("cweid")) finding.unsaved_endpoints = [] @@ -50,22 +57,31 @@ def get_findings(self, file, test): for instance in item.findall("instances/instance"): endpoint = Endpoint.from_uri(instance.findtext("uri")) # If the requestheader key is set, the report is in the "XML with requests 
and responses" - # format - load requests and responses and add them to the database - if instance.findtext('requestheader') is not None: + # format - load requests and responses and add them to the + # database + if instance.findtext("requestheader") is not None: # Assemble the request from header and body - request = instance.findtext('requestheader') + instance.findtext('requestbody') - response = instance.findtext('responseheader') + instance.findtext('responsebody') + request = instance.findtext( + "requestheader" + ) + instance.findtext("requestbody") + response = instance.findtext( + "responseheader" + ) + instance.findtext("responsebody") else: # The report is in the regular XML format, without requests and responses. - # Use the default settings for constructing the request and response fields. + # Use the default settings for constructing the request + # and response fields. request = f"{instance.findtext('method')} {endpoint.query}#{endpoint.fragment}" response = f"{instance.findtext('evidence')}" # we remove query and fragment because with some configuration - # the tool generate them on-the-go and it produces a lot of fake endpoints + # the tool generate them on-the-go and it produces a lot of + # fake endpoints endpoint.query = None endpoint.fragment = None finding.unsaved_endpoints.append(endpoint) - finding.unsaved_req_resp.append({"req": request, "resp": response}) + finding.unsaved_req_resp.append( + {"req": request, "resp": response} + ) items.append(finding) return items From e1ff891ba6c0394b18d7bdfdb9e4576efd2d10a8 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 10 Jul 2023 15:05:01 +0000 Subject: [PATCH 21/85] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index e17869c361..4e8aea8365 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.24.1", + "version": "2.25.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index cdf6a76808..4c1f6f5856 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa -__version__ = '2.24.1' +__version__ = '2.25.0-dev' __url__ = 'https://github.com/DefectDojo/django-DefectDojo' __docs__ = 'https://documentation.defectdojo.com' diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 9d403da76a..8cf30a2776 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.24.1" +appVersion: "2.25.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.75 +version: 1.6.76-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From de76a74ea9afc130c1e59ba78567f8a0199dcb50 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Mon, 10 Jul 2023 10:13:12 -0500 Subject: [PATCH 22/85] Correct typos reintroduce fixing conflicts --- dojo/filters.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dojo/filters.py b/dojo/filters.py index 8c45f257c4..5b669625dc 100644 --- a/dojo/filters.py +++ b/dojo/filters.py @@ -1148,7 +1148,7 @@ class ApiFindingFilter(DojoFilter): help_text='Comma separated list of exact tags') test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags present on engagement') + help_text='Comma separated list of exact tags present on engagement') test__engagement__product__tags = CharFieldInFilter(field_name='test__engagement__product__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags present on product') @@ -1158,7 +1158,7 @@ class ApiFindingFilter(DojoFilter): help_text='Comma separated list of exact tags not present on model', exclude='True') not_test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', not_test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on engagement', + help_text='Comma separated list of exact tags not present on engagement', exclude='True') not_test__engagement__product__tags = CharFieldInFilter(field_name='test__engagement__product__tags__name', lookup_expr='in', @@ -1895,18 +1895,18 @@ def __init__(self, *args, **kwargs): class ApiTestFilter(DojoFilter): tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags') + help_text='Comma separated list of exact tags') engagement__tags = CharFieldInFilter(field_name='engagement__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags present on engagement') + help_text='Comma separated list of exact tags present on engagement') engagement__product__tags = CharFieldInFilter(field_name='engagement__product__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags present on product') not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on model', exclude='True') + help_text='Comma separated list of exact tags not present on model', exclude='True') not_engagement__tags = 
CharFieldInFilter(field_name='engagement__tags__name', lookup_expr='in', - help_text='Comma seperated list of exact tags not present on engagement', + help_text='Comma separated list of exact tags not present on engagement', exclude='True') not_engagement__product__tags = CharFieldInFilter(field_name='engagement__product__tags__name', lookup_expr='in', From f9be980990afc9b3ca31ba17d6e3f67c846f2ab1 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Mon, 10 Jul 2023 10:21:15 -0500 Subject: [PATCH 23/85] Fix Flake8 --- dojo/filters.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/dojo/filters.py b/dojo/filters.py index 5b669625dc..8cdc7e186c 100644 --- a/dojo/filters.py +++ b/dojo/filters.py @@ -1149,9 +1149,10 @@ class ApiFindingFilter(DojoFilter): test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags present on engagement') - test__engagement__product__tags = CharFieldInFilter(field_name='test__engagement__product__tags__name', - lookup_expr='in', - help_text='Comma separated list of exact tags present on product') + test__engagement__product__tags = CharFieldInFilter( + field_name='test__engagement__product__tags__name', + lookup_expr='in', + help_text='Comma separated list of exact tags present on product') not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', @@ -1160,10 +1161,11 @@ class ApiFindingFilter(DojoFilter): not_test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags not present on engagement', exclude='True') - not_test__engagement__product__tags = CharFieldInFilter(field_name='test__engagement__product__tags__name', - lookup_expr='in', - help_text='Comma separated list of exact tags not present on product', - exclude='True') + not_test__engagement__product__tags = CharFieldInFilter( + field_name='test__engagement__product__tags__name', + lookup_expr='in', + help_text='Comma separated list of exact tags not present on product', + exclude='True') o = OrderingFilter( # tuple-mapping retains order From 772798babf220b5b9453f0aecdafa5549ef69308 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Mon, 10 Jul 2023 10:28:27 -0500 Subject: [PATCH 24/85] Fix Flake8 for real this time... 
--- dojo/filters.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dojo/filters.py b/dojo/filters.py index 8cdc7e186c..e99665293b 100644 --- a/dojo/filters.py +++ b/dojo/filters.py @@ -1146,7 +1146,7 @@ class ApiFindingFilter(DojoFilter): tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', help_text='Comma separated list of exact tags') - test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', + test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags present on test') test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags present on engagement') test__engagement__product__tags = CharFieldInFilter( @@ -1157,7 +1157,7 @@ class ApiFindingFilter(DojoFilter): not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', help_text='Comma separated list of exact tags not present on model', exclude='True') - not_test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', + not_test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags present on test') not_test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags not present on engagement', exclude='True') From d1714457c14760b6d60411062775ed963dac09af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 11:15:08 -0500 Subject: [PATCH 25/85] Bump sqlalchemy from 2.0.17 to 2.0.18 (#8355) Bumps [sqlalchemy](https://github.com/sqlalchemy/sqlalchemy) from 2.0.17 to 2.0.18. - [Release notes](https://github.com/sqlalchemy/sqlalchemy/releases) - [Changelog](https://github.com/sqlalchemy/sqlalchemy/blob/main/CHANGES.rst) - [Commits](https://github.com/sqlalchemy/sqlalchemy/commits) --- updated-dependencies: - dependency-name: sqlalchemy dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2ad55dad84..1a0f4eaf1e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -43,7 +43,7 @@ python-dateutil==2.8.2 pytz==2023.3 redis==4.6.0 requests==2.31.0 -sqlalchemy==2.0.17 # Required by Celery broker transport +sqlalchemy==2.0.18 # Required by Celery broker transport supervisor==4.2.5 urllib3==1.26.11 uWSGI==2.0.21 From 9e382137f7efa17a141f2e6296712b5be8779e25 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 11:25:57 -0500 Subject: [PATCH 26/85] Bump python from `9efc6e1` to `9efc6e1` (#8367) Bumps python from `9efc6e1` to `9efc6e1`. --- updated-dependencies: - dependency-name: python dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile.django-debian | 2 +- Dockerfile.integration-tests-debian | 2 +- Dockerfile.nginx-debian | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index cac385b199..5228d3d79f 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -5,7 +5,7 @@ # Dockerfile.nginx to use the caching mechanism of Docker. # Ref: https://devguide.python.org/#branchstatus -FROM python:3.11.4-slim-bullseye@sha256:91d194f58f50594cda71dcd2e8fdefd90e7ecc57d07823813b67c8521e565dcd as base +FROM python:3.11.4-slim-bullseye@sha256:9b4d90af2003eef5d862f8118d8645d37d170402645a09e48241a3e492a0d4dc as base FROM base as build WORKDIR /app RUN \ diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian index ba43714ef0..06ab484a52 100644 --- a/Dockerfile.integration-tests-debian +++ b/Dockerfile.integration-tests-debian @@ -1,7 +1,7 @@ # code: language=Dockerfile -FROM python:3.11.4-slim-bullseye@sha256:91d194f58f50594cda71dcd2e8fdefd90e7ecc57d07823813b67c8521e565dcd as build +FROM python:3.11.4-slim-bullseye@sha256:9b4d90af2003eef5d862f8118d8645d37d170402645a09e48241a3e492a0d4dc as build WORKDIR /app RUN \ apt-get -y update && \ diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian index 1aa6255645..4a8314c604 100644 --- a/Dockerfile.nginx-debian +++ b/Dockerfile.nginx-debian @@ -5,7 +5,7 @@ # Dockerfile.django-debian to use the caching mechanism of Docker. # Ref: https://devguide.python.org/#branchstatus -FROM python:3.11.4-slim-bullseye@sha256:91d194f58f50594cda71dcd2e8fdefd90e7ecc57d07823813b67c8521e565dcd as base +FROM python:3.11.4-slim-bullseye@sha256:9b4d90af2003eef5d862f8118d8645d37d170402645a09e48241a3e492a0d4dc as base FROM base as build WORKDIR /app RUN \ From 4d32076505a40e4948e61af1c7e971461c80428e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 11:26:35 -0500 Subject: [PATCH 27/85] Bump boto3 from 1.28.0 to 1.28.1 (#8366) Bumps [boto3](https://github.com/boto/boto3) from 1.28.0 to 1.28.1. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.0...1.28.1) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1a0f4eaf1e..d207094e52 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.0 # Required for Celery Broker AWS (SQS) support +boto3==1.28.1 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From f0a4d43315743848830dc94bfb454e4c3976a31c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 14 Jul 2023 09:25:30 -0500 Subject: [PATCH 28/85] Update dependency postcss from 8.4.25 to v8.4.26 (docs/package.json) (#8377) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docs/package-lock.json | 14 +++++++------- docs/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 21416effb1..bd3e42f6c1 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -6,7 +6,7 @@ "": { "devDependencies": { "autoprefixer": "10.4.14", - "postcss": "8.4.25", + "postcss": "8.4.26", "postcss-cli": "10.1.0" } }, @@ -596,9 +596,9 @@ } }, "node_modules/postcss": { - "version": "8.4.25", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.25.tgz", - "integrity": "sha512-7taJ/8t2av0Z+sQEvNzCkpDynl0tX3uJMCODi6nT3PfASC7dYCWV9aQ+uiCf+KBD4SEFcu+GvJdGdwzQ6OSjCw==", + "version": "8.4.26", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.26.tgz", + "integrity": "sha512-jrXHFF8iTloAenySjM/ob3gSj7pCu0Ji49hnjqzsgSRa50hkWCKD0HQ+gMNJkW38jBI68MpAAg7ZWwHwX8NMMw==", "dev": true, "funding": [ { @@ -1366,9 +1366,9 @@ "dev": true }, "postcss": { - "version": "8.4.25", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.25.tgz", - "integrity": "sha512-7taJ/8t2av0Z+sQEvNzCkpDynl0tX3uJMCODi6nT3PfASC7dYCWV9aQ+uiCf+KBD4SEFcu+GvJdGdwzQ6OSjCw==", + "version": "8.4.26", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.26.tgz", + "integrity": "sha512-jrXHFF8iTloAenySjM/ob3gSj7pCu0Ji49hnjqzsgSRa50hkWCKD0HQ+gMNJkW38jBI68MpAAg7ZWwHwX8NMMw==", "dev": true, "requires": { "nanoid": "^3.3.6", diff --git a/docs/package.json b/docs/package.json index c7e2160786..aeb1be46b6 100644 --- a/docs/package.json +++ b/docs/package.json @@ -1,6 +1,6 @@ { "devDependencies": { - "postcss": "8.4.25", + "postcss": "8.4.26", "autoprefixer": "10.4.14", "postcss-cli": "10.1.0" } From 68cc701ad70b415acd7749101fe8f6236df44485 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Jul 2023 09:25:54 -0500 Subject: [PATCH 29/85] Bump cryptography from 41.0.1 to 41.0.2 (#8373) Bumps [cryptography](https://github.com/pyca/cryptography) from 41.0.1 to 41.0.2. - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/41.0.1...41.0.2) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d207094e52..1681aee399 100644 --- a/requirements.txt +++ b/requirements.txt @@ -38,7 +38,7 @@ openpyxl==3.1.2 xlrd==1.2.0 Pillow==10.0.0 # required by django-imagekit psycopg2-binary==2.9.6 -cryptography==41.0.1 +cryptography==41.0.2 python-dateutil==2.8.2 pytz==2023.3 redis==4.6.0 From bb697cd5d767edd55c8bc39ed946300a950db435 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Jul 2023 09:26:39 -0500 Subject: [PATCH 30/85] Bump gitpython from 3.1.31 to 3.1.32 (#8372) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.31 to 3.1.32. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.31...3.1.32) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1681aee399..a4210867fc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -53,7 +53,7 @@ titlecase==2.3 social-auth-app-django==5.2.0 social-auth-core==4.4.2 Python-jose==3.3.0 -gitpython==3.1.31 +gitpython==3.1.32 debugpy==1.6.7 python-gitlab==3.15.0 drf_yasg==1.21.5 From 9774e5a296b36f1ca2dd89dfb6315d848c2fd3b3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 14 Jul 2023 09:26:54 -0500 Subject: [PATCH 31/85] Update redis Docker tag from 7.0.11 to v7.0.12 (docker-compose.yml) (#8371) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 53ef096186..a016f7c349 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -161,7 +161,7 @@ services: volumes: - defectdojo_rabbitmq:/var/lib/rabbitmq redis: - image: redis:7.0.11-alpine@sha256:121bac949fb5f623b9fa0b4e4c9fb358ffd045966e754cfa3eb9963f3af2fe3b + image: redis:7.0.12-alpine@sha256:1717c713d3b2161db30cd026ceffdb9c238fe876f6959bf62caff9c768fb47ef profiles: - mysql-redis - postgres-redis From c42337536dbff98fcaa946544eecacf06237f172 Mon Sep 17 00:00:00 2001 From: Alejandro Tortolero Date: Fri, 14 Jul 2023 09:27:54 -0500 Subject: [PATCH 32/85] Update files with PEP8 standards in folder dojo #002 (#8321) * Update files in folder dojo/announcement PEP8 standards. * Update files in folder dojo/api_v2 PEP8 standards. * Update files in folder dojo/authorization PEP8 standards. * Update files in folder dojo/benchmark PEP8 standards. * Update files in folder dojo/components PEP8 standards. * Change BaseException to Exception * Removing unused variable. * Change BaseException to Exception. * Removing unused variable. 
--- dojo/announcement/signals.py | 13 +- dojo/announcement/urls.py | 14 +- dojo/announcement/views.py | 100 +- dojo/api_v2/exception_handler.py | 20 +- dojo/api_v2/mixins.py | 18 +- dojo/api_v2/permissions.py | 327 +- dojo/api_v2/prefetch/__init__.py | 2 +- dojo/api_v2/prefetch/mixins.py | 3 +- dojo/api_v2/prefetch/prefetcher.py | 15 +- dojo/api_v2/prefetch/schema.py | 172 +- dojo/api_v2/prefetch/utils.py | 7 +- dojo/api_v2/schema/__init__.py | 23 +- dojo/api_v2/schema/extra_schema.py | 24 +- dojo/api_v2/schema/utils.py | 13 +- dojo/api_v2/serializers.py | 2423 +++++++---- dojo/api_v2/views.py | 3737 +++++++++++------ dojo/authorization/authorization.py | 235 +- .../authorization/authorization_decorators.py | 17 +- dojo/authorization/roles_permissions.py | 110 +- dojo/banner/urls.py | 5 +- dojo/banner/views.py | 37 +- dojo/benchmark/urls.py | 36 +- dojo/benchmark/views.py | 310 +- dojo/components/sql_group_concat.py | 40 +- dojo/components/urls.py | 3 +- dojo/components/views.py | 65 +- 26 files changed, 5126 insertions(+), 2643 deletions(-) diff --git a/dojo/announcement/signals.py b/dojo/announcement/signals.py index 9a604a0cc2..8c61d4f4ad 100644 --- a/dojo/announcement/signals.py +++ b/dojo/announcement/signals.py @@ -10,12 +10,15 @@ def add_announcement_to_new_user(sender, instance, **kwargs): if announcements.count() > 0: dojo_user = Dojo_User.objects.get(id=instance.id) announcement = announcements.first() - cloud_announcement = "Cloud and On-Premise Subscriptions Now Available!" in announcement.message + cloud_announcement = ( + "Cloud and On-Premise Subscriptions Now Available!" + in announcement.message + ) if not cloud_announcement or settings.CREATE_CLOUD_BANNER: user_announcements = UserAnnouncement.objects.filter( - user=dojo_user, - announcement=announcement) + user=dojo_user, announcement=announcement + ) if user_announcements.count() == 0: UserAnnouncement.objects.get_or_create( - user=dojo_user, - announcement=announcement) + user=dojo_user, announcement=announcement + ) diff --git a/dojo/announcement/urls.py b/dojo/announcement/urls.py index 77d4702bab..c62d26f13d 100644 --- a/dojo/announcement/urls.py +++ b/dojo/announcement/urls.py @@ -2,8 +2,14 @@ from dojo.announcement import views urlpatterns = [ - re_path(r'^configure_announcement$', views.configure_announcement, - name='configure_announcement'), - re_path(r'^dismiss_announcement$', views.dismiss_announcement, - name='dismiss_announcement'), + re_path( + r"^configure_announcement$", + views.configure_announcement, + name="configure_announcement", + ), + re_path( + r"^dismiss_announcement$", + views.dismiss_announcement, + name="dismiss_announcement", + ), ] diff --git a/dojo/announcement/views.py b/dojo/announcement/views.py index 7a50cc840e..8e9c155398 100644 --- a/dojo/announcement/views.py +++ b/dojo/announcement/views.py @@ -10,72 +10,92 @@ from dojo.forms import AnnouncementCreateForm, AnnouncementRemoveForm from dojo.models import Announcement, UserAnnouncement, Dojo_User -from dojo.authorization.authorization_decorators import user_is_configuration_authorized +from dojo.authorization.authorization_decorators import ( + user_is_configuration_authorized, +) logger = logging.getLogger(__name__) -@user_is_configuration_authorized('dojo.change_announcement') +@user_is_configuration_authorized("dojo.change_announcement") def configure_announcement(request): remove = False - if request.method == 'GET': + if request.method == "GET": try: announcement = Announcement.objects.get(id=1) - form = 
AnnouncementRemoveForm(initial={ - 'message': announcement.message, - 'style': announcement.style, - 'dismissable': announcement.dismissable, - }) + form = AnnouncementRemoveForm( + initial={ + "message": announcement.message, + "style": announcement.style, + "dismissable": announcement.dismissable, + } + ) remove = True except Announcement.DoesNotExist: form = AnnouncementCreateForm() - elif request.method == 'POST': + elif request.method == "POST": if "_Remove" in request.POST: Announcement.objects.all().delete() - messages.add_message(request, - messages.SUCCESS, - _('Announcement removed for everyone.'), - extra_tags='alert-success') - return HttpResponseRedirect('dashboard') + messages.add_message( + request, + messages.SUCCESS, + _("Announcement removed for everyone."), + extra_tags="alert-success", + ) + return HttpResponseRedirect("dashboard") form = AnnouncementCreateForm(request.POST) announcement, created = Announcement.objects.get_or_create(id=1) if form.is_valid() and created: - announcement.message = form.cleaned_data['message'] - announcement.style = form.cleaned_data['style'] - announcement.dismissable = form.cleaned_data['dismissable'] + announcement.message = form.cleaned_data["message"] + announcement.style = form.cleaned_data["style"] + announcement.dismissable = form.cleaned_data["dismissable"] announcement.save() if created: - UserAnnouncement.objects.bulk_create([ - UserAnnouncement(user=user_id, announcement=announcement) for user_id in Dojo_User.objects.all() - ]) + UserAnnouncement.objects.bulk_create( + [ + UserAnnouncement( + user=user_id, announcement=announcement + ) + for user_id in Dojo_User.objects.all() + ] + ) messages.add_message( request, messages.SUCCESS, - _('Announcement updated successfully.'), - extra_tags='alert-success', + _("Announcement updated successfully."), + extra_tags="alert-success", ) return HttpResponseRedirect(reverse("configure_announcement")) - add_breadcrumb(title=gettext("Announcement Configuration"), top_level=True, request=request) - return render(request, 'dojo/announcement.html', { - 'form': form, - 'remove': remove - }) + add_breadcrumb( + title=gettext("Announcement Configuration"), + top_level=True, + request=request, + ) + return render( + request, "dojo/announcement.html", {"form": form, "remove": remove} + ) def dismiss_announcement(request): - if request.method == 'POST': - deleted_count, objects_deleted = UserAnnouncement.objects.filter(user=request.user, announcement=1).delete() + if request.method == "POST": + deleted_count, objects_deleted = UserAnnouncement.objects.filter( + user=request.user, announcement=1 + ).delete() if deleted_count > 0: - messages.add_message(request, - messages.SUCCESS, - _('Announcement removed.'), - extra_tags='alert-success') - return HttpResponseRedirect('dashboard') + messages.add_message( + request, + messages.SUCCESS, + _("Announcement removed."), + extra_tags="alert-success", + ) + return HttpResponseRedirect("dashboard") else: - messages.add_message(request, - messages.ERROR, - _('Failed to remove announcement.'), - extra_tags='alert-danger') - return render(request, 'dojo/dismiss_announcement.html') - return render(request, 'dojo/dismiss_announcement.html') + messages.add_message( + request, + messages.ERROR, + _("Failed to remove announcement."), + extra_tags="alert-danger", + ) + return render(request, "dojo/dismiss_announcement.html") + return render(request, "dojo/dismiss_announcement.html") diff --git a/dojo/api_v2/exception_handler.py b/dojo/api_v2/exception_handler.py index 
46131c6fa5..5aa677bd10 100644 --- a/dojo/api_v2/exception_handler.py +++ b/dojo/api_v2/exception_handler.py @@ -1,7 +1,11 @@ from django.core.exceptions import ValidationError from django.db.models.deletion import RestrictedError from rest_framework.response import Response -from rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_409_CONFLICT, HTTP_500_INTERNAL_SERVER_ERROR +from rest_framework.status import ( + HTTP_400_BAD_REQUEST, + HTTP_409_CONFLICT, + HTTP_500_INTERNAL_SERVER_ERROR, +) from rest_framework.views import exception_handler import logging @@ -18,12 +22,12 @@ def custom_exception_handler(exc, context): response = Response() response.status_code = HTTP_409_CONFLICT response.data = {} - response.data['message'] = str(exc) + response.data["message"] = str(exc) elif isinstance(exc, ValidationError): response = Response() response.status_code = HTTP_400_BAD_REQUEST response.data = {} - response.data['message'] = str(exc) + response.data["message"] = str(exc) else: if response is None: # There is no standard error response, so we assume an unexpected @@ -33,15 +37,19 @@ def custom_exception_handler(exc, context): response = Response() response.status_code = HTTP_500_INTERNAL_SERVER_ERROR response.data = {} - response.data['message'] = 'Internal server error, check logs for details' + response.data[ + "message" + ] = "Internal server error, check logs for details" else: if response.status_code < 500: # HTTP status codes lower than 500 are no technical errors. # They need not to be logged and we provide the exception # message, if it is different from the detail that is already # in the response. - if isinstance(response.data, dict) and str(exc) != response.data.get('detail', ''): - response.data['message'] = str(exc) + if isinstance(response.data, dict) and str( + exc + ) != response.data.get("detail", ""): + response.data["message"] = str(exc) else: # HTTP status code 500 or higher are technical errors. # They get logged and we don't change the response. 
diff --git a/dojo/api_v2/mixins.py b/dojo/api_v2/mixins.py index adab4e749b..e0770971f3 100644 --- a/dojo/api_v2/mixins.py +++ b/dojo/api_v2/mixins.py @@ -12,14 +12,16 @@ class DeletePreviewModelMixin: @extend_schema( - methods=['GET'], - responses={status.HTTP_200_OK: serializers.DeletePreviewSerializer(many=True)} + methods=["GET"], + responses={ + status.HTTP_200_OK: serializers.DeletePreviewSerializer(many=True) + }, ) @swagger_auto_schema( - method='get', - responses={'default': serializers.DeletePreviewSerializer(many=True)} + method="get", + responses={"default": serializers.DeletePreviewSerializer(many=True)}, ) - @action(detail=True, methods=["get"], filter_backends=[], suffix='List') + @action(detail=True, methods=["get"], filter_backends=[], suffix="List") def delete_preview(self, request, pk=None): object = self.get_object() @@ -36,8 +38,10 @@ def flatten(elem): rels = [ { "model": type(x).__name__, - "id": x.id if hasattr(x, 'id') else None, - "name": str(x) if not isinstance(x, Token) else "" + "id": x.id if hasattr(x, "id") else None, + "name": str(x) + if not isinstance(x, Token) + else "", } for x in flatten(rels) ] diff --git a/dojo/api_v2/permissions.py b/dojo/api_v2/permissions.py index c14f20695c..aac0eb7926 100644 --- a/dojo/api_v2/permissions.py +++ b/dojo/api_v2/permissions.py @@ -1,5 +1,9 @@ import re -from rest_framework.exceptions import ParseError, PermissionDenied, ValidationError +from rest_framework.exceptions import ( + ParseError, + PermissionDenied, + ValidationError, +) from dojo.api_v2.serializers import ( get_import_meta_data_from_dict, get_product_id_from_dict, @@ -84,15 +88,25 @@ def has_object_permission(self, request, view, obj): class UserHasCredentialPermission(permissions.BasePermission): def has_permission(self, request, view): - if request.data.get('product') is not None: - return check_post_permission(request, Cred_Mapping, "product", Permissions.Credential_Add) - if request.data.get('engagement') is not None: - return check_post_permission(request, Cred_Mapping, "engagement", Permissions.Credential_Add) - if request.data.get('test') is not None: - return check_post_permission(request, Cred_Mapping, "test", Permissions.Credential_Add) - if request.data.get('finding') is not None: - return check_post_permission(request, Cred_Mapping, "finding", Permissions.Credential_Add) - return check_post_permission(request, Cred_Mapping, "product", Permissions.Credential_Add) + if request.data.get("product") is not None: + return check_post_permission( + request, Cred_Mapping, "product", Permissions.Credential_Add + ) + if request.data.get("engagement") is not None: + return check_post_permission( + request, Cred_Mapping, "engagement", Permissions.Credential_Add + ) + if request.data.get("test") is not None: + return check_post_permission( + request, Cred_Mapping, "test", Permissions.Credential_Add + ) + if request.data.get("finding") is not None: + return check_post_permission( + request, Cred_Mapping, "finding", Permissions.Credential_Add + ) + return check_post_permission( + request, Cred_Mapping, "product", Permissions.Credential_Add + ) def has_object_permission(self, request, view, obj): return check_object_permission( @@ -107,19 +121,26 @@ def has_object_permission(self, request, view, obj): class UserHasDojoGroupPermission(permissions.BasePermission): def has_permission(self, request, view): if request.method == "GET": - return user_has_configuration_permission(request.user, "auth.view_group") + return user_has_configuration_permission( + 
request.user, "auth.view_group" + ) elif request.method == "POST": - return user_has_configuration_permission(request.user, "auth.add_group") + return user_has_configuration_permission( + request.user, "auth.add_group" + ) else: return True def has_object_permission(self, request, view, obj): if request.method == "GET": # Users need to be authorized to view groups in general and only the groups they are a member of - # because with the group they can see user information that might be considered as confidential + # because with the group they can see user information that might + # be considered as confidential return user_has_configuration_permission( request.user, "auth.view_group" - ) and user_has_permission(request.user, obj, Permissions.Group_View) + ) and user_has_permission( + request.user, obj, Permissions.Group_View + ) else: return check_object_permission( request, @@ -153,20 +174,29 @@ def has_permission(self, request, view): product_id = request.data.get("product", None) if product_id: object = get_object_or_404(Product, pk=product_id) - has_permission_result = has_permission_result and user_has_permission( - request.user, object, Permissions.Product_Edit + has_permission_result = ( + has_permission_result + and user_has_permission( + request.user, object, Permissions.Product_Edit + ) ) finding_id = request.data.get("finding", None) if finding_id: object = get_object_or_404(Finding, pk=finding_id) - has_permission_result = has_permission_result and user_has_permission( - request.user, object, Permissions.Finding_Edit + has_permission_result = ( + has_permission_result + and user_has_permission( + request.user, object, Permissions.Finding_Edit + ) ) endpoint_id = request.data.get("endpoint", None) if endpoint_id: object = get_object_or_404(Endpoint, pk=endpoint_id) - has_permission_result = has_permission_result and user_has_permission( - request.user, object, Permissions.Endpoint_Edit + has_permission_result = ( + has_permission_result + and user_has_permission( + request.user, object, Permissions.Endpoint_Edit + ) ) return has_permission_result else: @@ -176,30 +206,39 @@ def has_object_permission(self, request, view, obj): has_permission_result = True product = obj.product if product: - has_permission_result = has_permission_result and check_object_permission( - request, - product, - Permissions.Product_View, - Permissions.Product_Edit, - Permissions.Product_Edit, + has_permission_result = ( + has_permission_result + and check_object_permission( + request, + product, + Permissions.Product_View, + Permissions.Product_Edit, + Permissions.Product_Edit, + ) ) finding = obj.finding if finding: - has_permission_result = has_permission_result and check_object_permission( - request, - finding, - Permissions.Finding_View, - Permissions.Finding_Edit, - Permissions.Finding_Edit, + has_permission_result = ( + has_permission_result + and check_object_permission( + request, + finding, + Permissions.Finding_View, + Permissions.Finding_Edit, + Permissions.Finding_Edit, + ) ) endpoint = obj.endpoint if endpoint: - has_permission_result = has_permission_result and check_object_permission( - request, - endpoint, - Permissions.Endpoint_View, - Permissions.Endpoint_Edit, - Permissions.Endpoint_Edit, + has_permission_result = ( + has_permission_result + and check_object_permission( + request, + endpoint, + Permissions.Endpoint_View, + Permissions.Endpoint_Edit, + Permissions.Endpoint_Edit, + ) ) return has_permission_result @@ -300,7 +339,9 @@ class 
UserHasRiskAcceptancePermission(permissions.BasePermission): def has_permission(self, request, view): if UserHasRiskAcceptancePermission.path_risk_acceptance_post.match( request.path - ) or UserHasRiskAcceptancePermission.path_risk_acceptance.match(request.path): + ) or UserHasRiskAcceptancePermission.path_risk_acceptance.match( + request.path + ): return check_post_permission( request, Product, "product", Permissions.Risk_Acceptance ) @@ -311,7 +352,9 @@ def has_permission(self, request, view): def has_object_permission(self, request, view, obj): if UserHasRiskAcceptancePermission.path_risk_acceptance_post.match( request.path - ) or UserHasRiskAcceptancePermission.path_risk_acceptance.match(request.path): + ) or UserHasRiskAcceptancePermission.path_risk_acceptance.match( + request.path + ): return check_object_permission( request, obj, @@ -342,10 +385,14 @@ def has_permission(self, request, view): if ( UserHasFindingPermission.path_finding_post.match(request.path) or UserHasFindingPermission.path_finding.match(request.path) - or UserHasFindingPermission.path_stub_finding_post.match(request.path) + or UserHasFindingPermission.path_stub_finding_post.match( + request.path + ) or UserHasFindingPermission.path_stub_finding.match(request.path) ): - return check_post_permission(request, Test, "test", Permissions.Finding_Add) + return check_post_permission( + request, Test, "test", Permissions.Finding_Add + ) else: # related object only need object permission return True @@ -354,7 +401,9 @@ def has_object_permission(self, request, view, obj): if ( UserHasFindingPermission.path_finding_post.match(request.path) or UserHasFindingPermission.path_finding.match(request.path) - or UserHasFindingPermission.path_stub_finding_post.match(request.path) + or UserHasFindingPermission.path_stub_finding_post.match( + request.path + ) or UserHasFindingPermission.path_stub_finding.match(request.path) ): return check_object_permission( @@ -422,7 +471,8 @@ def has_permission(self, request, view): "Need engagement_id or product_name + engagement_name to perform import", ) else: - # the engagement doesn't exist, so we need to check if the user has requested and is allowed to use auto_create + # the engagement doesn't exist, so we need to check if the user has + # requested and is allowed to use auto_create return check_auto_create_permission( request.user, product, @@ -440,9 +490,18 @@ def has_permission(self, request, view): # permission check takes place before validation, so we don't have access to serializer.validated_data() # and we have to validate ourselves unfortunately - _, _, _, _, _, product_name, _, _, _, _ = get_import_meta_data_from_dict( - request.data - ) + ( + _, + _, + _, + _, + _, + product_name, + _, + _, + _, + _, + ) = get_import_meta_data_from_dict(request.data) product = get_target_product_if_exists(product_name) if not product: product_id = get_product_id_from_dict(request.data) @@ -467,7 +526,10 @@ def has_permission(self, request, view): class UserHasProductPermission(permissions.BasePermission): def has_permission(self, request, view): return check_post_permission( - request, Product_Type, "prod_type", Permissions.Product_Type_Add_Product + request, + Product_Type, + "prod_type", + Permissions.Product_Type_Add_Product, ) def has_object_permission(self, request, view, obj): @@ -553,7 +615,10 @@ def has_object_permission(self, request, view, obj): class UserHasProductTypeGroupPermission(permissions.BasePermission): def has_permission(self, request, view): return check_post_permission( - 
request, Product_Type, "product_type", Permissions.Product_Type_Group_Add + request, + Product_Type, + "product_type", + Permissions.Product_Type_Group_Add, ) def has_object_permission(self, request, view, obj): @@ -586,8 +651,12 @@ def has_permission(self, request, view): product_type = get_target_product_type_if_exists(product_type_name) product = get_target_product_if_exists(product_name, product_type_name) - engagement = get_target_engagement_if_exists(None, engagement_name, product) - test = get_target_test_if_exists(test_id, test_title, scan_type, engagement) + engagement = get_target_engagement_if_exists( + None, engagement_name, product + ) + test = get_target_test_if_exists( + test_id, test_title, scan_type, engagement + ) if test: # existing test, nothing special to check @@ -596,7 +665,9 @@ def has_permission(self, request, view): ) elif test_id: # test_id doesn't exist - raise serializers.ValidationError("Test '%s' doesn't exist" % test_id) + raise serializers.ValidationError( + "Test '%s' doesn't exist" % test_id + ) if not auto_create_context: raise_no_auto_create_import_validation_error( @@ -611,7 +682,8 @@ def has_permission(self, request, view): "Need test_id or product_name + engagement_name + scan_type to perform reimport", ) else: - # the test doesn't exist, so we need to check if the user has requested and is allowed to use auto_create + # the test doesn't exist, so we need to check if the user has + # requested and is allowed to use auto_create return check_auto_create_permission( request.user, product, @@ -665,7 +737,9 @@ def has_object_permission(self, request, view, obj): class UserHasTestImportPermission(permissions.BasePermission): def has_permission(self, request, view): - return check_post_permission(request, Test, "test", Permissions.Test_Edit) + return check_post_permission( + request, Test, "test", Permissions.Test_Edit + ) def has_object_permission(self, request, view, obj): return check_object_permission( @@ -696,7 +770,10 @@ def has_object_permission(self, request, view, obj): class UserHasProductAPIScanConfigurationPermission(permissions.BasePermission): def has_permission(self, request, view): return check_post_permission( - request, Product, "product", Permissions.Product_API_Scan_Configuration_Add + request, + Product, + "product", + Permissions.Product_API_Scan_Configuration_Add, ) def has_object_permission(self, request, view, obj): @@ -716,14 +793,20 @@ def has_permission(self, request, view): engagement_id = request.data.get("engagement", None) if engagement_id: object = get_object_or_404(Engagement, pk=engagement_id) - has_permission_result = has_permission_result and user_has_permission( - request.user, object, Permissions.Engagement_Edit + has_permission_result = ( + has_permission_result + and user_has_permission( + request.user, object, Permissions.Engagement_Edit + ) ) product_id = request.data.get("product", None) if product_id: object = get_object_or_404(Product, pk=product_id) - has_permission_result = has_permission_result and user_has_permission( - request.user, object, Permissions.Product_Edit + has_permission_result = ( + has_permission_result + and user_has_permission( + request.user, object, Permissions.Product_Edit + ) ) return has_permission_result else: @@ -733,21 +816,27 @@ def has_object_permission(self, request, view, obj): has_permission_result = True engagement = obj.engagement if engagement: - has_permission_result = has_permission_result and check_object_permission( - request, - engagement, - Permissions.Engagement_View, - 
Permissions.Engagement_Edit, - Permissions.Engagement_Edit, + has_permission_result = ( + has_permission_result + and check_object_permission( + request, + engagement, + Permissions.Engagement_View, + Permissions.Engagement_Edit, + Permissions.Engagement_Edit, + ) ) product = obj.product if product: - has_permission_result = has_permission_result and check_object_permission( - request, - product, - Permissions.Product_View, - Permissions.Product_Edit, - Permissions.Product_Edit, + has_permission_result = ( + has_permission_result + and check_object_permission( + request, + product, + Permissions.Product_View, + Permissions.Product_Edit, + Permissions.Product_Edit, + ) ) return has_permission_result @@ -759,20 +848,29 @@ def has_permission(self, request, view): engagement_id = request.data.get("engagement", None) if engagement_id: object = get_object_or_404(Engagement, pk=engagement_id) - has_permission_result = has_permission_result and user_has_permission( - request.user, object, Permissions.Engagement_Edit + has_permission_result = ( + has_permission_result + and user_has_permission( + request.user, object, Permissions.Engagement_Edit + ) ) finding_id = request.data.get("finding", None) if finding_id: object = get_object_or_404(Finding, pk=finding_id) - has_permission_result = has_permission_result and user_has_permission( - request.user, object, Permissions.Finding_Edit + has_permission_result = ( + has_permission_result + and user_has_permission( + request.user, object, Permissions.Finding_Edit + ) ) finding_group_id = request.data.get("finding_group", None) if finding_group_id: object = get_object_or_404(Finding_Group, pk=finding_group_id) - has_permission_result = has_permission_result and user_has_permission( - request.user, object, Permissions.Finding_Group_Edit + has_permission_result = ( + has_permission_result + and user_has_permission( + request.user, object, Permissions.Finding_Group_Edit + ) ) return has_permission_result else: @@ -782,30 +880,39 @@ def has_object_permission(self, request, view, obj): has_permission_result = True engagement = obj.engagement if engagement: - has_permission_result = has_permission_result and check_object_permission( - request, - engagement, - Permissions.Engagement_View, - Permissions.Engagement_Edit, - Permissions.Engagement_Edit, + has_permission_result = ( + has_permission_result + and check_object_permission( + request, + engagement, + Permissions.Engagement_View, + Permissions.Engagement_Edit, + Permissions.Engagement_Edit, + ) ) finding = obj.finding if finding: - has_permission_result = has_permission_result and check_object_permission( - request, - finding, - Permissions.Finding_View, - Permissions.Finding_Edit, - Permissions.Finding_Edit, + has_permission_result = ( + has_permission_result + and check_object_permission( + request, + finding, + Permissions.Finding_View, + Permissions.Finding_Edit, + Permissions.Finding_Edit, + ) ) finding_group = obj.finding_group if finding_group: - has_permission_result = has_permission_result and check_object_permission( - request, - finding_group, - Permissions.Finding_Group_View, - Permissions.Finding_Group_Edit, - Permissions.Finding_Group_Edit, + has_permission_result = ( + has_permission_result + and check_object_permission( + request, + finding_group, + Permissions.Finding_Group_View, + Permissions.Finding_Group_Edit, + Permissions.Finding_Group_Edit, + ) ) return has_permission_result @@ -921,17 +1028,23 @@ def check_auto_create_permission( if engagement: # existing engagement, nothing special 
to check - return user_has_permission(user, engagement, Permissions.Import_Scan_Result) + return user_has_permission( + user, engagement, Permissions.Import_Scan_Result + ) if product and product_name and engagement_name: if not user_has_permission(user, product, Permissions.Engagement_Add): raise PermissionDenied( - "No permission to create engagements in product '%s'" % product_name + "No permission to create engagements in product '%s'" + % product_name ) - if not user_has_permission(user, product, Permissions.Import_Scan_Result): + if not user_has_permission( + user, product, Permissions.Import_Scan_Result + ): raise PermissionDenied( - "No permission to import scans into product '%s'" % product_name + "No permission to import scans into product '%s'" + % product_name ) # all good @@ -945,11 +1058,15 @@ def check_auto_create_permission( ) if not product_type: - if not user_has_global_permission(user, Permissions.Product_Type_Add): + if not user_has_global_permission( + user, Permissions.Product_Type_Add + ): raise PermissionDenied( - "No permission to create product_type '%s'" % product_type_name + "No permission to create product_type '%s'" + % product_type_name ) - # new product type can be created with current user as owner, so all objects in it can be created as well + # new product type can be created with current user as owner, so + # all objects in it can be created as well return True else: if not user_has_permission( @@ -967,7 +1084,6 @@ def check_auto_create_permission( class UserHasConfigurationPermissionStaff(permissions.DjangoModelPermissions): - # Override map to also provide 'view' permissions perms_map = { "GET": ["%(app_label)s.view_%(model_name)s"], @@ -983,8 +1099,9 @@ def has_permission(self, request, view): return super().has_permission(request, view) -class UserHasConfigurationPermissionSuperuser(permissions.DjangoModelPermissions): - +class UserHasConfigurationPermissionSuperuser( + permissions.DjangoModelPermissions +): # Override map to also provide 'view' permissions perms_map = { "GET": ["%(app_label)s.view_%(model_name)s"], diff --git a/dojo/api_v2/prefetch/__init__.py b/dojo/api_v2/prefetch/__init__.py index 6a4a338484..f0449c7b30 100644 --- a/dojo/api_v2/prefetch/__init__.py +++ b/dojo/api_v2/prefetch/__init__.py @@ -1,4 +1,4 @@ from .mixins import PrefetchListMixin, PrefetchRetrieveMixin from .schema import get_prefetch_schema -__all__ = ['PrefetchListMixin', 'PrefetchRetrieveMixin', 'get_prefetch_schema'] +__all__ = ["PrefetchListMixin", "PrefetchRetrieveMixin", "get_prefetch_schema"] diff --git a/dojo/api_v2/prefetch/mixins.py b/dojo/api_v2/prefetch/mixins.py index 198c52994d..b43a44c7d7 100644 --- a/dojo/api_v2/prefetch/mixins.py +++ b/dojo/api_v2/prefetch/mixins.py @@ -8,7 +8,8 @@ def list(self, request, *args, **kwargs): prefetch_params = request.GET.get("prefetch", "").split(",") prefetcher = _Prefetcher() - # Apply the same operations as the standard list method defined in the django rest framework + # Apply the same operations as the standard list method defined in the + # django rest framework queryset = self.filter_queryset(self.get_queryset()) queryset = self.paginate_queryset(queryset) diff --git a/dojo/api_v2/prefetch/prefetcher.py b/dojo/api_v2/prefetch/prefetcher.py index 9da142a971..5d228165f5 100644 --- a/dojo/api_v2/prefetch/prefetcher.py +++ b/dojo/api_v2/prefetch/prefetcher.py @@ -7,7 +7,7 @@ SERIALIZER_DEFS_MODULE = "dojo.api_v2.serializers" -class _Prefetcher(): +class _Prefetcher: @staticmethod def _build_serializers(): """Returns a 
map model -> serializer where model is a django model and serializer is the corresponding @@ -16,13 +16,16 @@ def _build_serializers(): Returns: dict[model, serializer]: map of model to their serializer """ + def _is_model_serializer(obj): return inspect.isclass(obj) and issubclass(obj, ModelSerializer) serializers = dict() # We process all the serializers found in the module SERIALIZER_DEFS_MODULE. We restrict the scope to avoid # processing all the classes in the symbol table - available_serializers = inspect.getmembers(sys.modules[SERIALIZER_DEFS_MODULE], _is_model_serializer) + available_serializers = inspect.getmembers( + sys.modules[SERIALIZER_DEFS_MODULE], _is_model_serializer + ) for _, serializer in available_serializers: model = serializer.Meta.model @@ -80,9 +83,13 @@ def _prefetch(self, entry, fields_to_fetch): # Check if the field represents a many-to-many relationship as we need to instantiate # the serializer accordingly many = utils._is_many_to_many_relation(field_meta) - field_data = extra_serializer(many=many).to_representation(field_value) + field_data = extra_serializer(many=many).to_representation( + field_value + ) # For convenience in processing we store the field data in a list - field_data_list = field_data if type(field_data) is list else [field_data] + field_data_list = ( + field_data if isinstance(field_data, list) else [field_data] + ) if field_to_fetch not in self._prefetch_data: self._prefetch_data[field_to_fetch] = dict() diff --git a/dojo/api_v2/prefetch/schema.py b/dojo/api_v2/prefetch/schema.py index 568e45398b..6d04e75180 100644 --- a/dojo/api_v2/prefetch/schema.py +++ b/dojo/api_v2/prefetch/schema.py @@ -6,47 +6,99 @@ def get_prefetch_schema(methods, serializer): - """ Swagger / OpenAPI v2 (drf-yasg) Return a composable swagger schema that contains in the query the fields that can be prefetch from the model - supported by the serializer and in the reponse the structure of these fields in a new top-level attribute - named prefetch. + """Swagger / OpenAPI v2 (drf-yasg) Return a composable swagger schema that contains in the query the fields that can be prefetch from the model + supported by the serializer and in the reponse the structure of these fields in a new top-level attribute + named prefetch. 
- Returns: - ComposableSchema: A swagger schema + Returns: + ComposableSchema: A swagger schema """ prefetcher = _Prefetcher() fields = _get_prefetchable_fields(serializer()) - field_to_serializer = dict([(name, prefetcher._find_serializer(field_type)) for name, field_type in fields if prefetcher._find_serializer(field_type)]) - fields_to_refname = dict([(name, utils.get_serializer_ref_name(serializer())) for name, serializer in field_to_serializer.items()]) - fields_name = [name for name, field_type in fields if prefetcher._find_serializer(field_type)] + field_to_serializer = dict( + [ + (name, prefetcher._find_serializer(field_type)) + for name, field_type in fields + if prefetcher._find_serializer(field_type) + ] + ) + fields_to_refname = dict( + [ + (name, utils.get_serializer_ref_name(serializer())) + for name, serializer in field_to_serializer.items() + ] + ) + fields_name = [ + name + for name, field_type in fields + if prefetcher._find_serializer(field_type) + ] # New openapi parameter corresponding to the prefetchable fields - prefetch_params = [openapi.Parameter("prefetch", in_=openapi.IN_QUERY, required=False, type=openapi.TYPE_ARRAY, items=openapi.Items(type=openapi.TYPE_STRING, enum=fields_name))] - - additional_props = dict([(name, openapi.Schema(type=openapi.TYPE_OBJECT, read_only=True, additional_properties=LazySchemaRef(fields_to_refname[name], True))) for name in fields_name]) - prefetch_response = {"200": {"prefetch": openapi.Schema(type=openapi.TYPE_OBJECT, properties=additional_props)}} + prefetch_params = [ + openapi.Parameter( + "prefetch", + in_=openapi.IN_QUERY, + required=False, + type=openapi.TYPE_ARRAY, + items=openapi.Items(type=openapi.TYPE_STRING, enum=fields_name), + ) + ] + + additional_props = dict( + [ + ( + name, + openapi.Schema( + type=openapi.TYPE_OBJECT, + read_only=True, + additional_properties=LazySchemaRef( + fields_to_refname[name], True + ), + ), + ) + for name in fields_name + ] + ) + prefetch_response = { + "200": { + "prefetch": openapi.Schema( + type=openapi.TYPE_OBJECT, properties=additional_props + ) + } + } schema = extra_schema.IdentitySchema() for method in methods: - schema = schema.composeWith(extra_schema.ExtraParameters(method, prefetch_params)) - schema = schema.composeWith(extra_schema.ExtraResponseField(method, prefetch_response)) + schema = schema.composeWith( + extra_schema.ExtraParameters(method, prefetch_params) + ) + schema = schema.composeWith( + extra_schema.ExtraResponseField(method, prefetch_response) + ) return schema def _get_path_to_GET_serializer_map(generator): path_to_GET_serializer = dict() - for path, path_pattern, method, view in generator._get_paths_and_endpoints(): + for ( + path, + path_pattern, + method, + view, + ) in generator._get_paths_and_endpoints(): # print(path, path_pattern, method, view) - if method == 'GET': - if hasattr(view, 'get_serializer_class'): + if method == "GET": + if hasattr(view, "get_serializer_class"): path_to_GET_serializer[path] = view.get_serializer_class() return path_to_GET_serializer def prefetch_postprocessing_hook(result, generator, request, public): - """ OpenAPI v3 (drf-spectacular) Some endpoints are using the PrefetchListMixin and PrefetchRetrieveMixin. + """OpenAPI v3 (drf-spectacular) Some endpoints are using the PrefetchListMixin and PrefetchRetrieveMixin. These have nothing to do with Django prefetch_related. The endpoints have an @extend_schema configured with an extra parameter 'prefetch' This parameter contains an array of relations to prefetch. 
These prefetched models @@ -56,30 +108,78 @@ def prefetch_postprocessing_hook(result, generator, request, public): serializer_classes = _get_path_to_GET_serializer_map(generator) - paths = result.get('paths', {}) + paths = result.get("paths", {}) for path in paths: - if 'get' in paths[path] and 'parameters' in paths[path]['get']: - for parameter in paths[path]['get']['parameters']: - if parameter['name'] == 'prefetch': + if "get" in paths[path] and "parameters" in paths[path]["get"]: + for parameter in paths[path]["get"]["parameters"]: + if parameter["name"] == "prefetch": prefetcher = _Prefetcher() - fields = _get_prefetchable_fields(serializer_classes[path]()) + fields = _get_prefetchable_fields( + serializer_classes[path]() + ) - field_names = [name for name, field_type in fields if prefetcher._find_serializer(field_type)] + field_names = [ + name + for name, field_type in fields + if prefetcher._find_serializer(field_type) + ] - parameter['schema']['type'] = 'array' - parameter['schema']['items'] = { - 'type': "string", - 'enum': field_names + parameter["schema"]["type"] = "array" + parameter["schema"]["items"] = { + "type": "string", + "enum": field_names, } - field_to_serializer = dict([(name, prefetcher._find_serializer(field_type)) for name, field_type in fields if prefetcher._find_serializer(field_type)]) - fields_to_refname = dict([(name, utils.get_serializer_ref_name(serializer())) - for name, serializer in field_to_serializer.items()]) - properties = dict([(name, dict([("type", "object"), ("readOnly", True), ("additionalProperties", dict([("$ref", "#/components/schemas/" + fields_to_refname[name])]))])) - for name in field_names]) - ref = paths[path]['get']['responses']['200']['content']['application/json']['schema']['$ref'] - component_name = ref.split('/')[-1] - result['components']['schemas'][component_name]['properties']['prefetch'] = dict([("type", "object"), ("properties", properties)]) + field_to_serializer = dict( + [ + (name, prefetcher._find_serializer(field_type)) + for name, field_type in fields + if prefetcher._find_serializer(field_type) + ] + ) + fields_to_refname = dict( + [ + (name, utils.get_serializer_ref_name(serializer())) + for name, serializer in field_to_serializer.items() + ] + ) + properties = dict( + [ + ( + name, + dict( + [ + ("type", "object"), + ("readOnly", True), + ( + "additionalProperties", + dict( + [ + ( + "$ref", + "#/components/schemas/" + + fields_to_refname[ + name + ], + ) + ] + ), + ), + ] + ), + ) + for name in field_names + ] + ) + ref = paths[path]["get"]["responses"]["200"]["content"][ + "application/json" + ]["schema"]["$ref"] + component_name = ref.split("/")[-1] + result["components"]["schemas"][component_name][ + "properties" + ]["prefetch"] = dict( + [("type", "object"), ("properties", properties)] + ) return result diff --git a/dojo/api_v2/prefetch/utils.py b/dojo/api_v2/prefetch/utils.py index f9b76c18f0..833fe9ae6e 100644 --- a/dojo/api_v2/prefetch/utils.py +++ b/dojo/api_v2/prefetch/utils.py @@ -36,8 +36,11 @@ def _get_prefetchable_fields(serializer): Args: serializer (Serializer): [description] """ + def _is_field_prefetchable(field): - return _is_one_to_one_relation(field) or _is_many_to_many_relation(field) + return _is_one_to_one_relation(field) or _is_many_to_many_relation( + field + ) meta = getattr(serializer, "Meta", None) if meta is None: @@ -52,7 +55,7 @@ def _is_field_prefetchable(field): field = getattr(model, field_name) if _is_field_prefetchable(field): # ManyToMany relationship can be reverse - if 
hasattr(field, 'reverse') and field.reverse: + if hasattr(field, "reverse") and field.reverse: fields.append((field_name, field.field.model)) else: fields.append((field_name, field.field.related_model)) diff --git a/dojo/api_v2/schema/__init__.py b/dojo/api_v2/schema/__init__.py index cd8ea5bb23..6a69a16702 100644 --- a/dojo/api_v2/schema/__init__.py +++ b/dojo/api_v2/schema/__init__.py @@ -1,10 +1,17 @@ -from .extra_schema import IdentitySchema, ExtraParameters, ExtraResponseField, ComposableSchema +from .extra_schema import ( + IdentitySchema, + ExtraParameters, + ExtraResponseField, + ComposableSchema, +) from .utils import LazySchemaRef, try_apply, resolve_lazy_ref -__all__ = ['IdentitySchema', - 'ExtraParameters', - 'ExtraResponseField', - 'ComposableSchema', - 'LazySchemaRef', - 'try_apply', - 'resolve_lazy_ref'] +__all__ = [ + "IdentitySchema", + "ExtraParameters", + "ExtraResponseField", + "ComposableSchema", + "LazySchemaRef", + "try_apply", + "resolve_lazy_ref", +] diff --git a/dojo/api_v2/schema/extra_schema.py b/dojo/api_v2/schema/extra_schema.py index 3dc8e2ba4a..86fd565e37 100644 --- a/dojo/api_v2/schema/extra_schema.py +++ b/dojo/api_v2/schema/extra_schema.py @@ -10,6 +10,7 @@ class ComposableSchema: yielding a new composable schema whose transformation is defined as the function composition of the transformation of the two source schema. """ + def transform_operation(self, operation, resolver): """Defines an operation transformation @@ -17,7 +18,6 @@ def transform_operation(self, operation, resolver): operation (Operation): the operation to transform resolver (Resolver): the schema refs resolver """ - pass def composeWith(self, schema): """Allow two schema to be composed into a new schema. @@ -36,7 +36,9 @@ def composeWith(self, schema): class _Wrapper(ComposableSchema): def transform_operation(self, operation, resolver): - return schema.transform_operation(op(operation, resolver), resolver) + return schema.transform_operation( + op(operation, resolver), resolver + ) return _Wrapper() @@ -66,8 +68,8 @@ def transform_operation(self, operation, resolver): class ExtraParameters(ComposableSchema): - """Define a schema that can add parameters to the operation - """ + """Define a schema that can add parameters to the operation""" + def __init__(self, operation_name, extra_parameters, *args, **kwargs): """Initialize the schema @@ -90,8 +92,8 @@ def transform_operation(self, operation, resolver): class ExtraResponseField(ComposableSchema): - """Define a schema that can add fields to the responses of the operation - """ + """Define a schema that can add fields to the responses of the operation""" + def __init__(self, operation_name, extra_fields, *args, **kwargs): """Initialize the schema @@ -123,10 +125,16 @@ def transform_operation(self, operation, resolver): for code, params in self._extra_fields.items(): if code in responses: original_schema = responses[code]["schema"] - schema = original_schema if type(original_schema) is Schema else resolve_ref(original_schema, resolver) + schema = ( + original_schema + if isinstance(original_schema, Schema) + else resolve_ref(original_schema, resolver) + ) schema = copy.deepcopy(schema) for name, param in params.items(): - schema["properties"][name] = resolve_lazy_ref(param, resolver) + schema["properties"][name] = resolve_lazy_ref( + param, resolver + ) responses[code]["schema"] = schema return operation diff --git a/dojo/api_v2/schema/utils.py b/dojo/api_v2/schema/utils.py index a036fa5828..1276202fc8 100644 --- 
a/dojo/api_v2/schema/utils.py +++ b/dojo/api_v2/schema/utils.py @@ -5,9 +5,12 @@ class LazySchemaRef: """Utility class to support SchemaRef definition without knowing the resolver. The reference can be evaluated later in the context of a swagger generator """ + def __init__(self, schema_name, ignore_unresolved=False): # Bind curried version of the SchemaRef init - self.schema_ref = lambda resolver: SchemaRef(resolver, schema_name, ignore_unresolved) + self.schema_ref = lambda resolver: SchemaRef( + resolver, schema_name, ignore_unresolved + ) def apply(self, resolver): """Resolve the LazySchemaRef with the given resolver @@ -31,7 +34,7 @@ def try_apply(obj, resolver): Returns: object: the original object if it was not resolve otherwise the resolved LazySchemaRef """ - if type(obj) is LazySchemaRef: + if isinstance(obj, LazySchemaRef): return obj.apply(resolver) else: return obj @@ -46,13 +49,15 @@ def resolve_lazy_ref(schema, resolver): Returns: object: the schema without LazySchemaRef """ - if type(schema) is not Schema: + if not isinstance(schema, Schema): return try_apply(schema, resolver) if "properties" in schema: for prop_name, prop in schema["properties"].items(): schema["properties"][prop_name] = resolve_lazy_ref(prop, resolver) if "additionalProperties" in schema: - schema["additionalProperties"] = resolve_lazy_ref(schema["additionalProperties"], resolver) + schema["additionalProperties"] = resolve_lazy_ref( + schema["additionalProperties"], resolver + ) return schema diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 946bc72e5d..4a78631b8b 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -7,26 +7,92 @@ from rest_framework.fields import DictField, MultipleChoiceField from datetime import datetime from dojo.endpoint.utils import endpoint_filter -from dojo.importers.reimporter.utils import get_or_create_engagement, get_target_engagement_if_exists, get_target_product_by_id_if_exists, \ - get_target_product_if_exists, get_target_test_if_exists -from dojo.models import IMPORT_ACTIONS, SEVERITIES, SLA_Configuration, STATS_FIELDS, Dojo_User, Finding_Group, Product, \ - Engagement, Test, Finding, \ - User, Stub_Finding, Risk_Acceptance, \ - Finding_Template, Test_Type, Development_Environment, NoteHistory, \ - JIRA_Issue, Tool_Product_Settings, Tool_Configuration, Tool_Type, \ - Product_Type, JIRA_Instance, Endpoint, JIRA_Project, Cred_Mapping, \ - Notes, DojoMeta, Note_Type, App_Analysis, Endpoint_Status, Cred_User, \ - Sonarqube_Issue, Sonarqube_Issue_Transition, Endpoint_Params, \ - Regulation, System_Settings, FileUpload, SEVERITY_CHOICES, Test_Import, \ - Test_Import_Finding_Action, Product_Type_Member, Product_Member, \ - Product_Group, Product_Type_Group, Dojo_Group, Role, Global_Role, Dojo_Group_Member, \ - Language_Type, Languages, Notifications, NOTIFICATION_CHOICES, Engagement_Presets, \ - Network_Locations, UserContactInfo, Product_API_Scan_Configuration, DEFAULT_NOTIFICATION, \ - Vulnerability_Id, Vulnerability_Id_Template, get_current_date, \ - Question, TextQuestion, ChoiceQuestion, Answer, TextAnswer, ChoiceAnswer, \ - Engagement_Survey, Answered_Survey, General_Survey, Check_List - -from dojo.tools.factory import requires_file, get_choices_sorted, requires_tool_type +from dojo.importers.reimporter.utils import ( + get_or_create_engagement, + get_target_engagement_if_exists, + get_target_product_by_id_if_exists, + get_target_product_if_exists, + get_target_test_if_exists, +) +from dojo.models import ( + IMPORT_ACTIONS, + 
SEVERITIES, + SLA_Configuration, + STATS_FIELDS, + Dojo_User, + Finding_Group, + Product, + Engagement, + Test, + Finding, + User, + Stub_Finding, + Risk_Acceptance, + Finding_Template, + Test_Type, + Development_Environment, + NoteHistory, + JIRA_Issue, + Tool_Product_Settings, + Tool_Configuration, + Tool_Type, + Product_Type, + JIRA_Instance, + Endpoint, + JIRA_Project, + Cred_Mapping, + Notes, + DojoMeta, + Note_Type, + App_Analysis, + Endpoint_Status, + Cred_User, + Sonarqube_Issue, + Sonarqube_Issue_Transition, + Endpoint_Params, + Regulation, + System_Settings, + FileUpload, + SEVERITY_CHOICES, + Test_Import, + Test_Import_Finding_Action, + Product_Type_Member, + Product_Member, + Product_Group, + Product_Type_Group, + Dojo_Group, + Role, + Global_Role, + Dojo_Group_Member, + Language_Type, + Languages, + Notifications, + NOTIFICATION_CHOICES, + Engagement_Presets, + Network_Locations, + UserContactInfo, + Product_API_Scan_Configuration, + DEFAULT_NOTIFICATION, + Vulnerability_Id, + Vulnerability_Id_Template, + get_current_date, + Question, + TextQuestion, + ChoiceQuestion, + Answer, + TextAnswer, + ChoiceAnswer, + Engagement_Survey, + Answered_Survey, + General_Survey, + Check_List, +) + +from dojo.tools.factory import ( + requires_file, + get_choices_sorted, + requires_tool_type, +) from dojo.utils import is_scan_file_too_large from django.conf import settings from rest_framework import serializers @@ -44,10 +110,15 @@ import tagulous from dojo.endpoint.utils import endpoint_meta_import from dojo.importers.importer.importer import DojoDefaultImporter as Importer -from dojo.importers.reimporter.reimporter import DojoDefaultReImporter as ReImporter +from dojo.importers.reimporter.reimporter import ( + DojoDefaultReImporter as ReImporter, +) from dojo.authorization.authorization import user_has_permission from dojo.authorization.roles_permissions import Permissions -from dojo.finding.helper import save_vulnerability_ids, save_vulnerability_ids_template +from dojo.finding.helper import ( + save_vulnerability_ids, + save_vulnerability_ids_template, +) from dojo.user.utils import get_configuration_permissions_codenames @@ -56,43 +127,56 @@ def get_import_meta_data_from_dict(data): - test_id = data.get('test', None) + test_id = data.get("test", None) if test_id: if isinstance(test_id, Test): test_id = test_id.id elif isinstance(test_id, str) and not test_id.isdigit(): - raise serializers.ValidationError('test must be an integer') + raise serializers.ValidationError("test must be an integer") - scan_type = data.get('scan_type', None) + scan_type = data.get("scan_type", None) - test_title = data.get('test_title', None) + test_title = data.get("test_title", None) - engagement_id = data.get('engagement', None) + engagement_id = data.get("engagement", None) if engagement_id: if isinstance(engagement_id, Engagement): engagement_id = engagement_id.id elif isinstance(engagement_id, str) and not engagement_id.isdigit(): - raise serializers.ValidationError('engagement must be an integer') + raise serializers.ValidationError("engagement must be an integer") - engagement_name = data.get('engagement_name', None) + engagement_name = data.get("engagement_name", None) - product_name = data.get('product_name', None) - product_type_name = data.get('product_type_name', None) + product_name = data.get("product_name", None) + product_type_name = data.get("product_type_name", None) - auto_create_context = data.get('auto_create_context', None) + auto_create_context = data.get("auto_create_context", None) - 
deduplication_on_engagement = data.get('deduplication_on_engagement', False) - do_not_reactivate = data.get('do_not_reactivate', False) - return test_id, test_title, scan_type, engagement_id, engagement_name, product_name, product_type_name, auto_create_context, deduplication_on_engagement, do_not_reactivate + deduplication_on_engagement = data.get( + "deduplication_on_engagement", False + ) + do_not_reactivate = data.get("do_not_reactivate", False) + return ( + test_id, + test_title, + scan_type, + engagement_id, + engagement_name, + product_name, + product_type_name, + auto_create_context, + deduplication_on_engagement, + do_not_reactivate, + ) def get_product_id_from_dict(data): - product_id = data.get('product', None) + product_id = data.get("product", None) if product_id: if isinstance(product_id, Product): product_id = product_id.id elif isinstance(product_id, str) and not product_id.isdigit(): - raise serializers.ValidationError('product must be an integer') + raise serializers.ValidationError("product must be an integer") return product_id @@ -109,31 +193,46 @@ def __init__(self, *args, **kwargs): for sev in SEVERITIES: self.fields[sev.lower()] = StatusStatisticsSerializer() - self.fields['total'] = StatusStatisticsSerializer() + self.fields["total"] = StatusStatisticsSerializer() class DeltaStatisticsSerializer(serializers.Serializer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) for action in IMPORT_ACTIONS: - self.fields[action[1].lower()] = SeverityStatusStatisticsSerializer() + self.fields[ + action[1].lower() + ] = SeverityStatusStatisticsSerializer() class ImportStatisticsSerializer(serializers.Serializer): - before = SeverityStatusStatisticsSerializer(required=False, help_text="Finding statistics as stored in Defect Dojo before the import") - delta = DeltaStatisticsSerializer(required=False, help_text="Finding statistics of modifications made by the reimport. Only available when TRACK_IMPORT_HISTORY hass not disabled.") - after = SeverityStatusStatisticsSerializer(help_text="Finding statistics as stored in Defect Dojo after the import") + before = SeverityStatusStatisticsSerializer( + required=False, + help_text="Finding statistics as stored in Defect Dojo before the import", + ) + delta = DeltaStatisticsSerializer( + required=False, + help_text="Finding statistics of modifications made by the reimport. Only available when TRACK_IMPORT_HISTORY hass not disabled.", + ) + after = SeverityStatusStatisticsSerializer( + help_text="Finding statistics as stored in Defect Dojo after the import" + ) -@extend_schema_field(serializers.ListField(child=serializers.CharField())) # also takes basic python types +@extend_schema_field( + serializers.ListField(child=serializers.CharField()) +) # also takes basic python types class TagListSerializerField(serializers.ListField): child = serializers.CharField() default_error_messages = { - 'not_a_list': _( - 'Expected a list of items but got type "{input_type}".'), - 'invalid_json': _('Invalid json list. A tag list submitted in string' - ' form must be valid json.'), - 'not_a_str': _('All list items must be of string type.') + "not_a_list": _( + 'Expected a list of items but got type "{input_type}".' + ), + "invalid_json": _( + "Invalid json list. A tag list submitted in string" + " form must be valid json." 
+ ), + "not_a_str": _("All list items must be of string type."), } order_by = None @@ -141,7 +240,7 @@ def __init__(self, **kwargs): pretty_print = kwargs.pop("pretty_print", True) style = kwargs.pop("style", {}) - kwargs["style"] = {'base_template': 'textarea.html'} + kwargs["style"] = {"base_template": "textarea.html"} kwargs["style"].update(style) super(TagListSerializerField, self).__init__(**kwargs) @@ -155,17 +254,17 @@ def to_internal_value(self, data): try: data = json.loads(data) except ValueError: - self.fail('invalid_json') + self.fail("invalid_json") - logger.debug('data as json: %s', data) + logger.debug("data as json: %s", data) if not isinstance(data, list): - self.fail('not_a_list', input_type=type(data).__name__) + self.fail("not_a_list", input_type=type(data).__name__) # data_safe = [] for s in data: if not isinstance(s, six.string_types): - self.fail('not_a_str') + self.fail("not_a_str") self.child.run_validation(s) @@ -184,13 +283,17 @@ def to_internal_value(self, data): def to_representation(self, value): if not isinstance(value, list): # we can't use isinstance because TagRelatedManager is non-existing class - # it cannot be imported or referenced, so we fallback to string comparison - if type(value).__name__ == 'TagRelatedManager': + # it cannot be imported or referenced, so we fallback to string + # comparison + if type(value).__name__ == "TagRelatedManager": value = value.get_tag_list() elif isinstance(value, str): value = tagulous.utils.parse_tags(value) else: - raise ValueError('unable to convert %s into list of tags' % type(value).__name__) + raise ValueError( + "unable to convert %s into list of tags" + % type(value).__name__ + ) return value @@ -206,7 +309,8 @@ def update(self, instance, validated_data): to_be_tagged, validated_data = self._pop_tags(validated_data) tag_object = super(TaggitSerializer, self).update( - instance, validated_data) + instance, validated_data + ) return self._save_tags(tag_object, to_be_tagged) @@ -250,7 +354,8 @@ def __getitem__(self, item): def __str__(self): if self.pretty_print: return json.dumps( - self, sort_keys=True, indent=4, separators=(',', ': ')) + self, sort_keys=True, indent=4, separators=(",", ": ") + ) else: return json.dumps(self) @@ -258,12 +363,17 @@ def __str__(self): class RequestResponseSerializerField(serializers.ListSerializer): child = DictField(child=serializers.CharField()) default_error_messages = { - 'not_a_list': _( - 'Expected a list of items but got type "{input_type}".'), - 'invalid_json': _('Invalid json list. A tag list submitted in string' - ' form must be valid json.'), - 'not_a_dict': _('All list items must be of dict type with keys \'request\' and \'response\''), - 'not_a_str': _('All values in the dict must be of string type.') + "not_a_list": _( + 'Expected a list of items but got type "{input_type}".' + ), + "invalid_json": _( + "Invalid json list. A tag list submitted in string" + " form must be valid json." 
+ ), + "not_a_dict": _( + "All list items must be of dict type with keys 'request' and 'response'" + ), + "not_a_str": _("All values in the dict must be of string type."), } order_by = None @@ -271,7 +381,7 @@ def __init__(self, **kwargs): pretty_print = kwargs.pop("pretty_print", True) style = kwargs.pop("style", {}) - kwargs["style"] = {'base_template': 'textarea.html'} + kwargs["style"] = {"base_template": "textarea.html"} kwargs["style"].update(style) if "data" in kwargs: @@ -291,21 +401,21 @@ def to_internal_value(self, data): try: data = json.loads(data) except ValueError: - self.fail('invalid_json') + self.fail("invalid_json") if not isinstance(data, list): - self.fail('not_a_list', input_type=type(data).__name__) + self.fail("not_a_list", input_type=type(data).__name__) for s in data: if not isinstance(s, dict): - self.fail('not_a_dict', input_type=type(s).__name__) + self.fail("not_a_dict", input_type=type(s).__name__) - request = s.get('request', None) - response = s.get('response', None) + request = s.get("request", None) + response = s.get("response", None) if not isinstance(request, str): - self.fail('not_a_str', input_type=type(request).__name__) + self.fail("not_a_str", input_type=type(request).__name__) if not isinstance(response, str): - self.fail('not_a_str', input_type=type(request).__name__) + self.fail("not_a_str", input_type=type(request).__name__) self.child.run_validation(s) return data @@ -318,7 +428,13 @@ def to_representation(self, value): burps = value.all().order_by(*self.order_by) else: burps = value.all() - value = [{'request': burp.get_request(), 'response': burp.get_response()} for burp in burps] + value = [ + { + "request": burp.get_request(), + "response": burp.get_response(), + } + for burp in burps + ] return value @@ -328,18 +444,24 @@ class BurpRawRequestResponseSerializer(serializers.Serializer): class MetaSerializer(serializers.ModelSerializer): - product = serializers.PrimaryKeyRelatedField(queryset=Product.objects.all(), - required=False, - default=None, - allow_null=True) - endpoint = serializers.PrimaryKeyRelatedField(queryset=Endpoint.objects.all(), - required=False, - default=None, - allow_null=True) - finding = serializers.PrimaryKeyRelatedField(queryset=Finding.objects.all(), - required=False, - default=None, - allow_null=True) + product = serializers.PrimaryKeyRelatedField( + queryset=Product.objects.all(), + required=False, + default=None, + allow_null=True, + ) + endpoint = serializers.PrimaryKeyRelatedField( + queryset=Endpoint.objects.all(), + required=False, + default=None, + allow_null=True, + ) + finding = serializers.PrimaryKeyRelatedField( + queryset=Finding.objects.all(), + required=False, + default=None, + allow_null=True, + ) def validate(self, data): DojoMeta(**data).clean() @@ -347,65 +469,108 @@ def validate(self, data): class Meta: model = DojoMeta - fields = '__all__' + fields = "__all__" class ProductMetaSerializer(serializers.ModelSerializer): class Meta: model = DojoMeta - fields = ('name', 'value') + fields = ("name", "value") class UserSerializer(serializers.ModelSerializer): last_login = serializers.DateTimeField(read_only=True) - password = serializers.CharField(write_only=True, style={'input_type': 'password'}, required=False, - validators=[validate_password]) + password = serializers.CharField( + write_only=True, + style={"input_type": "password"}, + required=False, + validators=[validate_password], + ) configuration_permissions = serializers.PrimaryKeyRelatedField( - allow_null=True, - 
queryset=Permission.objects.filter(codename__in=get_configuration_permissions_codenames()), - many=True, - required=False, - source='user_permissions') + allow_null=True, + queryset=Permission.objects.filter( + codename__in=get_configuration_permissions_codenames() + ), + many=True, + required=False, + source="user_permissions", + ) class Meta: model = Dojo_User - fields = ('id', 'username', 'first_name', 'last_name', 'email', 'last_login', 'is_active', 'is_superuser', 'password', 'configuration_permissions') + fields = ( + "id", + "username", + "first_name", + "last_name", + "email", + "last_login", + "is_active", + "is_superuser", + "password", + "configuration_permissions", + ) def to_representation(self, instance): ret = super().to_representation(instance) - # This will show only "configuration_permissions" even if user has also other permissions - all_permissions = set(ret['configuration_permissions']) - allowed_configuration_permissions = set(self.fields['configuration_permissions'].child_relation.queryset.values_list('id', flat=True)) - ret['configuration_permissions'] = list(all_permissions.intersection(allowed_configuration_permissions)) + # This will show only "configuration_permissions" even if user has also + # other permissions + all_permissions = set(ret["configuration_permissions"]) + allowed_configuration_permissions = set( + self.fields[ + "configuration_permissions" + ].child_relation.queryset.values_list("id", flat=True) + ) + ret["configuration_permissions"] = list( + all_permissions.intersection(allowed_configuration_permissions) + ) return ret def update(self, instance, validated_data): new_configuration_permissions = None - if 'user_permissions' in validated_data: # This field was renamed from "configuration_permissions" in the meantime - new_configuration_permissions = set(validated_data.pop('user_permissions')) + if ( + "user_permissions" in validated_data + ): # This field was renamed from "configuration_permissions" in the meantime + new_configuration_permissions = set( + validated_data.pop("user_permissions") + ) instance = super().update(instance, validated_data) - # This will update only Permissions from category "configuration_permissions". Others will be untouched + # This will update only Permissions from category + # "configuration_permissions". 
Others will be untouched if new_configuration_permissions: - allowed_configuration_permissions = set(self.fields['configuration_permissions'].child_relation.queryset.all()) - non_configuration_permissions = set(instance.user_permissions.all()) - allowed_configuration_permissions - new_permissions = non_configuration_permissions.union(new_configuration_permissions) + allowed_configuration_permissions = set( + self.fields[ + "configuration_permissions" + ].child_relation.queryset.all() + ) + non_configuration_permissions = ( + set(instance.user_permissions.all()) + - allowed_configuration_permissions + ) + new_permissions = non_configuration_permissions.union( + new_configuration_permissions + ) instance.user_permissions.set(new_permissions) return instance def create(self, validated_data): - if 'password' in validated_data: - password = validated_data.pop('password') + if "password" in validated_data: + password = validated_data.pop("password") else: password = None new_configuration_permissions = None - if 'user_permissions' in validated_data: # This field was renamed from "configuration_permissions" in the meantime - new_configuration_permissions = set(validated_data.pop('user_permissions')) + if ( + "user_permissions" in validated_data + ): # This field was renamed from "configuration_permissions" in the meantime + new_configuration_permissions = set( + validated_data.pop("user_permissions") + ) user = Dojo_User.objects.create(**validated_data) @@ -414,7 +579,8 @@ def create(self, validated_data): else: user.set_unusable_password() - # This will create only Permissions from category "configuration_permissions". There are no other Permissions. + # This will create only Permissions from category + # "configuration_permissions". There are no other Permissions. if new_configuration_permissions: user.user_permissions.set(new_configuration_permissions) @@ -422,53 +588,61 @@ def create(self, validated_data): return user def validate(self, data): - if self.instance is not None: instance_is_superuser = self.instance.is_superuser else: instance_is_superuser = False - data_is_superuser = data.get('is_superuser', False) - if not self.context['request'].user.is_superuser and (instance_is_superuser or data_is_superuser): - raise ValidationError('Only superusers are allowed to add or edit superusers.') + data_is_superuser = data.get("is_superuser", False) + if not self.context["request"].user.is_superuser and ( + instance_is_superuser or data_is_superuser + ): + raise ValidationError( + "Only superusers are allowed to add or edit superusers." 
+ ) - if self.context['request'].method in ['PATCH', 'PUT'] and 'password' in data: - raise ValidationError('Update of password though API is not allowed') + if ( + self.context["request"].method in ["PATCH", "PUT"] + and "password" in data + ): + raise ValidationError( + "Update of password though API is not allowed" + ) else: return super().validate(data) class UserContactInfoSerializer(serializers.ModelSerializer): - class Meta: model = UserContactInfo - fields = '__all__' + fields = "__all__" class UserStubSerializer(serializers.ModelSerializer): class Meta: model = Dojo_User - fields = ('id', 'username', 'first_name', 'last_name') + fields = ("id", "username", "first_name", "last_name") class RoleSerializer(serializers.ModelSerializer): - class Meta: model = Role - fields = '__all__' + fields = "__all__" class DojoGroupSerializer(serializers.ModelSerializer): - configuration_permissions = serializers.PrimaryKeyRelatedField( - allow_null=True, - queryset=Permission.objects.filter(codename__in=get_configuration_permissions_codenames()), - many=True, - required=False, - source='auth_group.permissions') + allow_null=True, + queryset=Permission.objects.filter( + codename__in=get_configuration_permissions_codenames() + ), + many=True, + required=False, + source="auth_group.permissions", + ) class Meta: model = Dojo_Group - exclude = ('auth_group', ) + exclude = ("auth_group",) def to_representation(self, instance): if not instance.auth_group: @@ -480,21 +654,34 @@ def to_representation(self, instance): auth_group.user_set.add(member) instance.save() ret = super().to_representation(instance) - # This will show only "configuration_permissions" even if user has also other permissions - all_permissions = set(ret['configuration_permissions']) - allowed_configuration_permissions = set(self.fields['configuration_permissions'].child_relation.queryset.values_list('id', flat=True)) - ret['configuration_permissions'] = list(all_permissions.intersection(allowed_configuration_permissions)) + # This will show only "configuration_permissions" even if user has also + # other permissions + all_permissions = set(ret["configuration_permissions"]) + allowed_configuration_permissions = set( + self.fields[ + "configuration_permissions" + ].child_relation.queryset.values_list("id", flat=True) + ) + ret["configuration_permissions"] = list( + all_permissions.intersection(allowed_configuration_permissions) + ) return ret def create(self, validated_data): new_configuration_permissions = None - if 'auth_group' in validated_data and 'permissions' in validated_data['auth_group']: # This field was renamed from "configuration_permissions" in the meantime - new_configuration_permissions = set(validated_data.pop('auth_group')['permissions']) + if ( + "auth_group" in validated_data + and "permissions" in validated_data["auth_group"] + ): # This field was renamed from "configuration_permissions" in the meantime + new_configuration_permissions = set( + validated_data.pop("auth_group")["permissions"] + ) instance = super().create(validated_data) - # This will update only Permissions from category "configuration_permissions". There are no other Permissions. + # This will update only Permissions from category + # "configuration_permissions". There are no other Permissions. 
if new_configuration_permissions: instance.auth_group.permissions.set(new_configuration_permissions) @@ -502,56 +689,93 @@ def create(self, validated_data): def update(self, instance, validated_data): new_configuration_permissions = None - if 'auth_group' in validated_data and 'permissions' in validated_data['auth_group']: # This field was renamed from "configuration_permissions" in the meantime - new_configuration_permissions = set(validated_data.pop('auth_group')['permissions']) + if ( + "auth_group" in validated_data + and "permissions" in validated_data["auth_group"] + ): # This field was renamed from "configuration_permissions" in the meantime + new_configuration_permissions = set( + validated_data.pop("auth_group")["permissions"] + ) instance = super().update(instance, validated_data) - # This will update only Permissions from category "configuration_permissions". Others will be untouched + # This will update only Permissions from category + # "configuration_permissions". Others will be untouched if new_configuration_permissions: - allowed_configuration_permissions = set(self.fields['configuration_permissions'].child_relation.queryset.all()) - non_configuration_permissions = set(instance.auth_group.permissions.all()) - allowed_configuration_permissions - new_permissions = non_configuration_permissions.union(new_configuration_permissions) + allowed_configuration_permissions = set( + self.fields[ + "configuration_permissions" + ].child_relation.queryset.all() + ) + non_configuration_permissions = ( + set(instance.auth_group.permissions.all()) + - allowed_configuration_permissions + ) + new_permissions = non_configuration_permissions.union( + new_configuration_permissions + ) instance.auth_group.permissions.set(new_permissions) return instance class DojoGroupMemberSerializer(serializers.ModelSerializer): - class Meta: model = Dojo_Group_Member - fields = '__all__' + fields = "__all__" def validate(self, data): - if self.instance is not None and \ - data.get('group') != self.instance.group and \ - not user_has_permission(self.context['request'].user, data.get('group'), Permissions.Group_Manage_Members): - raise PermissionDenied('You are not permitted to add a user to this group') - - if self.instance is None or \ - data.get('group') != self.instance.group or \ - data.get('user') != self.instance.user: - members = Dojo_Group_Member.objects.filter(group=data.get('group'), user=data.get('user')) - if members.count() > 0: - raise ValidationError('Dojo_Group_Member already exists') + if ( + self.instance is not None + and data.get("group") != self.instance.group + and not user_has_permission( + self.context["request"].user, + data.get("group"), + Permissions.Group_Manage_Members, + ) + ): + raise PermissionDenied( + "You are not permitted to add a user to this group" + ) - if self.instance is not None and not data.get('role').is_owner: - owners = Dojo_Group_Member.objects.filter(group=data.get('group'), role__is_owner=True).exclude(id=self.instance.id).count() + if ( + self.instance is None + or data.get("group") != self.instance.group + or data.get("user") != self.instance.user + ): + members = Dojo_Group_Member.objects.filter( + group=data.get("group"), user=data.get("user") + ) + if members.count() > 0: + raise ValidationError("Dojo_Group_Member already exists") + + if self.instance is not None and not data.get("role").is_owner: + owners = ( + Dojo_Group_Member.objects.filter( + group=data.get("group"), role__is_owner=True + ) + .exclude(id=self.instance.id) + .count() + ) if owners < 1: - 
raise ValidationError('There must be at least one owner') - - if data.get('role').is_owner and not user_has_permission(self.context['request'].user, data.get('group'), Permissions.Group_Add_Owner): - raise PermissionDenied('You are not permitted to add a user as Owner to this group') + raise ValidationError("There must be at least one owner") + + if data.get("role").is_owner and not user_has_permission( + self.context["request"].user, + data.get("group"), + Permissions.Group_Add_Owner, + ): + raise PermissionDenied( + "You are not permitted to add a user as Owner to this group" + ) return data class GlobalRoleSerializer(serializers.ModelSerializer): - class Meta: model = Global_Role - fields = '__all__' + fields = "__all__" def validate(self, data): user = None @@ -561,30 +785,31 @@ def validate(self, data): user = self.instance.user group = self.instance.group - if 'user' in data: - user = data.get('user') - if 'group' in data: - group = data.get('group') + if "user" in data: + user = data.get("user") + if "group" in data: + group = data.get("group") if user is None and group is None: raise ValidationError("Global_Role must have either user or group") if user is not None and group is not None: - raise ValidationError("Global_Role cannot have both user and group") + raise ValidationError( + "Global_Role cannot have both user and group" + ) return data class AddUserSerializer(serializers.ModelSerializer): - class Meta: model = User - fields = ('id', 'username') + fields = ("id", "username") class NoteTypeSerializer(serializers.ModelSerializer): class Meta: model = Note_Type - fields = '__all__' + fields = "__all__" class NoteHistorySerializer(serializers.ModelSerializer): @@ -593,7 +818,7 @@ class NoteHistorySerializer(serializers.ModelSerializer): class Meta: model = NoteHistory - fields = '__all__' + fields = "__all__" class NoteSerializer(serializers.ModelSerializer): @@ -603,14 +828,14 @@ class NoteSerializer(serializers.ModelSerializer): note_type = NoteTypeSerializer(read_only=True, many=False) def update(self, instance, validated_data): - instance.entry = validated_data.get('entry') + instance.entry = validated_data.get("entry") instance.edited = True - instance.editor = self.context['request'].user + instance.editor = self.context["request"].user instance.edit_time = timezone.now() history = NoteHistory( data=instance.entry, time=instance.edit_time, - current_editor=instance.editor + current_editor=instance.editor, ) history.save() instance.history.add(history) @@ -619,7 +844,7 @@ def update(self, instance, validated_data): class Meta: model = Notes - fields = '__all__' + fields = "__all__" class FileSerializer(serializers.ModelSerializer): @@ -627,7 +852,7 @@ class FileSerializer(serializers.ModelSerializer): class Meta: model = FileUpload - fields = '__all__' + fields = "__all__" class RawFileSerializer(serializers.ModelSerializer): @@ -635,7 +860,7 @@ class RawFileSerializer(serializers.ModelSerializer): class Meta: model = FileUpload - fields = ['file'] + fields = ["file"] class RiskAcceptanceProofSerializer(serializers.ModelSerializer): @@ -643,119 +868,192 @@ class RiskAcceptanceProofSerializer(serializers.ModelSerializer): class Meta: model = Risk_Acceptance - fields = ['path'] + fields = ["path"] class ProductMemberSerializer(serializers.ModelSerializer): - class Meta: model = Product_Member - fields = '__all__' + fields = "__all__" def validate(self, data): - if self.instance is not None and \ - data.get('product') != self.instance.product and \ - not 
user_has_permission(self.context['request'].user, data.get('product'), Permissions.Product_Manage_Members): - raise PermissionDenied('You are not permitted to add a member to this product') - - if self.instance is None or \ - data.get('product') != self.instance.product or \ - data.get('user') != self.instance.user: - members = Product_Member.objects.filter(product=data.get('product'), user=data.get('user')) - if members.count() > 0: - raise ValidationError('Product_Member already exists') + if ( + self.instance is not None + and data.get("product") != self.instance.product + and not user_has_permission( + self.context["request"].user, + data.get("product"), + Permissions.Product_Manage_Members, + ) + ): + raise PermissionDenied( + "You are not permitted to add a member to this product" + ) - if data.get('role').is_owner and not user_has_permission(self.context['request'].user, data.get('product'), Permissions.Product_Member_Add_Owner): - raise PermissionDenied('You are not permitted to add a member as Owner to this product') + if ( + self.instance is None + or data.get("product") != self.instance.product + or data.get("user") != self.instance.user + ): + members = Product_Member.objects.filter( + product=data.get("product"), user=data.get("user") + ) + if members.count() > 0: + raise ValidationError("Product_Member already exists") + + if data.get("role").is_owner and not user_has_permission( + self.context["request"].user, + data.get("product"), + Permissions.Product_Member_Add_Owner, + ): + raise PermissionDenied( + "You are not permitted to add a member as Owner to this product" + ) return data class ProductGroupSerializer(serializers.ModelSerializer): - class Meta: model = Product_Group - fields = '__all__' + fields = "__all__" def validate(self, data): - if self.instance is not None and \ - data.get('product') != self.instance.product and \ - not user_has_permission(self.context['request'].user, data.get('product'), Permissions.Product_Group_Add): - raise PermissionDenied('You are not permitted to add a group to this product') - - if self.instance is None or \ - data.get('product') != self.instance.product or \ - data.get('group') != self.instance.group: - members = Product_Group.objects.filter(product=data.get('product'), group=data.get('group')) - if members.count() > 0: - raise ValidationError('Product_Group already exists') + if ( + self.instance is not None + and data.get("product") != self.instance.product + and not user_has_permission( + self.context["request"].user, + data.get("product"), + Permissions.Product_Group_Add, + ) + ): + raise PermissionDenied( + "You are not permitted to add a group to this product" + ) - if data.get('role').is_owner and not user_has_permission(self.context['request'].user, data.get('product'), Permissions.Product_Group_Add_Owner): - raise PermissionDenied('You are not permitted to add a group as Owner to this product') + if ( + self.instance is None + or data.get("product") != self.instance.product + or data.get("group") != self.instance.group + ): + members = Product_Group.objects.filter( + product=data.get("product"), group=data.get("group") + ) + if members.count() > 0: + raise ValidationError("Product_Group already exists") + + if data.get("role").is_owner and not user_has_permission( + self.context["request"].user, + data.get("product"), + Permissions.Product_Group_Add_Owner, + ): + raise PermissionDenied( + "You are not permitted to add a group as Owner to this product" + ) return data class 
ProductTypeMemberSerializer(serializers.ModelSerializer): - class Meta: model = Product_Type_Member - fields = '__all__' + fields = "__all__" def validate(self, data): - if self.instance is not None and \ - data.get('product_type') != self.instance.product_type and \ - not user_has_permission(self.context['request'].user, data.get('product_type'), Permissions.Product_Type_Manage_Members): - raise PermissionDenied('You are not permitted to add a member to this product type') - - if self.instance is None or \ - data.get('product_type') != self.instance.product_type or \ - data.get('user') != self.instance.user: - members = Product_Type_Member.objects.filter(product_type=data.get('product_type'), user=data.get('user')) - if members.count() > 0: - raise ValidationError('Product_Type_Member already exists') + if ( + self.instance is not None + and data.get("product_type") != self.instance.product_type + and not user_has_permission( + self.context["request"].user, + data.get("product_type"), + Permissions.Product_Type_Manage_Members, + ) + ): + raise PermissionDenied( + "You are not permitted to add a member to this product type" + ) - if self.instance is not None and not data.get('role').is_owner: - owners = Product_Type_Member.objects.filter(product_type=data.get('product_type'), role__is_owner=True).exclude(id=self.instance.id).count() + if ( + self.instance is None + or data.get("product_type") != self.instance.product_type + or data.get("user") != self.instance.user + ): + members = Product_Type_Member.objects.filter( + product_type=data.get("product_type"), user=data.get("user") + ) + if members.count() > 0: + raise ValidationError("Product_Type_Member already exists") + + if self.instance is not None and not data.get("role").is_owner: + owners = ( + Product_Type_Member.objects.filter( + product_type=data.get("product_type"), role__is_owner=True + ) + .exclude(id=self.instance.id) + .count() + ) if owners < 1: - raise ValidationError('There must be at least one owner') - - if data.get('role').is_owner and not user_has_permission(self.context['request'].user, data.get('product_type'), Permissions.Product_Type_Member_Add_Owner): - raise PermissionDenied('You are not permitted to add a member as Owner to this product type') + raise ValidationError("There must be at least one owner") + + if data.get("role").is_owner and not user_has_permission( + self.context["request"].user, + data.get("product_type"), + Permissions.Product_Type_Member_Add_Owner, + ): + raise PermissionDenied( + "You are not permitted to add a member as Owner to this product type" + ) return data class ProductTypeGroupSerializer(serializers.ModelSerializer): - class Meta: model = Product_Type_Group - fields = '__all__' + fields = "__all__" def validate(self, data): - if self.instance is not None and \ - data.get('product_type') != self.instance.product_type and \ - not user_has_permission(self.context['request'].user, data.get('product_type'), Permissions.Product_Type_Group_Add): - raise PermissionDenied('You are not permitted to add a group to this product type') - - if self.instance is None or \ - data.get('product_type') != self.instance.product_type or \ - data.get('group') != self.instance.group: - members = Product_Type_Group.objects.filter(product_type=data.get('product_type'), group=data.get('group')) - if members.count() > 0: - raise ValidationError('Product_Type_Group already exists') + if ( + self.instance is not None + and data.get("product_type") != self.instance.product_type + and not user_has_permission( + 
self.context["request"].user, + data.get("product_type"), + Permissions.Product_Type_Group_Add, + ) + ): + raise PermissionDenied( + "You are not permitted to add a group to this product type" + ) - if data.get('role').is_owner and not user_has_permission(self.context['request'].user, data.get('product_type'), Permissions.Product_Type_Group_Add_Owner): - raise PermissionDenied('You are not permitted to add a group as Owner to this product type') + if ( + self.instance is None + or data.get("product_type") != self.instance.product_type + or data.get("group") != self.instance.group + ): + members = Product_Type_Group.objects.filter( + product_type=data.get("product_type"), group=data.get("group") + ) + if members.count() > 0: + raise ValidationError("Product_Type_Group already exists") + + if data.get("role").is_owner and not user_has_permission( + self.context["request"].user, + data.get("product_type"), + Permissions.Product_Type_Group_Add_Owner, + ): + raise PermissionDenied( + "You are not permitted to add a group as Owner to this product type" + ) return data class ProductTypeSerializer(serializers.ModelSerializer): - class Meta: model = Product_Type - fields = '__all__' + fields = "__all__" class EngagementSerializer(TaggitSerializer, serializers.ModelSerializer): @@ -763,52 +1061,62 @@ class EngagementSerializer(TaggitSerializer, serializers.ModelSerializer): class Meta: model = Engagement - exclude = ('inherited_tags', ) + exclude = ("inherited_tags",) def validate(self, data): - if self.context['request'].method == 'POST': - if data.get('target_start') > data.get('target_end'): + if self.context["request"].method == "POST": + if data.get("target_start") > data.get("target_end"): raise serializers.ValidationError( - 'Your target start date exceeds your target end date') + "Your target start date exceeds your target end date" + ) return data def build_relational_field(self, field_name, relation_info): - if field_name == 'notes': - return NoteSerializer, {'many': True, 'read_only': True} - if field_name == 'files': - return FileSerializer, {'many': True, 'read_only': True} + if field_name == "notes": + return NoteSerializer, {"many": True, "read_only": True} + if field_name == "files": + return FileSerializer, {"many": True, "read_only": True} return super().build_relational_field(field_name, relation_info) class EngagementToNotesSerializer(serializers.Serializer): - engagement_id = serializers.PrimaryKeyRelatedField(queryset=Engagement.objects.all(), many=False, allow_null=True) + engagement_id = serializers.PrimaryKeyRelatedField( + queryset=Engagement.objects.all(), many=False, allow_null=True + ) notes = NoteSerializer(many=True) class EngagementToFilesSerializer(serializers.Serializer): - engagement_id = serializers.PrimaryKeyRelatedField(queryset=Engagement.objects.all(), many=False, allow_null=True) + engagement_id = serializers.PrimaryKeyRelatedField( + queryset=Engagement.objects.all(), many=False, allow_null=True + ) files = FileSerializer(many=True) def to_representation(self, data): - engagement = data.get('engagement_id') - files = data.get('files') + engagement = data.get("engagement_id") + files = data.get("files") new_files = [] for file in files: - new_files.append({ - 'id': file.id, - 'file': '{site_url}/{file_access_url}'.format( - site_url=settings.SITE_URL, - file_access_url=file.get_accessible_url(engagement, engagement.id)), - 'title': file.title - }) - new_data = {'engagement_id': engagement.id, 'files': new_files} + new_files.append( + { + "id": file.id, + 
"file": "{site_url}/{file_access_url}".format( + site_url=settings.SITE_URL, + file_access_url=file.get_accessible_url( + engagement, engagement.id + ), + ), + "title": file.title, + } + ) + new_data = {"engagement_id": engagement.id, "files": new_files} return new_data class EngagementCheckListSerializer(serializers.ModelSerializer): class Meta: model = Check_List - fields = '__all__' + fields = "__all__" class AppAnalysisSerializer(TaggitSerializer, serializers.ModelSerializer): @@ -816,64 +1124,67 @@ class AppAnalysisSerializer(TaggitSerializer, serializers.ModelSerializer): class Meta: model = App_Analysis - fields = '__all__' + fields = "__all__" class ToolTypeSerializer(serializers.ModelSerializer): class Meta: model = Tool_Type - fields = '__all__' + fields = "__all__" class RegulationSerializer(serializers.ModelSerializer): class Meta: model = Regulation - fields = '__all__' + fields = "__all__" class ToolConfigurationSerializer(serializers.ModelSerializer): class Meta: model = Tool_Configuration - fields = '__all__' + fields = "__all__" extra_kwargs = { - 'password': {'write_only': True}, - 'ssh': {'write_only': True}, - 'api_key': {'write_only': True}, + "password": {"write_only": True}, + "ssh": {"write_only": True}, + "api_key": {"write_only": True}, } class ToolProductSettingsSerializer(serializers.ModelSerializer): - setting_url = serializers.CharField(source='url') - product = serializers.PrimaryKeyRelatedField(queryset=Product.objects.all(), required=True) + setting_url = serializers.CharField(source="url") + product = serializers.PrimaryKeyRelatedField( + queryset=Product.objects.all(), required=True + ) class Meta: model = Tool_Product_Settings - fields = '__all__' + fields = "__all__" class EndpointStatusSerializer(serializers.ModelSerializer): class Meta: model = Endpoint_Status - fields = '__all__' + fields = "__all__" def create(self, validated_data): - endpoint = validated_data.get('endpoint') - finding = validated_data.get('finding') + endpoint = validated_data.get("endpoint") + finding = validated_data.get("finding") try: status = Endpoint_Status.objects.create( - finding=finding, - endpoint=endpoint + finding=finding, endpoint=endpoint ) except IntegrityError as ie: if "endpoint-finding relation" in str(ie): - raise serializers.ValidationError('This endpoint-finding relation already exists') + raise serializers.ValidationError( + "This endpoint-finding relation already exists" + ) else: raise - status.mitigated = validated_data.get('mitigated', False) - status.false_positive = validated_data.get('false_positive', False) - status.out_of_scope = validated_data.get('out_of_scope', False) - status.risk_accepted = validated_data.get('risk_accepted', False) - status.date = validated_data.get('date', get_current_date()) + status.mitigated = validated_data.get("mitigated", False) + status.false_positive = validated_data.get("false_positive", False) + status.out_of_scope = validated_data.get("out_of_scope", False) + status.risk_accepted = validated_data.get("risk_accepted", False) + status.date = validated_data.get("date", get_current_date()) status.save() return status @@ -882,7 +1193,9 @@ def update(self, instance, validated_data): return super().update(instance, validated_data) except IntegrityError as ie: if "endpoint-finding relation" in str(ie): - raise serializers.ValidationError('This endpoint-finding relation already exists') + raise serializers.ValidationError( + "This endpoint-finding relation already exists" + ) else: raise @@ -892,32 +1205,34 @@ class 
EndpointSerializer(TaggitSerializer, serializers.ModelSerializer): class Meta: model = Endpoint - exclude = ('inherited_tags', ) + exclude = ("inherited_tags",) def validate(self, data): # print('EndpointSerialize.validate') - if not self.context['request'].method == 'PATCH': - if 'product' not in data: - raise serializers.ValidationError('Product is required') - protocol = data.get('protocol') - userinfo = data.get('userinfo') - host = data.get('host') - port = data.get('port') - path = data.get('path') - query = data.get('query') - fragment = data.get('fragment') - product = data.get('product') + if not self.context["request"].method == "PATCH": + if "product" not in data: + raise serializers.ValidationError("Product is required") + protocol = data.get("protocol") + userinfo = data.get("userinfo") + host = data.get("host") + port = data.get("port") + path = data.get("path") + query = data.get("query") + fragment = data.get("fragment") + product = data.get("product") else: - protocol = data.get('protocol', self.instance.protocol) - userinfo = data.get('userinfo', self.instance.userinfo) - host = data.get('host', self.instance.host) - port = data.get('port', self.instance.port) - path = data.get('path', self.instance.path) - query = data.get('query', self.instance.query) - fragment = data.get('fragment', self.instance.fragment) - if 'product' in data and data['product'] != self.instance.product: - raise serializers.ValidationError('Change of product is not possible') + protocol = data.get("protocol", self.instance.protocol) + userinfo = data.get("userinfo", self.instance.userinfo) + host = data.get("host", self.instance.host) + port = data.get("port", self.instance.port) + path = data.get("path", self.instance.path) + query = data.get("query", self.instance.query) + fragment = data.get("fragment", self.instance.fragment) + if "product" in data and data["product"] != self.instance.product: + raise serializers.ValidationError( + "Change of product is not possible" + ) product = self.instance.product endpoint_ins = Endpoint( @@ -928,7 +1243,7 @@ def validate(self, data): path=path, query=query, fragment=fragment, - product=product + product=product, ) endpoint_ins.clean() # Run standard validation and clean process; can raise errors @@ -940,27 +1255,35 @@ def validate(self, data): path=endpoint_ins.path, query=endpoint_ins.query, fragment=endpoint_ins.fragment, - product=endpoint_ins.product + product=endpoint_ins.product, ) - if ((self.context['request'].method in ["PUT", "PATCH"] and - ((endpoint.count() > 1) or - (endpoint.count() == 1 and - endpoint.first().pk != self.instance.pk))) or - (self.context['request'].method in ["POST"] and endpoint.count() > 0)): + if ( + self.context["request"].method in ["PUT", "PATCH"] + and ( + (endpoint.count() > 1) + or ( + endpoint.count() == 1 + and endpoint.first().pk != self.instance.pk + ) + ) + ) or ( + self.context["request"].method in ["POST"] and endpoint.count() > 0 + ): raise serializers.ValidationError( - 'It appears as though an endpoint with this data already ' - 'exists for this product.', - code='invalid') + "It appears as though an endpoint with this data already " + "exists for this product.", + code="invalid", + ) # use clean data - data['protocol'] = endpoint_ins.protocol - data['userinfo'] = endpoint_ins.userinfo - data['host'] = endpoint_ins.host - data['port'] = endpoint_ins.port - data['path'] = endpoint_ins.path - data['query'] = endpoint_ins.query - data['fragment'] = endpoint_ins.fragment - data['product'] = 
endpoint_ins.product + data["protocol"] = endpoint_ins.protocol + data["userinfo"] = endpoint_ins.userinfo + data["host"] = endpoint_ins.host + data["port"] = endpoint_ins.port + data["path"] = endpoint_ins.path + data["query"] = endpoint_ins.query + data["fragment"] = endpoint_ins.fragment + data["product"] = endpoint_ins.product return data @@ -968,7 +1291,7 @@ def validate(self, data): class EndpointParamsSerializer(serializers.ModelSerializer): class Meta: model = Endpoint_Params - fields = '__all__' + fields = "__all__" class JIRAIssueSerializer(serializers.ModelSerializer): @@ -976,27 +1299,33 @@ class JIRAIssueSerializer(serializers.ModelSerializer): class Meta: model = JIRA_Issue - fields = '__all__' + fields = "__all__" def get_url(self, obj) -> str: return jira_helper.get_jira_issue_url(obj) def validate(self, data): - if self.context['request'].method == 'PATCH': - engagement = data.get('engagement', self.instance.engagement) - finding = data.get('finding', self.instance.finding) - finding_group = data.get('finding_group', self.instance.finding_group) + if self.context["request"].method == "PATCH": + engagement = data.get("engagement", self.instance.engagement) + finding = data.get("finding", self.instance.finding) + finding_group = data.get( + "finding_group", self.instance.finding_group + ) else: - engagement = data.get('engagement', None) - finding = data.get('finding', None) - finding_group = data.get('finding_group', None) - - if ((engagement and not finding and not finding_group) or - (finding and not engagement and not finding_group) or - (finding_group and not engagement and not finding)): + engagement = data.get("engagement", None) + finding = data.get("finding", None) + finding_group = data.get("finding_group", None) + + if ( + (engagement and not finding and not finding_group) + or (finding and not engagement and not finding_group) + or (finding_group and not engagement and not finding) + ): pass else: - raise serializers.ValidationError('Either engagement or finding or finding_group has to be set.') + raise serializers.ValidationError( + "Either engagement or finding or finding_group has to be set." + ) return data @@ -1004,27 +1333,29 @@ def validate(self, data): class JIRAInstanceSerializer(serializers.ModelSerializer): class Meta: model = JIRA_Instance - fields = '__all__' + fields = "__all__" extra_kwargs = { - 'password': {'write_only': True}, + "password": {"write_only": True}, } class JIRAProjectSerializer(serializers.ModelSerializer): class Meta: model = JIRA_Project - fields = '__all__' + fields = "__all__" def validate(self, data): - if self.context['request'].method == 'PATCH': - engagement = data.get('engagement', self.instance.engagement) - product = data.get('product', self.instance.product) + if self.context["request"].method == "PATCH": + engagement = data.get("engagement", self.instance.engagement) + product = data.get("product", self.instance.product) else: - engagement = data.get('engagement', None) - product = data.get('product', None) + engagement = data.get("engagement", None) + product = data.get("product", None) - if ((engagement and product) or (not engagement and not product)): - raise serializers.ValidationError('Either engagement or product has to be set.') + if (engagement and product) or (not engagement and not product): + raise serializers.ValidationError( + "Either engagement or product has to be set." 
+ ) return data @@ -1032,26 +1363,25 @@ def validate(self, data): class SonarqubeIssueSerializer(serializers.ModelSerializer): class Meta: model = Sonarqube_Issue - fields = '__all__' + fields = "__all__" class SonarqubeIssueTransitionSerializer(serializers.ModelSerializer): class Meta: model = Sonarqube_Issue_Transition - fields = '__all__' + fields = "__all__" class ProductAPIScanConfigurationSerializer(serializers.ModelSerializer): class Meta: model = Product_API_Scan_Configuration - fields = '__all__' + fields = "__all__" class DevelopmentEnvironmentSerializer(serializers.ModelSerializer): - class Meta: model = Development_Environment - fields = '__all__' + fields = "__all__" class FindingGroupSerializer(serializers.ModelSerializer): @@ -1059,39 +1389,43 @@ class FindingGroupSerializer(serializers.ModelSerializer): class Meta: model = Finding_Group - fields = ('id', 'name', 'test', 'jira_issue') + fields = ("id", "name", "test", "jira_issue") class TestSerializer(TaggitSerializer, serializers.ModelSerializer): tags = TagListSerializerField(required=False) test_type_name = serializers.ReadOnlyField() - finding_groups = FindingGroupSerializer(source='finding_group_set', many=True, read_only=True) + finding_groups = FindingGroupSerializer( + source="finding_group_set", many=True, read_only=True + ) class Meta: model = Test - exclude = ('inherited_tags', ) + exclude = ("inherited_tags",) def build_relational_field(self, field_name, relation_info): - if field_name == 'notes': - return NoteSerializer, {'many': True, 'read_only': True} - if field_name == 'files': - return FileSerializer, {'many': True, 'read_only': True} + if field_name == "notes": + return NoteSerializer, {"many": True, "read_only": True} + if field_name == "files": + return FileSerializer, {"many": True, "read_only": True} return super().build_relational_field(field_name, relation_info) class TestCreateSerializer(TaggitSerializer, serializers.ModelSerializer): engagement = serializers.PrimaryKeyRelatedField( - queryset=Engagement.objects.all()) + queryset=Engagement.objects.all() + ) notes = serializers.PrimaryKeyRelatedField( allow_null=True, queryset=Notes.objects.all(), many=True, - required=False) + required=False, + ) tags = TagListSerializerField(required=False) class Meta: model = Test - exclude = ('inherited_tags', ) + exclude = ("inherited_tags",) class TestTypeSerializer(TaggitSerializer, serializers.ModelSerializer): @@ -1099,47 +1433,56 @@ class TestTypeSerializer(TaggitSerializer, serializers.ModelSerializer): class Meta: model = Test_Type - fields = '__all__' + fields = "__all__" class TestToNotesSerializer(serializers.Serializer): - test_id = serializers.PrimaryKeyRelatedField(queryset=Test.objects.all(), many=False, allow_null=True) + test_id = serializers.PrimaryKeyRelatedField( + queryset=Test.objects.all(), many=False, allow_null=True + ) notes = NoteSerializer(many=True) class TestToFilesSerializer(serializers.Serializer): - test_id = serializers.PrimaryKeyRelatedField(queryset=Test.objects.all(), many=False, allow_null=True) + test_id = serializers.PrimaryKeyRelatedField( + queryset=Test.objects.all(), many=False, allow_null=True + ) files = FileSerializer(many=True) def to_representation(self, data): - test = data.get('test_id') - files = data.get('files') + test = data.get("test_id") + files = data.get("files") new_files = [] for file in files: - new_files.append({ - 'id': file.id, - 'file': '{site_url}/{file_access_url}'.format( - site_url=settings.SITE_URL, - 
file_access_url=file.get_accessible_url(test, test.id)), - 'title': file.title - }) - new_data = {'test_id': test.id, 'files': new_files} + new_files.append( + { + "id": file.id, + "file": "{site_url}/{file_access_url}".format( + site_url=settings.SITE_URL, + file_access_url=file.get_accessible_url(test, test.id), + ), + "title": file.title, + } + ) + new_data = {"test_id": test.id, "files": new_files} return new_data class TestImportFindingActionSerializer(serializers.ModelSerializer): class Meta: model = Test_Import_Finding_Action - fields = '__all__' + fields = "__all__" class TestImportSerializer(serializers.ModelSerializer): # findings = TestImportFindingActionSerializer(source='test_import_finding_action', many=True, read_only=True) - test_import_finding_action_set = TestImportFindingActionSerializer(many=True, read_only=True) + test_import_finding_action_set = TestImportFindingActionSerializer( + many=True, read_only=True + ) class Meta: model = Test_Import - fields = '__all__' + fields = "__all__" class RiskAcceptanceSerializer(serializers.ModelSerializer): @@ -1160,10 +1503,14 @@ def get_decision(self, obj): @extend_schema_field(serializers.CharField()) @swagger_serializer_method(serializers.CharField()) def get_path(self, obj): - engagement = Engagement.objects.filter(risk_acceptance__id__in=[obj.id]).first() - path = 'No proof has been supplied' + engagement = Engagement.objects.filter( + risk_acceptance__id__in=[obj.id] + ).first() + path = "No proof has been supplied" if engagement and obj.filename() is not None: - path = reverse('download_risk_acceptance', args=(engagement.id, obj.id)) + path = reverse( + "download_risk_acceptance", args=(engagement.id, obj.id) + ) request = self.context.get("request") if request: path = request.build_absolute_uri(path) @@ -1172,18 +1519,22 @@ def get_path(self, obj): @extend_schema_field(serializers.IntegerField()) @swagger_serializer_method(serializers.IntegerField()) def get_engagement(self, obj): - engagement = Engagement.objects.filter(risk_acceptance__id__in=[obj.id]).first() - return EngagementSerializer(read_only=True).to_representation(engagement) + engagement = Engagement.objects.filter( + risk_acceptance__id__in=[obj.id] + ).first() + return EngagementSerializer(read_only=True).to_representation( + engagement + ) class Meta: model = Risk_Acceptance - fields = '__all__' + fields = "__all__" class FindingMetaSerializer(serializers.ModelSerializer): class Meta: model = DojoMeta - fields = ('name', 'value') + fields = ("name", "value") class FindingProdTypeSerializer(serializers.ModelSerializer): @@ -1205,7 +1556,21 @@ class FindingEngagementSerializer(serializers.ModelSerializer): class Meta: model = Engagement - fields = ["id", "name", "description", "product", "target_start", "target_end", "branch_tag", "engagement_type", "build_id", "commit_hash", "version", "created", "updated"] + fields = [ + "id", + "name", + "description", + "product", + "target_start", + "target_end", + "branch_tag", + "engagement_type", + "build_id", + "commit_hash", + "version", + "created", + "updated", + ] class FindingEnvironmentSerializer(serializers.ModelSerializer): @@ -1227,7 +1592,17 @@ class FindingTestSerializer(serializers.ModelSerializer): class Meta: model = Test - fields = ["id", "title", "test_type", "engagement", "environment", "branch_tag", "build_id", "commit_hash", "version"] + fields = [ + "id", + "title", + "test_type", + "engagement", + "environment", + "branch_tag", + "build_id", + "commit_hash", + "version", + ] class 
FindingRelatedFieldsSerializer(serializers.Serializer): @@ -1237,7 +1612,9 @@ class FindingRelatedFieldsSerializer(serializers.Serializer): @extend_schema_field(FindingTestSerializer) @swagger_serializer_method(FindingTestSerializer) def get_test(self, obj): - return FindingTestSerializer(read_only=True).to_representation(obj.test) + return FindingTestSerializer(read_only=True).to_representation( + obj.test + ) @extend_schema_field(JIRAIssueSerializer) @swagger_serializer_method(JIRAIssueSerializer) @@ -1251,13 +1628,15 @@ def get_jira(self, obj): class VulnerabilityIdSerializer(serializers.ModelSerializer): class Meta: model = Vulnerability_Id - fields = ['vulnerability_id'] + fields = ["vulnerability_id"] class FindingSerializer(TaggitSerializer, serializers.ModelSerializer): tags = TagListSerializerField(required=False) request_response = serializers.SerializerMethodField() - accepted_risks = RiskAcceptanceSerializer(many=True, read_only=True, source='risk_acceptance_set') + accepted_risks = RiskAcceptanceSerializer( + many=True, read_only=True, source="risk_acceptance_set" + ) push_to_jira = serializers.BooleanField(default=False) age = serializers.IntegerField(read_only=True) sla_days_remaining = serializers.IntegerField(read_only=True) @@ -1267,12 +1646,19 @@ class FindingSerializer(TaggitSerializer, serializers.ModelSerializer): jira_creation = serializers.SerializerMethodField(read_only=True) jira_change = serializers.SerializerMethodField(read_only=True) display_status = serializers.SerializerMethodField() - finding_groups = FindingGroupSerializer(source='finding_group_set', many=True, read_only=True) - vulnerability_ids = VulnerabilityIdSerializer(source='vulnerability_id_set', many=True, required=False) + finding_groups = FindingGroupSerializer( + source="finding_group_set", many=True, read_only=True + ) + vulnerability_ids = VulnerabilityIdSerializer( + source="vulnerability_id_set", many=True, required=False + ) class Meta: model = Finding - exclude = ('cve', 'inherited_tags', ) + exclude = ( + "cve", + "inherited_tags", + ) @extend_schema_field(serializers.DateTimeField()) @swagger_serializer_method(serializers.DateTimeField()) @@ -1287,13 +1673,15 @@ def get_jira_change(self, obj): @extend_schema_field(FindingRelatedFieldsSerializer) @swagger_serializer_method(FindingRelatedFieldsSerializer) def get_related_fields(self, obj): - request = self.context.get('request', None) + request = self.context.get("request", None) if request is None: return None query_params = request.query_params - if query_params.get('related_fields', 'false') == 'true': - return FindingRelatedFieldsSerializer(required=False).to_representation(obj) + if query_params.get("related_fields", "false") == "true": + return FindingRelatedFieldsSerializer( + required=False + ).to_representation(obj) else: return None @@ -1306,67 +1694,88 @@ def update(self, instance, validated_data): to_be_tagged, validated_data = self._pop_tags(validated_data) # pop push_to_jira so it won't get send to the model as a field - # TODO: JIRA can we remove this is_push_all_issues, already checked in apiv2 viewset? - push_to_jira = validated_data.pop('push_to_jira') or jira_helper.is_push_all_issues(instance) + # TODO: JIRA can we remove this is_push_all_issues, already checked in + # apiv2 viewset? 
+ push_to_jira = validated_data.pop( + "push_to_jira" + ) or jira_helper.is_push_all_issues(instance) # Save vulnerability ids and pop them - if 'vulnerability_id_set' in validated_data: - vulnerability_id_set = validated_data.pop('vulnerability_id_set') + if "vulnerability_id_set" in validated_data: + vulnerability_id_set = validated_data.pop("vulnerability_id_set") vulnerability_ids = list() if vulnerability_id_set: for vulnerability_id in vulnerability_id_set: - vulnerability_ids.append(vulnerability_id['vulnerability_id']) + vulnerability_ids.append( + vulnerability_id["vulnerability_id"] + ) save_vulnerability_ids(instance, vulnerability_ids) - instance = super(TaggitSerializer, self).update(instance, validated_data) + instance = super(TaggitSerializer, self).update( + instance, validated_data + ) # If we need to push to JIRA, an extra save call is needed. # Also if we need to update the mitigation date of the finding. - # TODO try to combine create and save, but for now I'm just fixing a bug and don't want to change to much + # TODO try to combine create and save, but for now I'm just fixing a + # bug and don't want to change to much if push_to_jira: instance.save(push_to_jira=push_to_jira) - # not sure why we are returning a tag_object, but don't want to change too much now as we're just fixing a bug + # not sure why we are returning a tag_object, but don't want to change + # too much now as we're just fixing a bug tag_object = self._save_tags(instance, to_be_tagged) return tag_object def validate(self, data): - if self.context['request'].method == 'PATCH': - is_active = data.get('active', self.instance.active) - is_verified = data.get('verified', self.instance.verified) - is_duplicate = data.get('duplicate', self.instance.duplicate) - is_false_p = data.get('false_p', self.instance.false_p) - is_risk_accepted = data.get('risk_accepted', self.instance.risk_accepted) + if self.context["request"].method == "PATCH": + is_active = data.get("active", self.instance.active) + is_verified = data.get("verified", self.instance.verified) + is_duplicate = data.get("duplicate", self.instance.duplicate) + is_false_p = data.get("false_p", self.instance.false_p) + is_risk_accepted = data.get( + "risk_accepted", self.instance.risk_accepted + ) else: - is_active = data.get('active', True) - is_verified = data.get('verified', False) - is_duplicate = data.get('duplicate', False) - is_false_p = data.get('false_p', False) - is_risk_accepted = data.get('risk_accepted', False) - - if ((is_active or is_verified) and is_duplicate): - raise serializers.ValidationError('Duplicate findings cannot be' - ' verified or active') + is_active = data.get("active", True) + is_verified = data.get("verified", False) + is_duplicate = data.get("duplicate", False) + is_false_p = data.get("false_p", False) + is_risk_accepted = data.get("risk_accepted", False) + + if (is_active or is_verified) and is_duplicate: + raise serializers.ValidationError( + "Duplicate findings cannot be" " verified or active" + ) if is_false_p and is_verified: - raise serializers.ValidationError('False positive findings cannot ' - 'be verified.') + raise serializers.ValidationError( + "False positive findings cannot " "be verified." 
+ ) if is_risk_accepted and not self.instance.risk_accepted: - if not self.instance.test.engagement.product.enable_simple_risk_acceptance: - raise serializers.ValidationError('Simple risk acceptance is disabled for this product, use the UI to accept this finding.') + if ( + not self.instance.test.engagement.product.enable_simple_risk_acceptance + ): + raise serializers.ValidationError( + "Simple risk acceptance is disabled for this product, use the UI to accept this finding." + ) if is_active and is_risk_accepted: - raise serializers.ValidationError('Active findings cannot be risk accepted.') + raise serializers.ValidationError( + "Active findings cannot be risk accepted." + ) return data def build_relational_field(self, field_name, relation_info): - if field_name == 'notes': - return NoteSerializer, {'many': True, 'read_only': True} + if field_name == "notes": + return NoteSerializer, {"many": True, "read_only": True} return super().build_relational_field(field_name, relation_info) @extend_schema_field(BurpRawRequestResponseSerializer) - @swagger_serializer_method(serializer_or_field=BurpRawRequestResponseSerializer) + @swagger_serializer_method( + serializer_or_field=BurpRawRequestResponseSerializer + ) def get_request_response(self, obj): # burp_req_resp = BurpRawRequestResponse.objects.filter(finding=obj) burp_req_resp = obj.burprawrequestresponse_set.all() @@ -1374,37 +1783,41 @@ def get_request_response(self, obj): for burp in burp_req_resp: request = burp.get_request() response = burp.get_response() - burp_list.append({'request': request, 'response': response}) - serialized_burps = BurpRawRequestResponseSerializer({'req_resp': burp_list}) + burp_list.append({"request": request, "response": response}) + serialized_burps = BurpRawRequestResponseSerializer( + {"req_resp": burp_list} + ) return serialized_burps.data class FindingCreateSerializer(TaggitSerializer, serializers.ModelSerializer): notes = serializers.PrimaryKeyRelatedField( - read_only=True, - allow_null=True, - required=False, - many=True) - test = serializers.PrimaryKeyRelatedField( - queryset=Test.objects.all()) + read_only=True, allow_null=True, required=False, many=True + ) + test = serializers.PrimaryKeyRelatedField(queryset=Test.objects.all()) thread_id = serializers.IntegerField(default=0) found_by = serializers.PrimaryKeyRelatedField( - queryset=Test_Type.objects.all(), - many=True) - url = serializers.CharField( - allow_null=True, - default=None) + queryset=Test_Type.objects.all(), many=True + ) + url = serializers.CharField(allow_null=True, default=None) tags = TagListSerializerField(required=False) push_to_jira = serializers.BooleanField(default=False) - vulnerability_ids = VulnerabilityIdSerializer(source='vulnerability_id_set', many=True, required=False) - reporter = serializers.PrimaryKeyRelatedField(required=False, queryset=User.objects.all()) + vulnerability_ids = VulnerabilityIdSerializer( + source="vulnerability_id_set", many=True, required=False + ) + reporter = serializers.PrimaryKeyRelatedField( + required=False, queryset=User.objects.all() + ) class Meta: model = Finding - exclude = ('cve', 'inherited_tags', ) + exclude = ( + "cve", + "inherited_tags", + ) extra_kwargs = { - 'active': {'required': True}, - 'verified': {'required': True}, + "active": {"required": True}, + "verified": {"required": True}, } # Overriding this to push add Push to JIRA functionality @@ -1413,11 +1826,11 @@ def create(self, validated_data): to_be_tagged, validated_data = self._pop_tags(validated_data) # pop push_to_jira so it 
won't get send to the model as a field - push_to_jira = validated_data.pop('push_to_jira') + push_to_jira = validated_data.pop("push_to_jira") # Save vulnerability ids and pop them - if 'vulnerability_id_set' in validated_data: - vulnerability_id_set = validated_data.pop('vulnerability_id_set') + if "vulnerability_id_set" in validated_data: + vulnerability_id_set = validated_data.pop("vulnerability_id_set") else: vulnerability_id_set = None @@ -1427,41 +1840,60 @@ def create(self, validated_data): if vulnerability_id_set: vulnerability_ids = list() for vulnerability_id in vulnerability_id_set: - vulnerability_ids.append(vulnerability_id['vulnerability_id']) - validated_data['cve'] = vulnerability_ids[0] + vulnerability_ids.append(vulnerability_id["vulnerability_id"]) + validated_data["cve"] = vulnerability_ids[0] save_vulnerability_ids(new_finding, vulnerability_ids) new_finding.save() - # TODO: JIRA can we remove this is_push_all_issues, already checked in apiv2 viewset? - push_to_jira = push_to_jira or jira_helper.is_push_all_issues(new_finding) + # TODO: JIRA can we remove this is_push_all_issues, already checked in + # apiv2 viewset? + push_to_jira = push_to_jira or jira_helper.is_push_all_issues( + new_finding + ) # If we need to push to JIRA, an extra save call is needed. - # TODO try to combine create and save, but for now I'm just fixing a bug and don't want to change to much + # TODO try to combine create and save, but for now I'm just fixing a + # bug and don't want to change to much if push_to_jira or new_finding: new_finding.save(push_to_jira=push_to_jira) - # not sure why we are returning a tag_object, but don't want to change too much now as we're just fixing a bug + # not sure why we are returning a tag_object, but don't want to change + # too much now as we're just fixing a bug tag_object = self._save_tags(new_finding, to_be_tagged) return tag_object def validate(self, data): - if 'reporter' not in data: - request = self.context['request'] - data['reporter'] = request.user + if "reporter" not in data: + request = self.context["request"] + data["reporter"] = request.user - if ((data.get('active') or data.get('verified')) and data.get('duplicate')): - raise serializers.ValidationError('Duplicate findings cannot be verified or active') - if data.get('false_p') and data.get('verified'): - raise serializers.ValidationError('False positive findings cannot be verified.') + if (data.get("active") or data.get("verified")) and data.get( + "duplicate" + ): + raise serializers.ValidationError( + "Duplicate findings cannot be verified or active" + ) + if data.get("false_p") and data.get("verified"): + raise serializers.ValidationError( + "False positive findings cannot be verified." + ) - if 'risk_accepted' in data and data.get('risk_accepted'): - test = data.get('test') + if "risk_accepted" in data and data.get("risk_accepted"): + test = data.get("test") # test = Test.objects.get(id=test_id) if not test.engagement.product.enable_simple_risk_acceptance: - raise serializers.ValidationError('Simple risk acceptance is disabled for this product, use the UI to accept this finding.') - - if data.get('active') and 'risk_accepted' in data and data.get('risk_accepted'): - raise serializers.ValidationError('Active findings cannot be risk accepted.') + raise serializers.ValidationError( + "Simple risk acceptance is disabled for this product, use the UI to accept this finding." 
+ ) + + if ( + data.get("active") + and "risk_accepted" in data + and data.get("risk_accepted") + ): + raise serializers.ValidationError( + "Active findings cannot be risk accepted." + ) return data @@ -1469,44 +1901,56 @@ def validate(self, data): class VulnerabilityIdTemplateSerializer(serializers.ModelSerializer): class Meta: model = Vulnerability_Id_Template - fields = ['vulnerability_id'] + fields = ["vulnerability_id"] class FindingTemplateSerializer(TaggitSerializer, serializers.ModelSerializer): tags = TagListSerializerField(required=False) - vulnerability_ids = VulnerabilityIdTemplateSerializer(source='vulnerability_id_template_set', many=True, required=False) + vulnerability_ids = VulnerabilityIdTemplateSerializer( + source="vulnerability_id_template_set", many=True, required=False + ) class Meta: model = Finding_Template - exclude = ('cve', ) + exclude = ("cve",) def create(self, validated_data): # Save vulnerability ids and pop them - if 'vulnerability_id_template_set' in validated_data: - vulnerability_id_set = validated_data.pop('vulnerability_id_template_set') + if "vulnerability_id_template_set" in validated_data: + vulnerability_id_set = validated_data.pop( + "vulnerability_id_template_set" + ) else: vulnerability_id_set = None - new_finding_template = super(TaggitSerializer, self).create(validated_data) + new_finding_template = super(TaggitSerializer, self).create( + validated_data + ) if vulnerability_id_set: vulnerability_ids = list() for vulnerability_id in vulnerability_id_set: - vulnerability_ids.append(vulnerability_id['vulnerability_id']) - validated_data['cve'] = vulnerability_ids[0] - save_vulnerability_ids_template(new_finding_template, vulnerability_ids) + vulnerability_ids.append(vulnerability_id["vulnerability_id"]) + validated_data["cve"] = vulnerability_ids[0] + save_vulnerability_ids_template( + new_finding_template, vulnerability_ids + ) new_finding_template.save() return new_finding_template def update(self, instance, validated_data): # Save vulnerability ids and pop them - if 'vulnerability_id_template_set' in validated_data: - vulnerability_id_set = validated_data.pop('vulnerability_id_template_set') + if "vulnerability_id_template_set" in validated_data: + vulnerability_id_set = validated_data.pop( + "vulnerability_id_template_set" + ) vulnerability_ids = list() if vulnerability_id_set: for vulnerability_id in vulnerability_id_set: - vulnerability_ids.append(vulnerability_id['vulnerability_id']) + vulnerability_ids.append( + vulnerability_id["vulnerability_id"] + ) save_vulnerability_ids_template(instance, vulnerability_ids) return super(TaggitSerializer, self).update(instance, validated_data) @@ -1515,30 +1959,29 @@ def update(self, instance, validated_data): class CredentialSerializer(serializers.ModelSerializer): class Meta: model = Cred_User - exclude = ('password', ) + exclude = ("password",) class CredentialMappingSerializer(serializers.ModelSerializer): class Meta: model = Cred_Mapping - fields = '__all__' + fields = "__all__" class StubFindingSerializer(serializers.ModelSerializer): class Meta: model = Stub_Finding - fields = '__all__' + fields = "__all__" class StubFindingCreateSerializer(serializers.ModelSerializer): - test = serializers.PrimaryKeyRelatedField( - queryset=Test.objects.all()) + test = serializers.PrimaryKeyRelatedField(queryset=Test.objects.all()) class Meta: model = Stub_Finding - fields = '__all__' + fields = "__all__" extra_kwargs = { - 'reporter': {'default': serializers.CurrentUserDefault()}, + "reporter": {"default": 
serializers.CurrentUserDefault()}, } @@ -1551,74 +1994,130 @@ class ProductSerializer(TaggitSerializer, serializers.ModelSerializer): class Meta: model = Product - exclude = ('tid', 'updated', ) + exclude = ( + "tid", + "updated", + ) def get_findings_count(self, obj) -> int: return obj.findings_count # -> List[int] as return type doesn't seem enough for drf-yasg - @swagger_serializer_method(serializer_or_field=serializers.ListField(child=serializers.IntegerField())) + @swagger_serializer_method( + serializer_or_field=serializers.ListField( + child=serializers.IntegerField() + ) + ) def get_findings_list(self, obj) -> List[int]: return obj.open_findings_list class ImportScanSerializer(serializers.Serializer): - scan_date = serializers.DateField(required=False, help_text="Scan completion date will be used on all findings.") + scan_date = serializers.DateField( + required=False, + help_text="Scan completion date will be used on all findings.", + ) minimum_severity = serializers.ChoiceField( choices=SEVERITY_CHOICES, - default='Info', help_text='Minimum severity level to be imported') - active = serializers.BooleanField(help_text="Override the active setting from the tool.") - verified = serializers.BooleanField(help_text="Override the verified setting from the tool.") - scan_type = serializers.ChoiceField( - choices=get_choices_sorted()) + default="Info", + help_text="Minimum severity level to be imported", + ) + active = serializers.BooleanField( + help_text="Override the active setting from the tool." + ) + verified = serializers.BooleanField( + help_text="Override the verified setting from the tool." + ) + scan_type = serializers.ChoiceField(choices=get_choices_sorted()) # TODO why do we allow only existing endpoints? - endpoint_to_add = serializers.PrimaryKeyRelatedField(queryset=Endpoint.objects.all(), - required=False, - default=None, - help_text="The IP address, host name or full URL. It must be valid") + endpoint_to_add = serializers.PrimaryKeyRelatedField( + queryset=Endpoint.objects.all(), + required=False, + default=None, + help_text="The IP address, host name or full URL. It must be valid", + ) file = serializers.FileField(allow_empty_file=True, required=False) product_type_name = serializers.CharField(required=False) product_name = serializers.CharField(required=False) engagement_name = serializers.CharField(required=False) - engagement_end_date = serializers.DateField(required=False, help_text="End Date for Engagement. Default is current time + 365 days. Required format year-month-day") - source_code_management_uri = serializers.URLField(max_length=600, required=False, help_text="Resource link to source code") + engagement_end_date = serializers.DateField( + required=False, + help_text="End Date for Engagement. Default is current time + 365 days. 
Required format year-month-day", + ) + source_code_management_uri = serializers.URLField( + max_length=600, + required=False, + help_text="Resource link to source code", + ) engagement = serializers.PrimaryKeyRelatedField( - queryset=Engagement.objects.all(), required=False) + queryset=Engagement.objects.all(), required=False + ) test_title = serializers.CharField(required=False) auto_create_context = serializers.BooleanField(required=False) deduplication_on_engagement = serializers.BooleanField(required=False) lead = serializers.PrimaryKeyRelatedField( - allow_null=True, - default=None, - queryset=User.objects.all()) - tags = TagListSerializerField(required=False, help_text="Add tags that help describe this scan.") - close_old_findings = serializers.BooleanField(required=False, default=False, + allow_null=True, default=None, queryset=User.objects.all() + ) + tags = TagListSerializerField( + required=False, help_text="Add tags that help describe this scan." + ) + close_old_findings = serializers.BooleanField( + required=False, + default=False, help_text="Select if old findings no longer present in the report get closed as mitigated when importing. " - "If service has been set, only the findings for this service will be closed.") - close_old_findings_product_scope = serializers.BooleanField(required=False, default=False, + "If service has been set, only the findings for this service will be closed.", + ) + close_old_findings_product_scope = serializers.BooleanField( + required=False, + default=False, help_text="Select if close_old_findings applies to all findings of the same type in the product. " - "By default, it is false meaning that only old findings of the same type in the engagement are in scope.") + "By default, it is false meaning that only old findings of the same type in the engagement are in scope.", + ) push_to_jira = serializers.BooleanField(default=False) environment = serializers.CharField(required=False) - version = serializers.CharField(required=False, help_text="Version that was scanned.") - build_id = serializers.CharField(required=False, help_text="ID of the build that was scanned.") - branch_tag = serializers.CharField(required=False, help_text="Branch or Tag that was scanned.") - commit_hash = serializers.CharField(required=False, help_text="Commit that was scanned.") - api_scan_configuration = serializers.PrimaryKeyRelatedField(allow_null=True, default=None, - queryset=Product_API_Scan_Configuration.objects.all()) - service = serializers.CharField(required=False, + version = serializers.CharField( + required=False, help_text="Version that was scanned." + ) + build_id = serializers.CharField( + required=False, help_text="ID of the build that was scanned." + ) + branch_tag = serializers.CharField( + required=False, help_text="Branch or Tag that was scanned." + ) + commit_hash = serializers.CharField( + required=False, help_text="Commit that was scanned." + ) + api_scan_configuration = serializers.PrimaryKeyRelatedField( + allow_null=True, + default=None, + queryset=Product_API_Scan_Configuration.objects.all(), + ) + service = serializers.CharField( + required=False, help_text="A service is a self-contained piece of functionality within a Product. " - "This is an optional field which is used in deduplication and closing of old findings when set. " - "This affects the whole engagement/product depending on your deduplication scope.") + "This is an optional field which is used in deduplication and closing of old findings when set. 
" + "This affects the whole engagement/product depending on your deduplication scope.", + ) - group_by = serializers.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text='Choose an option to automatically group new findings by the chosen option.') - create_finding_groups_for_all_findings = serializers.BooleanField(help_text="If set to false, finding groups will only be created when there is more than one grouped finding", required=False, default=True) + group_by = serializers.ChoiceField( + required=False, + choices=Finding_Group.GROUP_BY_OPTIONS, + help_text="Choose an option to automatically group new findings by the chosen option.", + ) + create_finding_groups_for_all_findings = serializers.BooleanField( + help_text="If set to false, finding groups will only be created when there is more than one grouped finding", + required=False, + default=True, + ) # extra fields populated in response - # need to use the _id suffix as without the serializer framework gets confused - test = serializers.IntegerField(read_only=True) # left for backwards compatibility + # need to use the _id suffix as without the serializer framework gets + # confused + test = serializers.IntegerField( + read_only=True + ) # left for backwards compatibility test_id = serializers.IntegerField(read_only=True) engagement_id = serializers.IntegerField(read_only=True) product_id = serializers.IntegerField(read_only=True) @@ -1628,72 +2127,122 @@ class ImportScanSerializer(serializers.Serializer): def save(self, push_to_jira=False): data = self.validated_data - close_old_findings = data.get('close_old_findings') - close_old_findings_product_scope = data.get('close_old_findings_product_scope') - minimum_severity = data.get('minimum_severity') - endpoint_to_add = data.get('endpoint_to_add') - scan_date = data.get('scan_date', None) - # Will save in the provided environment or in the `Development` one if absent - version = data.get('version', None) - build_id = data.get('build_id', None) - branch_tag = data.get('branch_tag', None) - commit_hash = data.get('commit_hash', None) - api_scan_configuration = data.get('api_scan_configuration', None) - service = data.get('service', None) - source_code_management_uri = data.get('source_code_management_uri', None) - - if 'active' in self.initial_data: - active = data.get('active') + close_old_findings = data.get("close_old_findings") + close_old_findings_product_scope = data.get( + "close_old_findings_product_scope" + ) + minimum_severity = data.get("minimum_severity") + endpoint_to_add = data.get("endpoint_to_add") + scan_date = data.get("scan_date", None) + # Will save in the provided environment or in the `Development` one if + # absent + version = data.get("version", None) + build_id = data.get("build_id", None) + branch_tag = data.get("branch_tag", None) + commit_hash = data.get("commit_hash", None) + api_scan_configuration = data.get("api_scan_configuration", None) + service = data.get("service", None) + source_code_management_uri = data.get( + "source_code_management_uri", None + ) + + if "active" in self.initial_data: + active = data.get("active") else: active = None - if 'verified' in self.initial_data: - verified = data.get('verified') + if "verified" in self.initial_data: + verified = data.get("verified") else: verified = None - environment_name = data.get('environment', 'Development') - environment = Development_Environment.objects.get(name=environment_name) - tags = data.get('tags', None) - lead = data.get('lead') + environment_name = 
data.get("environment", "Development") + environment = Development_Environment.objects.get( + name=environment_name + ) + tags = data.get("tags", None) + lead = data.get("lead") - scan = data.get('file', None) + scan = data.get("file", None) endpoints_to_add = [endpoint_to_add] if endpoint_to_add else None - group_by = data.get('group_by', None) - create_finding_groups_for_all_findings = data.get('create_finding_groups_for_all_findings', True) + group_by = data.get("group_by", None) + create_finding_groups_for_all_findings = data.get( + "create_finding_groups_for_all_findings", True + ) - engagement_end_date = data.get('engagement_end_date', None) - _, test_title, scan_type, engagement_id, engagement_name, product_name, product_type_name, auto_create_context, deduplication_on_engagement, do_not_reactivate = get_import_meta_data_from_dict(data) - engagement = get_or_create_engagement(engagement_id, engagement_name, product_name, product_type_name, auto_create_context, - deduplication_on_engagement, source_code_management_uri=source_code_management_uri, target_end=engagement_end_date) + engagement_end_date = data.get("engagement_end_date", None) + ( + _, + test_title, + scan_type, + engagement_id, + engagement_name, + product_name, + product_type_name, + auto_create_context, + deduplication_on_engagement, + do_not_reactivate, + ) = get_import_meta_data_from_dict(data) + engagement = get_or_create_engagement( + engagement_id, + engagement_name, + product_name, + product_type_name, + auto_create_context, + deduplication_on_engagement, + source_code_management_uri=source_code_management_uri, + target_end=engagement_end_date, + ) - # have to make the scan_date_time timezone aware otherwise uploads via the API would fail (but unit tests for api upload would pass...) - scan_date_time = timezone.make_aware(datetime.combine(scan_date, datetime.min.time())) if scan_date else None + # have to make the scan_date_time timezone aware otherwise uploads via + # the API would fail (but unit tests for api upload would pass...) 
+ scan_date_time = ( + timezone.make_aware( + datetime.combine(scan_date, datetime.min.time()) + ) + if scan_date + else None + ) importer = Importer() try: - test, finding_count, closed_finding_count, test_import = importer.import_scan(scan, scan_type, engagement, lead, environment, - active=active, verified=verified, tags=tags, - minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, - scan_date=scan_date_time, version=version, - branch_tag=branch_tag, build_id=build_id, - commit_hash=commit_hash, - push_to_jira=push_to_jira, - close_old_findings=close_old_findings, - close_old_findings_product_scope=close_old_findings_product_scope, - group_by=group_by, - api_scan_configuration=api_scan_configuration, - service=service, - title=test_title, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings) + ( + test, + finding_count, + closed_finding_count, + test_import, + ) = importer.import_scan( + scan, + scan_type, + engagement, + lead, + environment, + active=active, + verified=verified, + tags=tags, + minimum_severity=minimum_severity, + endpoints_to_add=endpoints_to_add, + scan_date=scan_date_time, + version=version, + branch_tag=branch_tag, + build_id=build_id, + commit_hash=commit_hash, + push_to_jira=push_to_jira, + close_old_findings=close_old_findings, + close_old_findings_product_scope=close_old_findings_product_scope, + group_by=group_by, + api_scan_configuration=api_scan_configuration, + service=service, + title=test_title, + create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, + ) if test: - data['test'] = test.id - data['test_id'] = test.id - data['engagement_id'] = test.engagement.id - data['product_id'] = test.engagement.product.id - data['product_type_id'] = test.engagement.product.prod_type.id - data['statistics'] = {'after': test.statistics} + data["test"] = test.id + data["test_id"] = test.id + data["engagement_id"] = test.engagement.id + data["product_id"] = test.engagement.product.id + data["product_type_id"] = test.engagement.product.prod_type.id + data["statistics"] = {"after": test.statistics} # convert to exception otherwise django rest framework will swallow them as 400 error # exceptions are already logged in the importer @@ -1706,47 +2255,78 @@ def validate(self, data): scan_type = data.get("scan_type") file = data.get("file") if not file and requires_file(scan_type): - raise serializers.ValidationError('Uploading a Report File is required for {}'.format(scan_type)) + raise serializers.ValidationError( + "Uploading a Report File is required for {}".format(scan_type) + ) if file and is_scan_file_too_large(file): raise serializers.ValidationError( - 'Report file is too large. Maximum supported size is {} MB'.format(settings.SCAN_FILE_MAX_SIZE)) + "Report file is too large. 
Maximum supported size is {} MB".format( + settings.SCAN_FILE_MAX_SIZE + ) + ) tool_type = requires_tool_type(scan_type) if tool_type: - api_scan_configuration = data.get('api_scan_configuration') - if api_scan_configuration and tool_type != api_scan_configuration.tool_configuration.tool_type.name: - raise serializers.ValidationError(f'API scan configuration must be of tool type {tool_type}') + api_scan_configuration = data.get("api_scan_configuration") + if ( + api_scan_configuration + and tool_type + != api_scan_configuration.tool_configuration.tool_type.name + ): + raise serializers.ValidationError( + f"API scan configuration must be of tool type {tool_type}" + ) return data def validate_scan_date(self, value): if value and value > timezone.localdate(): raise serializers.ValidationError( - 'The scan_date cannot be in the future!') + "The scan_date cannot be in the future!" + ) return value class ReImportScanSerializer(TaggitSerializer, serializers.Serializer): - scan_date = serializers.DateField(required=False, help_text="Scan completion date will be used on all findings.") + scan_date = serializers.DateField( + required=False, + help_text="Scan completion date will be used on all findings.", + ) minimum_severity = serializers.ChoiceField( choices=SEVERITY_CHOICES, - default='Info', help_text='Minimum severity level to be imported') - active = serializers.BooleanField(help_text="Override the active setting from the tool.") - verified = serializers.BooleanField(help_text="Override the verified setting from the tool.") - help_do_not_reactivate = 'Select if the import should ignore active findings from the report, useful for triage-less scanners. Will keep existing findings closed, without reactivating them. For more information check the docs.' - do_not_reactivate = serializers.BooleanField(default=False, required=False, help_text=help_do_not_reactivate) + default="Info", + help_text="Minimum severity level to be imported", + ) + active = serializers.BooleanField( + help_text="Override the active setting from the tool." + ) + verified = serializers.BooleanField( + help_text="Override the verified setting from the tool." + ) + help_do_not_reactivate = "Select if the import should ignore active findings from the report, useful for triage-less scanners. Will keep existing findings closed, without reactivating them. For more information check the docs." + do_not_reactivate = serializers.BooleanField( + default=False, required=False, help_text=help_do_not_reactivate + ) scan_type = serializers.ChoiceField( - choices=get_choices_sorted(), - required=True) - endpoint_to_add = serializers.PrimaryKeyRelatedField(queryset=Endpoint.objects.all(), - default=None, - required=False) + choices=get_choices_sorted(), required=True + ) + endpoint_to_add = serializers.PrimaryKeyRelatedField( + queryset=Endpoint.objects.all(), default=None, required=False + ) file = serializers.FileField(allow_empty_file=True, required=False) product_type_name = serializers.CharField(required=False) product_name = serializers.CharField(required=False) engagement_name = serializers.CharField(required=False) - engagement_end_date = serializers.DateField(required=False, help_text="End Date for Engagement. Default is current time + 365 days. 
Required format year-month-day") - source_code_management_uri = serializers.URLField(max_length=600, required=False, help_text="Resource link to source code") - test = serializers.PrimaryKeyRelatedField(required=False, - queryset=Test.objects.all()) + engagement_end_date = serializers.DateField( + required=False, + help_text="End Date for Engagement. Default is current time + 365 days. Required format year-month-day", + ) + source_code_management_uri = serializers.URLField( + max_length=600, + required=False, + help_text="Resource link to source code", + ) + test = serializers.PrimaryKeyRelatedField( + required=False, queryset=Test.objects.all() + ) test_title = serializers.CharField(required=False) auto_create_context = serializers.BooleanField(required=False) deduplication_on_engagement = serializers.BooleanField(required=False) @@ -1755,86 +2335,149 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer): # Close the old findings if the parameter is not provided. This is to # mentain the old API behavior after reintroducing the close_old_findings parameter # also for ReImport. - close_old_findings = serializers.BooleanField(required=False, default=True, - help_text="Select if old findings no longer present in the report get closed as mitigated when importing.") - close_old_findings_product_scope = serializers.BooleanField(required=False, default=False, + close_old_findings = serializers.BooleanField( + required=False, + default=True, + help_text="Select if old findings no longer present in the report get closed as mitigated when importing.", + ) + close_old_findings_product_scope = serializers.BooleanField( + required=False, + default=False, help_text="Select if close_old_findings applies to all findings of the same type in the product. " - "By default, it is false meaning that only old findings of the same type in the engagement are in scope. " - "Note that this only applies on the first call to reimport-scan.") - version = serializers.CharField(required=False, help_text="Version that will be set on existing Test object. Leave empty to leave existing value in place.") - build_id = serializers.CharField(required=False, help_text="ID of the build that was scanned.") - branch_tag = serializers.CharField(required=False, help_text="Branch or Tag that was scanned.") - commit_hash = serializers.CharField(required=False, help_text="Commit that was scanned.") - api_scan_configuration = serializers.PrimaryKeyRelatedField(allow_null=True, default=None, - queryset=Product_API_Scan_Configuration.objects.all()) - service = serializers.CharField(required=False, + "By default, it is false meaning that only old findings of the same type in the engagement are in scope. " + "Note that this only applies on the first call to reimport-scan.", + ) + version = serializers.CharField( + required=False, + help_text="Version that will be set on existing Test object. Leave empty to leave existing value in place.", + ) + build_id = serializers.CharField( + required=False, help_text="ID of the build that was scanned." + ) + branch_tag = serializers.CharField( + required=False, help_text="Branch or Tag that was scanned." + ) + commit_hash = serializers.CharField( + required=False, help_text="Commit that was scanned." 
+ ) + api_scan_configuration = serializers.PrimaryKeyRelatedField( + allow_null=True, + default=None, + queryset=Product_API_Scan_Configuration.objects.all(), + ) + service = serializers.CharField( + required=False, help_text="A service is a self-contained piece of functionality within a Product. " - "This is an optional field which is used in deduplication and closing of old findings when set. " - "This affects the whole engagement/product depending on your deduplication scope.") + "This is an optional field which is used in deduplication and closing of old findings when set. " + "This affects the whole engagement/product depending on your deduplication scope.", + ) environment = serializers.CharField(required=False) lead = serializers.PrimaryKeyRelatedField( - allow_null=True, - default=None, - queryset=User.objects.all()) - tags = TagListSerializerField(required=False, help_text="Modify existing tags that help describe this scan. (Existing test tags will be overwritten)") + allow_null=True, default=None, queryset=User.objects.all() + ) + tags = TagListSerializerField( + required=False, + help_text="Modify existing tags that help describe this scan. (Existing test tags will be overwritten)", + ) - group_by = serializers.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text='Choose an option to automatically group new findings by the chosen option.') - create_finding_groups_for_all_findings = serializers.BooleanField(help_text="If set to false, finding groups will only be created when there is more than one grouped finding", required=False, default=True) + group_by = serializers.ChoiceField( + required=False, + choices=Finding_Group.GROUP_BY_OPTIONS, + help_text="Choose an option to automatically group new findings by the chosen option.", + ) + create_finding_groups_for_all_findings = serializers.BooleanField( + help_text="If set to false, finding groups will only be created when there is more than one grouped finding", + required=False, + default=True, + ) # extra fields populated in response - # need to use the _id suffix as without the serializer framework gets confused + # need to use the _id suffix as without the serializer framework gets + # confused test_id = serializers.IntegerField(read_only=True) - engagement_id = serializers.IntegerField(read_only=True) # need to use the _id suffix as without the serializer framework gets confused + engagement_id = serializers.IntegerField( + read_only=True + ) # need to use the _id suffix as without the serializer framework gets confused product_id = serializers.IntegerField(read_only=True) product_type_id = serializers.IntegerField(read_only=True) statistics = ImportStatisticsSerializer(read_only=True, required=False) def save(self, push_to_jira=False): - logger.debug('push_to_jira: %s', push_to_jira) + logger.debug("push_to_jira: %s", push_to_jira) data = self.validated_data - scan_type = data.get('scan_type') - endpoint_to_add = data.get('endpoint_to_add') - minimum_severity = data.get('minimum_severity') - scan_date = data.get('scan_date', None) - close_old_findings = data.get('close_old_findings') - close_old_findings_product_scope = data.get('close_old_findings_product_scope') - do_not_reactivate = data.get('do_not_reactivate', False) - version = data.get('version', None) - build_id = data.get('build_id', None) - branch_tag = data.get('branch_tag', None) - commit_hash = data.get('commit_hash', None) - api_scan_configuration = data.get('api_scan_configuration', None) - service = data.get('service', None) - lead = 
data.get('lead', None) - tags = data.get('tags', None) - environment_name = data.get('environment', 'Development') - environment = Development_Environment.objects.get(name=environment_name) - scan = data.get('file', None) + scan_type = data.get("scan_type") + endpoint_to_add = data.get("endpoint_to_add") + minimum_severity = data.get("minimum_severity") + scan_date = data.get("scan_date", None) + close_old_findings = data.get("close_old_findings") + close_old_findings_product_scope = data.get( + "close_old_findings_product_scope" + ) + do_not_reactivate = data.get("do_not_reactivate", False) + version = data.get("version", None) + build_id = data.get("build_id", None) + branch_tag = data.get("branch_tag", None) + commit_hash = data.get("commit_hash", None) + api_scan_configuration = data.get("api_scan_configuration", None) + service = data.get("service", None) + lead = data.get("lead", None) + tags = data.get("tags", None) + environment_name = data.get("environment", "Development") + environment = Development_Environment.objects.get( + name=environment_name + ) + scan = data.get("file", None) endpoints_to_add = [endpoint_to_add] if endpoint_to_add else None - source_code_management_uri = data.get('source_code_management_uri', None) - engagement_end_date = data.get('engagement_end_date', None) + source_code_management_uri = data.get( + "source_code_management_uri", None + ) + engagement_end_date = data.get("engagement_end_date", None) - if 'active' in self.initial_data: - active = data.get('active') + if "active" in self.initial_data: + active = data.get("active") else: active = None - if 'verified' in self.initial_data: - verified = data.get('verified') + if "verified" in self.initial_data: + verified = data.get("verified") else: verified = None - group_by = data.get('group_by', None) - create_finding_groups_for_all_findings = data.get('create_finding_groups_for_all_findings', True) + group_by = data.get("group_by", None) + create_finding_groups_for_all_findings = data.get( + "create_finding_groups_for_all_findings", True + ) - test_id, test_title, scan_type, _, engagement_name, product_name, product_type_name, auto_create_context, deduplication_on_engagement, do_not_reactivate = get_import_meta_data_from_dict(data) + ( + test_id, + test_title, + scan_type, + _, + engagement_name, + product_name, + product_type_name, + auto_create_context, + deduplication_on_engagement, + do_not_reactivate, + ) = get_import_meta_data_from_dict(data) # we passed validation, so the test is present product = get_target_product_if_exists(product_name) - engagement = get_target_engagement_if_exists(None, engagement_name, product) - test = get_target_test_if_exists(test_id, test_title, scan_type, engagement) + engagement = get_target_engagement_if_exists( + None, engagement_name, product + ) + test = get_target_test_if_exists( + test_id, test_title, scan_type, engagement + ) - # have to make the scan_date_time timezone aware otherwise uploads via the API would fail (but unit tests for api upload would pass...) - scan_date_time = timezone.make_aware(datetime.combine(scan_date, datetime.min.time())) if scan_date else None + # have to make the scan_date_time timezone aware otherwise uploads via + # the API would fail (but unit tests for api upload would pass...) 
+ scan_date_time = ( + timezone.make_aware( + datetime.combine(scan_date, datetime.min.time()) + ) + if scan_date + else None + ) statistics_before, statistics_delta = None, None try: @@ -1842,57 +2485,102 @@ def save(self, push_to_jira=False): # reimport into provided / latest test statistics_before = test.statistics reimporter = ReImporter() - test, finding_count, new_finding_count, closed_finding_count, reactivated_finding_count, untouched_finding_count, test_import = \ - reimporter.reimport_scan(scan, scan_type, test, active=active, verified=verified, - tags=tags, minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, scan_date=scan_date_time, - version=version, branch_tag=branch_tag, build_id=build_id, - commit_hash=commit_hash, push_to_jira=push_to_jira, - close_old_findings=close_old_findings, - group_by=group_by, api_scan_configuration=api_scan_configuration, - service=service, do_not_reactivate=do_not_reactivate, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings) + ( + test, + finding_count, + new_finding_count, + closed_finding_count, + reactivated_finding_count, + untouched_finding_count, + test_import, + ) = reimporter.reimport_scan( + scan, + scan_type, + test, + active=active, + verified=verified, + tags=tags, + minimum_severity=minimum_severity, + endpoints_to_add=endpoints_to_add, + scan_date=scan_date_time, + version=version, + branch_tag=branch_tag, + build_id=build_id, + commit_hash=commit_hash, + push_to_jira=push_to_jira, + close_old_findings=close_old_findings, + group_by=group_by, + api_scan_configuration=api_scan_configuration, + service=service, + do_not_reactivate=do_not_reactivate, + create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, + ) if test_import: statistics_delta = test_import.statistics elif auto_create_context: # perform Import to create test - logger.debug('reimport for non-existing test, using import to create new test') - engagement = get_or_create_engagement(None, engagement_name, product_name, product_type_name, auto_create_context, - deduplication_on_engagement, source_code_management_uri=source_code_management_uri, target_end=engagement_end_date) + logger.debug( + "reimport for non-existing test, using import to create new test" + ) + engagement = get_or_create_engagement( + None, + engagement_name, + product_name, + product_type_name, + auto_create_context, + deduplication_on_engagement, + source_code_management_uri=source_code_management_uri, + target_end=engagement_end_date, + ) importer = Importer() - test, finding_count, closed_finding_count, _ = importer.import_scan(scan, scan_type, engagement, lead, environment, - active=active, verified=verified, tags=tags, - minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, - scan_date=scan_date_time, version=version, - branch_tag=branch_tag, build_id=build_id, - commit_hash=commit_hash, - push_to_jira=push_to_jira, - close_old_findings=close_old_findings, - close_old_findings_product_scope=close_old_findings_product_scope, - group_by=group_by, - api_scan_configuration=api_scan_configuration, - service=service, - title=test_title, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings) + ( + test, + finding_count, + closed_finding_count, + _, + ) = importer.import_scan( + scan, + scan_type, + engagement, + lead, + environment, + active=active, + verified=verified, + tags=tags, + minimum_severity=minimum_severity, + endpoints_to_add=endpoints_to_add, + scan_date=scan_date_time, + 
version=version, + branch_tag=branch_tag, + build_id=build_id, + commit_hash=commit_hash, + push_to_jira=push_to_jira, + close_old_findings=close_old_findings, + close_old_findings_product_scope=close_old_findings_product_scope, + group_by=group_by, + api_scan_configuration=api_scan_configuration, + service=service, + title=test_title, + create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, + ) else: # should be captured by validation / permission check already - raise NotFound('test not found') + raise NotFound("test not found") if test: - data['test'] = test - data['test_id'] = test.id - data['engagement_id'] = test.engagement.id - data['product_id'] = test.engagement.product.id - data['product_type_id'] = test.engagement.product.prod_type.id - data['statistics'] = {} + data["test"] = test + data["test_id"] = test.id + data["engagement_id"] = test.engagement.id + data["product_id"] = test.engagement.product.id + data["product_type_id"] = test.engagement.product.prod_type.id + data["statistics"] = {} if statistics_before: - data['statistics']['before'] = statistics_before + data["statistics"]["before"] = statistics_before if statistics_delta: - data['statistics']['delta'] = statistics_delta - data['statistics']['after'] = test.statistics + data["statistics"]["delta"] = statistics_delta + data["statistics"]["after"] = test.statistics # convert to exception otherwise django rest framework will swallow them as 400 error # exceptions are already logged in the importer @@ -1905,66 +2593,94 @@ def validate(self, data): scan_type = data.get("scan_type") file = data.get("file") if not file and requires_file(scan_type): - raise serializers.ValidationError('Uploading a Report File is required for {}'.format(scan_type)) + raise serializers.ValidationError( + "Uploading a Report File is required for {}".format(scan_type) + ) if file and is_scan_file_too_large(file): raise serializers.ValidationError( - 'Report file is too large. Maximum supported size is {} MB'.format(settings.SCAN_FILE_MAX_SIZE)) + "Report file is too large. Maximum supported size is {} MB".format( + settings.SCAN_FILE_MAX_SIZE + ) + ) tool_type = requires_tool_type(scan_type) if tool_type: - api_scan_configuration = data.get('api_scan_configuration') - if api_scan_configuration and tool_type != api_scan_configuration.tool_configuration.tool_type.name: - raise serializers.ValidationError(f'API scan configuration must be of tool type {tool_type}') + api_scan_configuration = data.get("api_scan_configuration") + if ( + api_scan_configuration + and tool_type + != api_scan_configuration.tool_configuration.tool_type.name + ): + raise serializers.ValidationError( + f"API scan configuration must be of tool type {tool_type}" + ) return data def validate_scan_date(self, value): if value and value > timezone.localdate(): raise serializers.ValidationError( - 'The scan_date cannot be in the future!') + "The scan_date cannot be in the future!" 
+ ) return value class EndpointMetaImporterSerializer(serializers.Serializer): - file = serializers.FileField( - required=True) - create_endpoints = serializers.BooleanField( - default=True, - required=False) - create_tags = serializers.BooleanField( - default=True, - required=False) - create_dojo_meta = serializers.BooleanField( - default=False, - required=False) + file = serializers.FileField(required=True) + create_endpoints = serializers.BooleanField(default=True, required=False) + create_tags = serializers.BooleanField(default=True, required=False) + create_dojo_meta = serializers.BooleanField(default=False, required=False) product_name = serializers.CharField(required=False) product = serializers.PrimaryKeyRelatedField( - queryset=Product.objects.all(), required=False) + queryset=Product.objects.all(), required=False + ) # extra fields populated in response - # need to use the _id suffix as without the serializer framework gets confused + # need to use the _id suffix as without the serializer framework gets + # confused product_id = serializers.IntegerField(read_only=True) def validate(self, data): file = data.get("file") if file and is_scan_file_too_large(file): raise serializers.ValidationError( - 'Report file is too large. Maximum supported size is {} MB'.format(settings.SCAN_FILE_MAX_SIZE)) + "Report file is too large. Maximum supported size is {} MB".format( + settings.SCAN_FILE_MAX_SIZE + ) + ) return data def save(self): data = self.validated_data - file = data.get('file') - - create_endpoints = data.get('create_endpoints', True) - create_tags = data.get('create_tags', True) - create_dojo_meta = data.get('create_dojo_meta', False) + file = data.get("file") - _, _, _, _, _, product_name, _, _, _, _ = get_import_meta_data_from_dict(data) + create_endpoints = data.get("create_endpoints", True) + create_tags = data.get("create_tags", True) + create_dojo_meta = data.get("create_dojo_meta", False) + + ( + _, + _, + _, + _, + _, + product_name, + _, + _, + _, + _, + ) = get_import_meta_data_from_dict(data) product = get_target_product_if_exists(product_name) if not product: product_id = get_product_id_from_dict(data) product = get_target_product_by_id_if_exists(product_id) try: - endpoint_meta_import(file, product, create_endpoints, create_tags, create_dojo_meta, origin='API') + endpoint_meta_import( + file, + product, + create_endpoints, + create_tags, + create_dojo_meta, + origin="API", + ) except SyntaxError as se: raise Exception(se) except ValueError as ve: @@ -1972,100 +2688,115 @@ def save(self): class LanguageTypeSerializer(serializers.ModelSerializer): - class Meta: model = Language_Type - fields = '__all__' + fields = "__all__" class LanguageSerializer(serializers.ModelSerializer): - class Meta: model = Languages - fields = '__all__' + fields = "__all__" class ImportLanguagesSerializer(serializers.Serializer): - product = serializers.PrimaryKeyRelatedField(queryset=Product.objects.all(), required=True) + product = serializers.PrimaryKeyRelatedField( + queryset=Product.objects.all(), required=True + ) file = serializers.FileField(required=True) def save(self): data = self.validated_data - product = data['product'] - languages = data['file'] + product = data["product"] + languages = data["file"] try: data = languages.read() try: - deserialized = json.loads(str(data, 'utf-8')) - except: + deserialized = json.loads(str(data, "utf-8")) + except Exception: deserialized = json.loads(data) - except: + except Exception: raise Exception("Invalid format") 
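The loop that follows iterates over a cloc-style JSON report keyed by language name, skipping the "header" and "SUM" entries and reading the nFiles/blank/comment/code counters for each language. A sketch of the payload shape it expects (values are illustrative only; the producing tool is not part of this diff):

    languages_json = {
        "header": {"cloc_version": "1.90"},  # tool metadata, skipped by the loop
        "Python": {"nFiles": 120, "blank": 3500, "comment": 2100, "code": 18000},
        "Java": {"nFiles": 14, "blank": 410, "comment": 220, "code": 2600},
        "SUM": {"nFiles": 134, "blank": 3910, "comment": 2320, "code": 20600},  # totals, skipped
    }
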
Languages.objects.filter(product=product).delete() for name in deserialized: - if name not in ['header', 'SUM']: + if name not in ["header", "SUM"]: element = deserialized[name] try: - language_type, created = Language_Type.objects.get_or_create(language=name) + ( + language_type, + created, + ) = Language_Type.objects.get_or_create(language=name) except Language_Type.MultipleObjectsReturned: - language_type = Language_Type.objects.filter(language=name).first() + language_type = Language_Type.objects.filter( + language=name + ).first() language = Languages() language.product = product language.language = language_type - language.files = element.get('nFiles', 0) - language.blank = element.get('blank', 0) - language.comment = element.get('comment', 0) - language.code = element.get('code', 0) + language.files = element.get("nFiles", 0) + language.blank = element.get("blank", 0) + language.comment = element.get("comment", 0) + language.code = element.get("code", 0) language.save() def validate(self, data): - if is_scan_file_too_large(data['file']): + if is_scan_file_too_large(data["file"]): raise serializers.ValidationError( - 'File is too large. Maximum supported size is {} MB'.format(settings.SCAN_FILE_MAX_SIZE)) + "File is too large. Maximum supported size is {} MB".format( + settings.SCAN_FILE_MAX_SIZE + ) + ) return data class AddNewNoteOptionSerializer(serializers.ModelSerializer): - class Meta: model = Notes - fields = ['entry', 'private', 'note_type'] + fields = ["entry", "private", "note_type"] class AddNewFileOptionSerializer(serializers.ModelSerializer): - class Meta: model = FileUpload - fields = '__all__' + fields = "__all__" class FindingToNotesSerializer(serializers.Serializer): - finding_id = serializers.PrimaryKeyRelatedField(queryset=Finding.objects.all(), many=False, allow_null=True) + finding_id = serializers.PrimaryKeyRelatedField( + queryset=Finding.objects.all(), many=False, allow_null=True + ) notes = NoteSerializer(many=True) class FindingToFilesSerializer(serializers.Serializer): - finding_id = serializers.PrimaryKeyRelatedField(queryset=Finding.objects.all(), many=False, allow_null=True) + finding_id = serializers.PrimaryKeyRelatedField( + queryset=Finding.objects.all(), many=False, allow_null=True + ) files = FileSerializer(many=True) def to_representation(self, data): - finding = data.get('finding_id') - files = data.get('files') + finding = data.get("finding_id") + files = data.get("files") new_files = [] for file in files: - new_files.append({ - 'id': file.id, - 'file': '{site_url}/{file_access_url}'.format( - site_url=settings.SITE_URL, - file_access_url=file.get_accessible_url(finding, finding.id)), - 'title': file.title - }) - new_data = {'finding_id': finding.id, 'files': new_files} + new_files.append( + { + "id": file.id, + "file": "{site_url}/{file_access_url}".format( + site_url=settings.SITE_URL, + file_access_url=file.get_accessible_url( + finding, finding.id + ), + ), + "title": file.title, + } + ) + new_data = {"finding_id": finding.id, "files": new_files} return new_data @@ -2078,7 +2809,13 @@ class FindingCloseSerializer(serializers.ModelSerializer): class Meta: model = Finding - fields = ('is_mitigated', 'mitigated', 'false_p', 'out_of_scope', 'duplicate') + fields = ( + "is_mitigated", + "mitigated", + "false_p", + "out_of_scope", + "duplicate", + ) class ReportGenerateOptionSerializer(serializers.Serializer): @@ -2096,7 +2833,9 @@ class ExecutiveSummarySerializer(serializers.Serializer): test_target_start = serializers.DateTimeField() 
test_target_end = serializers.DateTimeField() test_environment_name = serializers.CharField(max_length=200) - test_strategy_ref = serializers.URLField(max_length=200, min_length=None, allow_blank=True) + test_strategy_ref = serializers.URLField( + max_length=200, min_length=None, allow_blank=True + ) total_findings = serializers.IntegerField() @@ -2116,7 +2855,9 @@ class ReportGenerateSerializer(serializers.Serializer): title = serializers.CharField(max_length=200) user_id = serializers.IntegerField() host = serializers.CharField(max_length=200) - finding_notes = FindingToNotesSerializer(many=True, allow_null=True, required=False) + finding_notes = FindingToNotesSerializer( + many=True, allow_null=True, required=False + ) class TagSerializer(serializers.Serializer): @@ -2124,25 +2865,26 @@ class TagSerializer(serializers.Serializer): class SystemSettingsSerializer(TaggitSerializer, serializers.ModelSerializer): - class Meta: model = System_Settings - fields = '__all__' + fields = "__all__" def validate(self, data): - if self.instance is not None: default_group = self.instance.default_group default_group_role = self.instance.default_group_role - if 'default_group' in data: - default_group = data['default_group'] - if 'default_group_role' in data: - default_group_role = data['default_group_role'] + if "default_group" in data: + default_group = data["default_group"] + if "default_group_role" in data: + default_group_role = data["default_group_role"] - if (default_group is None and default_group_role is not None) or \ - (default_group is not None and default_group_role is None): - raise ValidationError('default_group and default_group_role must either both be set or both be empty.') + if (default_group is None and default_group_role is not None) or ( + default_group is not None and default_group_role is None + ): + raise ValidationError( + "default_group and default_group_role must either both be set or both be empty." 
+ ) return data @@ -2152,36 +2894,71 @@ class FindingNoteSerializer(serializers.Serializer): class NotificationsSerializer(serializers.ModelSerializer): - - product = serializers.PrimaryKeyRelatedField(queryset=Product.objects.all(), - required=False, - default=None, - allow_null=True) - user = serializers.PrimaryKeyRelatedField(queryset=Dojo_User.objects.all(), - required=False, - default=None, - allow_null=True) - product_type_added = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - product_added = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - engagement_added = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - test_added = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - scan_added = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - jira_update = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - upcoming_engagement = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - stale_engagement = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - auto_close_engagement = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - close_engagement = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - user_mentioned = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - code_review = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - review_requested = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - other = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - sla_breach = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) - risk_acceptance_expiration = MultipleChoiceField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION) + product = serializers.PrimaryKeyRelatedField( + queryset=Product.objects.all(), + required=False, + default=None, + allow_null=True, + ) + user = serializers.PrimaryKeyRelatedField( + queryset=Dojo_User.objects.all(), + required=False, + default=None, + allow_null=True, + ) + product_type_added = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + product_added = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + engagement_added = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + test_added = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + scan_added = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + jira_update = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + upcoming_engagement = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + stale_engagement = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + auto_close_engagement = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + close_engagement = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + user_mentioned = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + code_review = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) 
+ review_requested = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + other = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + sla_breach = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) + risk_acceptance_expiration = MultipleChoiceField( + choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION + ) template = serializers.BooleanField(default=False) class Meta: model = Notifications - fields = '__all__' + fields = "__all__" def validate(self, data): user = None @@ -2191,16 +2968,27 @@ def validate(self, data): user = self.instance.user product = self.instance.product - if 'user' in data: - user = data.get('user') - if 'product' in data: - product = data.get('product') - - if self.instance is None or user != self.instance.user or product != self.instance.product: - notifications = Notifications.objects.filter(user=user, product=product, template=False).count() + if "user" in data: + user = data.get("user") + if "product" in data: + product = data.get("product") + + if ( + self.instance is None + or user != self.instance.user + or product != self.instance.product + ): + notifications = Notifications.objects.filter( + user=user, product=product, template=False + ).count() if notifications > 0: - raise ValidationError("Notification for user and product already exists") - if data.get('template') and Notifications.objects.filter(template=True).count() > 0: + raise ValidationError( + "Notification for user and product already exists" + ) + if ( + data.get("template") + and Notifications.objects.filter(template=True).count() > 0 + ): raise ValidationError("Notification template already exists") return data @@ -2209,19 +2997,19 @@ def validate(self, data): class EngagementPresetsSerializer(serializers.ModelSerializer): class Meta: model = Engagement_Presets - fields = '__all__' + fields = "__all__" class NetworkLocationsSerializer(serializers.ModelSerializer): class Meta: model = Network_Locations - fields = '__all__' + fields = "__all__" class SLAConfigurationSerializer(serializers.ModelSerializer): class Meta: model = SLA_Configuration - fields = '__all__' + fields = "__all__" class UserProfileSerializer(serializers.Serializer): @@ -2242,7 +3030,7 @@ class DeletePreviewSerializer(serializers.Serializer): class ConfigurationPermissionSerializer(serializers.ModelSerializer): class Meta: model = Permission - exclude = ('content_type', ) + exclude = ("content_type",) class QuestionnaireQuestionSerializer(serializers.ModelSerializer): @@ -2256,19 +3044,19 @@ def to_representation(self, instance): class Meta: model = Question - exclude = ('polymorphic_ctype', ) + exclude = ("polymorphic_ctype",) class QuestionSerializer(serializers.ModelSerializer): class Meta: model = Question - exclude = ('polymorphic_ctype', ) + exclude = ("polymorphic_ctype",) class TextQuestionSerializer(serializers.ModelSerializer): class Meta: model = TextQuestion - exclude = ('polymorphic_ctype', ) + exclude = ("polymorphic_ctype",) class ChoiceQuestionSerializer(serializers.ModelSerializer): @@ -2276,14 +3064,13 @@ class ChoiceQuestionSerializer(serializers.ModelSerializer): class Meta: model = ChoiceQuestion - exclude = ('polymorphic_ctype', ) + exclude = ("polymorphic_ctype",) class QuestionnaireAnsweredSurveySerializer(serializers.ModelSerializer): - class Meta: model = Answered_Survey - fields = '__all__' + fields = "__all__" class QuestionnaireAnswerSerializer(serializers.ModelSerializer): @@ -2297,7 
+3084,7 @@ def to_representation(self, instance): class Meta: model = Answer - exclude = ('polymorphic_ctype', ) + exclude = ("polymorphic_ctype",) class AnswerSerializer(serializers.ModelSerializer): @@ -2306,7 +3093,7 @@ class AnswerSerializer(serializers.ModelSerializer): class Meta: model = Answer - exclude = ('polymorphic_ctype', ) + exclude = ("polymorphic_ctype",) class TextAnswerSerializer(serializers.ModelSerializer): @@ -2315,7 +3102,7 @@ class TextAnswerSerializer(serializers.ModelSerializer): class Meta: model = TextAnswer - exclude = ('polymorphic_ctype', ) + exclude = ("polymorphic_ctype",) class ChoiceAnswerSerializer(serializers.ModelSerializer): @@ -2325,14 +3112,16 @@ class ChoiceAnswerSerializer(serializers.ModelSerializer): class Meta: model = ChoiceAnswer - exclude = ('polymorphic_ctype', ) + exclude = ("polymorphic_ctype",) class QuestionnaireEngagementSurveySerializer(serializers.ModelSerializer): questions = serializers.SerializerMethodField() @extend_schema_field(serializers.ListField(child=serializers.CharField())) - @swagger_serializer_method(serializers.ListField(child=serializers.CharField())) + @swagger_serializer_method( + serializers.ListField(child=serializers.CharField()) + ) def get_questions(self, obj): questions = obj.questions.all() formated_questions = [] @@ -2343,7 +3132,7 @@ def get_questions(self, obj): class Meta: model = Engagement_Survey - fields = '__all__' + fields = "__all__" class QuestionnaireGeneralSurveySerializer(serializers.ModelSerializer): @@ -2351,4 +3140,4 @@ class QuestionnaireGeneralSurveySerializer(serializers.ModelSerializer): class Meta: model = General_Survey - fields = '__all__' + fields = "__all__" diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 3c1a936d6a..3104ecf4cc 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -21,49 +21,152 @@ import base64 import mimetypes from dojo.engagement.services import close_engagement, reopen_engagement -from dojo.importers.reimporter.utils import get_target_engagement_if_exists, get_target_product_if_exists, get_target_test_if_exists -from dojo.models import Language_Type, Languages, Notifications, Product, Product_Type, Engagement, SLA_Configuration, \ - Test, Test_Import, Test_Type, Finding, \ - User, Stub_Finding, Finding_Template, Notes, \ - JIRA_Issue, Tool_Product_Settings, Tool_Configuration, Tool_Type, \ - Endpoint, JIRA_Project, JIRA_Instance, DojoMeta, Development_Environment, \ - Dojo_User, Note_Type, System_Settings, App_Analysis, Endpoint_Status, \ - Sonarqube_Issue, Sonarqube_Issue_Transition, Regulation, Risk_Acceptance, \ - BurpRawRequestResponse, FileUpload, Product_Type_Member, Product_Member, Dojo_Group, \ - Product_Group, Product_Type_Group, Role, Global_Role, Dojo_Group_Member, Engagement_Presets, Network_Locations, \ - UserContactInfo, Product_API_Scan_Configuration, Cred_Mapping, Cred_User, Question, Answer, \ - Engagement_Survey, Answered_Survey, General_Survey, Check_List +from dojo.importers.reimporter.utils import ( + get_target_engagement_if_exists, + get_target_product_if_exists, + get_target_test_if_exists, +) +from dojo.models import ( + Language_Type, + Languages, + Notifications, + Product, + Product_Type, + Engagement, + SLA_Configuration, + Test, + Test_Import, + Test_Type, + Finding, + User, + Stub_Finding, + Finding_Template, + Notes, + JIRA_Issue, + Tool_Product_Settings, + Tool_Configuration, + Tool_Type, + Endpoint, + JIRA_Project, + JIRA_Instance, + DojoMeta, + Development_Environment, + Dojo_User, + Note_Type, + 
System_Settings, + App_Analysis, + Endpoint_Status, + Sonarqube_Issue, + Sonarqube_Issue_Transition, + Regulation, + Risk_Acceptance, + BurpRawRequestResponse, + FileUpload, + Product_Type_Member, + Product_Member, + Dojo_Group, + Product_Group, + Product_Type_Group, + Role, + Global_Role, + Dojo_Group_Member, + Engagement_Presets, + Network_Locations, + UserContactInfo, + Product_API_Scan_Configuration, + Cred_Mapping, + Cred_User, + Question, + Answer, + Engagement_Survey, + Answered_Survey, + General_Survey, + Check_List, +) from dojo.endpoint.views import get_endpoint_ids -from dojo.reports.views import report_url_resolver, prefetch_related_findings_for_report -from dojo.finding.views import set_finding_as_original_internal, reset_finding_duplicate_status_internal, \ - duplicate_cluster -from dojo.filters import ReportFindingFilter, ApiCredentialsFilter, \ - ApiFindingFilter, ApiProductFilter, ApiEngagementFilter, ApiEndpointFilter, \ - ApiAppAnalysisFilter, ApiTestFilter, ApiTemplateFindingFilter, ApiRiskAcceptanceFilter +from dojo.reports.views import ( + report_url_resolver, + prefetch_related_findings_for_report, +) +from dojo.finding.views import ( + set_finding_as_original_internal, + reset_finding_duplicate_status_internal, + duplicate_cluster, +) +from dojo.filters import ( + ReportFindingFilter, + ApiCredentialsFilter, + ApiFindingFilter, + ApiProductFilter, + ApiEngagementFilter, + ApiEndpointFilter, + ApiAppAnalysisFilter, + ApiTestFilter, + ApiTemplateFindingFilter, + ApiRiskAcceptanceFilter, +) from dojo.risk_acceptance import api as ra_api from dateutil.relativedelta import relativedelta from django.conf import settings from datetime import datetime -from dojo.utils import get_period_counts_legacy, get_system_setting, get_setting, async_delete -from dojo.api_v2 import serializers, permissions, prefetch, schema, mixins as dojo_mixins +from dojo.utils import ( + get_period_counts_legacy, + get_system_setting, + get_setting, + async_delete, +) +from dojo.api_v2 import ( + serializers, + permissions, + prefetch, + schema, + mixins as dojo_mixins, +) import dojo.jira_link.helper as jira_helper import logging import tagulous -from dojo.product_type.queries import get_authorized_product_types, get_authorized_product_type_members, \ - get_authorized_product_type_groups -from dojo.product.queries import get_authorized_products, get_authorized_app_analysis, get_authorized_dojo_meta, \ - get_authorized_product_members, get_authorized_product_groups, get_authorized_languages, \ - get_authorized_engagement_presets, get_authorized_product_api_scan_configurations +from dojo.product_type.queries import ( + get_authorized_product_types, + get_authorized_product_type_members, + get_authorized_product_type_groups, +) +from dojo.product.queries import ( + get_authorized_products, + get_authorized_app_analysis, + get_authorized_dojo_meta, + get_authorized_product_members, + get_authorized_product_groups, + get_authorized_languages, + get_authorized_engagement_presets, + get_authorized_product_api_scan_configurations, +) from dojo.engagement.queries import get_authorized_engagements from dojo.risk_acceptance.queries import get_authorized_risk_acceptances from dojo.test.queries import get_authorized_tests, get_authorized_test_imports -from dojo.finding.queries import get_authorized_findings, get_authorized_stub_findings -from dojo.endpoint.queries import get_authorized_endpoints, get_authorized_endpoint_status -from dojo.group.queries import get_authorized_groups, get_authorized_group_members 
-from dojo.jira_link.queries import get_authorized_jira_projects, get_authorized_jira_issues +from dojo.finding.queries import ( + get_authorized_findings, + get_authorized_stub_findings, +) +from dojo.endpoint.queries import ( + get_authorized_endpoints, + get_authorized_endpoint_status, +) +from dojo.group.queries import ( + get_authorized_groups, + get_authorized_group_members, +) +from dojo.jira_link.queries import ( + get_authorized_jira_projects, + get_authorized_jira_issues, +) from dojo.tool_product.queries import get_authorized_tool_product_settings from dojo.cred.queries import get_authorized_cred_mappings -from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema, extend_schema_view +from drf_spectacular.utils import ( + OpenApiParameter, + OpenApiResponse, + extend_schema, + extend_schema_view, +) from dojo.authorization.roles_permissions import Permissions from dojo.user.utils import get_configuration_permissions_codenames @@ -71,45 +174,64 @@ # Authorization: authenticated users -class RoleViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - viewsets.GenericViewSet): +class RoleViewSet( + mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet +): serializer_class = serializers.RoleSerializer queryset = Role.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'name'] - permission_classes = (IsAuthenticated, ) + filterset_fields = ["id", "name"] + permission_classes = (IsAuthenticated,) # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class DojoGroupViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class DojoGroupViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.DojoGroupSerializer queryset = Dojo_Group.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'name', 'social_provider'] - swagger_schema = prefetch.get_prefetch_schema(["dojo_groups_list", "dojo_groups_read"], - serializers.DojoGroupSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasDojoGroupPermission) + filterset_fields = ["id", "name", 
"social_provider"] + swagger_schema = prefetch.get_prefetch_schema( + ["dojo_groups_list", "dojo_groups_read"], + serializers.DojoGroupSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasDojoGroupPermission, + ) def get_queryset(self): return get_authorized_groups(Permissions.Group_View).distinct() @@ -117,78 +239,108 @@ def get_queryset(self): # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class DojoGroupMemberViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class DojoGroupMemberViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.DojoGroupMemberSerializer queryset = Dojo_Group_Member.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'group_id', 'user_id'] - swagger_schema = prefetch.get_prefetch_schema(["dojo_group_members_list", "dojo_group_members_read"], - serializers.DojoGroupMemberSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasDojoGroupMemberPermission) + filterset_fields = ["id", "group_id", "user_id"] + swagger_schema = prefetch.get_prefetch_schema( + ["dojo_group_members_list", "dojo_group_members_read"], + serializers.DojoGroupMemberSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasDojoGroupMemberPermission, + ) def get_queryset(self): return get_authorized_group_members(Permissions.Group_View).distinct() def partial_update(self, request, pk=None): # Object authorization won't work if not all data is provided - response = {'message': 'Patch function is not offered in this path.'} + response = {"message": "Patch function is not offered in this path."} return Response(response, status=status.HTTP_405_METHOD_NOT_ALLOWED) # Authorization: superuser -class GlobalRoleViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class GlobalRoleViewSet( + prefetch.PrefetchListMixin, + 
prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.GlobalRoleSerializer queryset = Global_Role.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'user', 'group', 'role'] - swagger_schema = prefetch.get_prefetch_schema(["global_roles_list", "global_roles_read"], - serializers.GlobalRoleSerializer).to_schema() + filterset_fields = ["id", "user", "group", "role"] + swagger_schema = prefetch.get_prefetch_schema( + ["global_roles_list", "global_roles_read"], + serializers.GlobalRoleSerializer, + ).to_schema() permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) # Authorization: object-based -class EndPointViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class EndPointViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.EndpointSerializer queryset = Endpoint.objects.none() filter_backends = (DjangoFilterBackend,) filterset_class = ApiEndpointFilter - swagger_schema = prefetch.get_prefetch_schema(["endpoints_list", "endpoints_read"], serializers.EndpointSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasEndpointPermission) + swagger_schema = prefetch.get_prefetch_schema( + ["endpoints_list", "endpoints_read"], serializers.EndpointSerializer + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasEndpointPermission, + ) def get_queryset(self): return get_authorized_endpoints(Permissions.Endpoint_View).distinct() @@ -201,21 +353,34 @@ def get_queryset(self): request_body=serializers.ReportGenerateOptionSerializer, responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer}, ) - @action(detail=True, methods=['post'], permission_classes=[IsAuthenticated]) + @action( + detail=True, methods=["post"], permission_classes=[IsAuthenticated] + ) def generate_report(self, request, pk=None): endpoint = self.get_object() options = {} # prepare post data - report_options = serializers.ReportGenerateOptionSerializer(data=request.data) + report_options = serializers.ReportGenerateOptionSerializer( + data=request.data + ) if report_options.is_valid(): - options['include_finding_notes'] = report_options.validated_data['include_finding_notes'] - options['include_finding_images'] = report_options.validated_data['include_finding_images'] - options['include_executive_summary'] = report_options.validated_data['include_executive_summary'] - options['include_table_of_contents'] = report_options.validated_data['include_table_of_contents'] + options["include_finding_notes"] = report_options.validated_data[ + "include_finding_notes" + ] + options["include_finding_images"] = report_options.validated_data[ + "include_finding_images" + ] + options[ + "include_executive_summary" + ] = report_options.validated_data["include_executive_summary"] + options[ + "include_table_of_contents" + ] = 
report_options.validated_data["include_table_of_contents"] else: - return Response(report_options.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + report_options.errors, status=status.HTTP_400_BAD_REQUEST + ) data = report_generate(request, endpoint, options) report = serializers.ReportGenerateSerializer(data) @@ -223,45 +388,78 @@ def generate_report(self, request, pk=None): # Authorization: object-based -class EndpointStatusViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class EndpointStatusViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.EndpointStatusSerializer queryset = Endpoint_Status.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['mitigated', 'false_positive', 'out_of_scope', 'risk_accepted', 'mitigated_by', 'finding', 'endpoint'] - swagger_schema = prefetch.get_prefetch_schema(["endpoint_status_list", "endpoint_status_read"], serializers.EndpointStatusSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasEndpointStatusPermission) + filterset_fields = [ + "mitigated", + "false_positive", + "out_of_scope", + "risk_accepted", + "mitigated_by", + "finding", + "endpoint", + ] + swagger_schema = prefetch.get_prefetch_schema( + ["endpoint_status_list", "endpoint_status_read"], + serializers.EndpointStatusSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasEndpointStatusPermission, + ) def get_queryset(self): - return get_authorized_endpoint_status(Permissions.Endpoint_View).distinct() + return get_authorized_endpoint_status( + Permissions.Endpoint_View + ).distinct() # Authorization: object-based -class EngagementViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - ra_api.AcceptedRisksMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class EngagementViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + ra_api.AcceptedRisksMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.EngagementSerializer queryset = Engagement.objects.none() filter_backends = (DjangoFilterBackend,) filterset_class = ApiEngagementFilter - swagger_schema = prefetch.get_prefetch_schema(["engagements_list", "engagements_read"], serializers.EngagementSerializer).composeWith( - prefetch.get_prefetch_schema(["engagements_complete_checklist_read"], serializers.EngagementCheckListSerializer) - ).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasEngagementPermission) + swagger_schema = ( + prefetch.get_prefetch_schema( + ["engagements_list", "engagements_read"], + serializers.EngagementSerializer, + ) + .composeWith( + prefetch.get_prefetch_schema( + ["engagements_complete_checklist_read"], + 
serializers.EngagementCheckListSerializer, + ) + ) + .to_schema() + ) + permission_classes = ( + IsAuthenticated, + permissions.UserHasEngagementPermission, + ) @property def risk_application_model_class(self): @@ -277,14 +475,14 @@ def destroy(self, request, *args, **kwargs): return Response(status=status.HTTP_204_NO_CONTENT) def get_queryset(self): - return get_authorized_engagements(Permissions.Engagement_View).prefetch_related( - 'notes', - 'risk_acceptance', - 'files').distinct() + return ( + get_authorized_engagements(Permissions.Engagement_View) + .prefetch_related("notes", "risk_acceptance", "files") + .distinct() + ) @extend_schema( - request=OpenApiTypes.NONE, - responses={status.HTTP_200_OK: ""} + request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""} ) @swagger_auto_schema( request_body=no_body, responses={status.HTTP_200_OK: ""} @@ -296,8 +494,7 @@ def close(self, request, pk=None): return HttpResponse() @extend_schema( - request=OpenApiTypes.NONE, - responses={status.HTTP_200_OK: ""} + request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""} ) @swagger_auto_schema( request_body=no_body, responses={status.HTTP_200_OK: ""} @@ -316,147 +513,199 @@ def reopen(self, request, pk=None): request_body=serializers.ReportGenerateOptionSerializer, responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer}, ) - @action(detail=True, methods=['post'], permission_classes=[IsAuthenticated]) + @action( + detail=True, methods=["post"], permission_classes=[IsAuthenticated] + ) def generate_report(self, request, pk=None): engagement = self.get_object() options = {} # prepare post data - report_options = serializers.ReportGenerateOptionSerializer(data=request.data) + report_options = serializers.ReportGenerateOptionSerializer( + data=request.data + ) if report_options.is_valid(): - options['include_finding_notes'] = report_options.validated_data['include_finding_notes'] - options['include_finding_images'] = report_options.validated_data['include_finding_images'] - options['include_executive_summary'] = report_options.validated_data['include_executive_summary'] - options['include_table_of_contents'] = report_options.validated_data['include_table_of_contents'] + options["include_finding_notes"] = report_options.validated_data[ + "include_finding_notes" + ] + options["include_finding_images"] = report_options.validated_data[ + "include_finding_images" + ] + options[ + "include_executive_summary" + ] = report_options.validated_data["include_executive_summary"] + options[ + "include_table_of_contents" + ] = report_options.validated_data["include_table_of_contents"] else: - return Response(report_options.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + report_options.errors, status=status.HTTP_400_BAD_REQUEST + ) data = report_generate(request, engagement, options) report = serializers.ReportGenerateSerializer(data) return Response(report.data) @extend_schema( - methods=['GET'], - responses={status.HTTP_200_OK: serializers.EngagementToNotesSerializer} + methods=["GET"], + responses={ + status.HTTP_200_OK: serializers.EngagementToNotesSerializer + }, ) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.AddNewNoteOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.NoteSerializer} + responses={status.HTTP_201_CREATED: serializers.NoteSerializer}, ) @swagger_auto_schema( - method='get', - responses={status.HTTP_200_OK: serializers.EngagementToNotesSerializer} + method="get", + responses={ + status.HTTP_200_OK: 
serializers.EngagementToNotesSerializer + }, ) @swagger_auto_schema( - methods=['post'], + methods=["post"], request_body=serializers.AddNewNoteOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.NoteSerializer} + responses={status.HTTP_201_CREATED: serializers.NoteSerializer}, ) @action(detail=True, methods=["get", "post"]) def notes(self, request, pk=None): engagement = self.get_object() - if request.method == 'POST': - new_note = serializers.AddNewNoteOptionSerializer(data=request.data) + if request.method == "POST": + new_note = serializers.AddNewNoteOptionSerializer( + data=request.data + ) if new_note.is_valid(): - entry = new_note.validated_data['entry'] - private = new_note.validated_data.get('private', False) - note_type = new_note.validated_data.get('note_type', None) + entry = new_note.validated_data["entry"] + private = new_note.validated_data.get("private", False) + note_type = new_note.validated_data.get("note_type", None) else: - return Response(new_note.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + new_note.errors, status=status.HTTP_400_BAD_REQUEST + ) author = request.user - note = Notes(entry=entry, author=author, private=private, note_type=note_type) + note = Notes( + entry=entry, + author=author, + private=private, + note_type=note_type, + ) note.save() engagement.notes.add(note) - serialized_note = serializers.NoteSerializer({ - "author": author, "entry": entry, - "private": private - }) - result = serializers.EngagementToNotesSerializer({ - "engagement_id": engagement, "notes": [serialized_note.data] - }) - return Response(serialized_note.data, - status=status.HTTP_201_CREATED) + serialized_note = serializers.NoteSerializer( + {"author": author, "entry": entry, "private": private} + ) + result = serializers.EngagementToNotesSerializer( + {"engagement_id": engagement, "notes": [serialized_note.data]} + ) + return Response( + serialized_note.data, status=status.HTTP_201_CREATED + ) notes = engagement.notes.all() - serialized_notes = serializers.EngagementToNotesSerializer({ - "engagement_id": engagement, "notes": notes - }) - return Response(serialized_notes.data, - status=status.HTTP_200_OK) + serialized_notes = serializers.EngagementToNotesSerializer( + {"engagement_id": engagement, "notes": notes} + ) + return Response(serialized_notes.data, status=status.HTTP_200_OK) @extend_schema( - methods=['GET'], - responses={status.HTTP_200_OK: serializers.EngagementToFilesSerializer} + methods=["GET"], + responses={ + status.HTTP_200_OK: serializers.EngagementToFilesSerializer + }, ) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.AddNewFileOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.FileSerializer} + responses={status.HTTP_201_CREATED: serializers.FileSerializer}, ) @swagger_auto_schema( - method='get', - responses={status.HTTP_200_OK: serializers.EngagementToFilesSerializer} + method="get", + responses={ + status.HTTP_200_OK: serializers.EngagementToFilesSerializer + }, ) @swagger_auto_schema( - method='post', + method="post", request_body=serializers.AddNewFileOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.FileSerializer} + responses={status.HTTP_201_CREATED: serializers.FileSerializer}, + ) + @action( + detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,) ) - @action(detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,)) def files(self, request, pk=None): engagement = self.get_object() - if request.method == 'POST': + if 
request.method == "POST": new_file = serializers.FileSerializer(data=request.data) if new_file.is_valid(): - title = new_file.validated_data['title'] - file = new_file.validated_data['file'] + title = new_file.validated_data["title"] + file = new_file.validated_data["file"] else: - return Response(new_file.errors, status=status.HTTP_400_BAD_REQUEST) + return Response( + new_file.errors, status=status.HTTP_400_BAD_REQUEST + ) file = FileUpload(title=title, file=file) file.save() engagement.files.add(file) serialized_file = serializers.FileSerializer(file) - return Response(serialized_file.data, status=status.HTTP_201_CREATED) + return Response( + serialized_file.data, status=status.HTTP_201_CREATED + ) files = engagement.files.all() - serialized_files = serializers.EngagementToFilesSerializer({ - "engagement_id": engagement, "files": files - }) + serialized_files = serializers.EngagementToFilesSerializer( + {"engagement_id": engagement, "files": files} + ) return Response(serialized_files.data, status=status.HTTP_200_OK) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.EngagementCheckListSerializer, - responses={status.HTTP_201_CREATED: serializers.EngagementCheckListSerializer} + responses={ + status.HTTP_201_CREATED: serializers.EngagementCheckListSerializer + }, ) @swagger_auto_schema( - method='post', + method="post", request_body=serializers.EngagementCheckListSerializer, - responses={status.HTTP_201_CREATED: serializers.EngagementCheckListSerializer} + responses={ + status.HTTP_201_CREATED: serializers.EngagementCheckListSerializer + }, ) @action(detail=True, methods=["get", "post"]) def complete_checklist(self, request, pk=None): from dojo.api_v2.prefetch.prefetcher import _Prefetcher + engagement = self.get_object() check_lists = Check_List.objects.filter(engagement=engagement) - if request.method == 'POST': + if request.method == "POST": if check_lists.count() > 0: - return Response({"message": "A completed checklist for this engagement already exists."}, status=status.HTTP_400_BAD_REQUEST) - check_list = serializers.EngagementCheckListSerializer(data=request.data) + return Response( + { + "message": "A completed checklist for this engagement already exists." 
+                    },
+                    status=status.HTTP_400_BAD_REQUEST,
+                )
+            check_list = serializers.EngagementCheckListSerializer(
+                data=request.data
+            )
             if not check_list.is_valid():
-                return Response(check_list.errors, status=status.HTTP_400_BAD_REQUEST)
+                return Response(
+                    check_list.errors, status=status.HTTP_400_BAD_REQUEST
+                )
             check_list = Check_List(**check_list.data)
             check_list.engagement = engagement
             check_list.save()
-            serialized_check_list = serializers.EngagementCheckListSerializer(check_list)
-            return Response(serialized_check_list.data, status=status.HTTP_201_CREATED)
+            serialized_check_list = serializers.EngagementCheckListSerializer(
+                check_list
+            )
+            return Response(
+                serialized_check_list.data, status=status.HTTP_201_CREATED
+            )
         prefetch_params = request.GET.get("prefetch", "").split(",")
         prefetcher = _Prefetcher()
         entry = check_lists.first()
@@ -467,69 +716,93 @@ def complete_checklist(self, request, pk=None):
         return Response(result, status=status.HTTP_200_OK)

     @extend_schema(
-        methods=['GET'],
+        methods=["GET"],
         responses={
             status.HTTP_200_OK: serializers.RawFileSerializer,
-        }
+        },
     )
     @swagger_auto_schema(
-        method='get',
+        method="get",
         responses={
             status.HTTP_200_OK: serializers.RawFileSerializer,
-        }
+        },
+    )
+    @action(
+        detail=True,
+        methods=["get"],
+        url_path=r"files/download/(?P<file_id>\d+)",
     )
-    @action(detail=True, methods=["get"], url_path=r'files/download/(?P<file_id>\d+)')
     def download_file(self, request, file_id, pk=None):
         engagement = self.get_object()
         # Get the file object
         file_object_qs = engagement.files.filter(id=file_id)
-        file_object = file_object_qs.first() if len(file_object_qs) > 0 else None
+        file_object = (
+            file_object_qs.first() if len(file_object_qs) > 0 else None
+        )
         if file_object is None:
-            return Response({"error": "File ID not associated with Engagement"}, status=status.HTTP_404_NOT_FOUND)
+            return Response(
+                {"error": "File ID not associated with Engagement"},
+                status=status.HTTP_404_NOT_FOUND,
+            )
         # Get the path of the file in media root
-        file_path = f'{settings.MEDIA_ROOT}/{file_object.file.url.lstrip(settings.MEDIA_URL)}'
+        file_path = f"{settings.MEDIA_ROOT}/{file_object.file.url.lstrip(settings.MEDIA_URL)}"
         file_handle = open(file_path, "rb")
         # send file
-        response = FileResponse(file_handle, content_type=f'{mimetypes.guess_type(file_path)}', status=status.HTTP_200_OK)
-        response['Content-Length'] = file_object.file.size
-        response['Content-Disposition'] = f'attachment; filename="{file_object.file.name}"'
+        response = FileResponse(
+            file_handle,
+            content_type=f"{mimetypes.guess_type(file_path)}",
+            status=status.HTTP_200_OK,
+        )
+        response["Content-Length"] = file_object.file.size
+        response[
+            "Content-Disposition"
+        ] = f'attachment; filename="{file_object.file.name}"'
         return response


-class RiskAcceptanceViewSet(prefetch.PrefetchListMixin,
-                            prefetch.PrefetchRetrieveMixin,
-                            mixins.ListModelMixin,
-                            mixins.RetrieveModelMixin,
-                            mixins.DestroyModelMixin,
-                            viewsets.GenericViewSet,
-                            dojo_mixins.DeletePreviewModelMixin):
+class RiskAcceptanceViewSet(
+    prefetch.PrefetchListMixin,
+    prefetch.PrefetchRetrieveMixin,
+    mixins.ListModelMixin,
+    mixins.RetrieveModelMixin,
+    mixins.DestroyModelMixin,
+    viewsets.GenericViewSet,
+    dojo_mixins.DeletePreviewModelMixin,
+):
     serializer_class = serializers.RiskAcceptanceSerializer
     queryset = Risk_Acceptance.objects.none()
     filter_backends = (DjangoFilterBackend,)
     filterset_class = ApiRiskAcceptanceFilter
-    swagger_schema = prefetch.get_prefetch_schema(["risk_acceptance_list", "risk_acceptance_read"], 
serializers.RiskAcceptanceSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasRiskAcceptancePermission) + swagger_schema = prefetch.get_prefetch_schema( + ["risk_acceptance_list", "risk_acceptance_read"], + serializers.RiskAcceptanceSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasRiskAcceptancePermission, + ) def get_queryset(self): - return get_authorized_risk_acceptances( - Permissions.Risk_Acceptance).prefetch_related( - 'notes', - 'engagement_set', - 'owner', - 'accepted_findings').distinct() + return ( + get_authorized_risk_acceptances(Permissions.Risk_Acceptance) + .prefetch_related( + "notes", "engagement_set", "owner", "accepted_findings" + ) + .distinct() + ) @extend_schema( - methods=['GET'], + methods=["GET"], responses={ status.HTTP_200_OK: serializers.RiskAcceptanceProofSerializer, - } + }, ) @swagger_auto_schema( - method='get', + method="get", responses={ status.HTTP_200_OK: serializers.RiskAcceptanceProofSerializer, - } + }, ) @action(detail=True, methods=["get"]) def download_proof(self, request, pk=None): @@ -537,208 +810,298 @@ def download_proof(self, request, pk=None): # Get the file object file_object = risk_acceptance.path if file_object is None or risk_acceptance.filename() is None: - return Response({"error": "Proof has not provided to this risk acceptance..."}, status=status.HTTP_404_NOT_FOUND) + return Response( + {"error": "Proof has not provided to this risk acceptance..."}, + status=status.HTTP_404_NOT_FOUND, + ) # Get the path of the file in media root - file_path = f'{settings.MEDIA_ROOT}/{file_object.name}' + file_path = f"{settings.MEDIA_ROOT}/{file_object.name}" file_handle = open(file_path, "rb") # send file - response = FileResponse(file_handle, content_type=f'{mimetypes.guess_type(file_path)}', status=status.HTTP_200_OK) - response['Content-Length'] = file_object.size - response['Content-Disposition'] = f'attachment; filename="{risk_acceptance.filename()}"' + response = FileResponse( + file_handle, + content_type=f"{mimetypes.guess_type(file_path)}", + status=status.HTTP_200_OK, + ) + response["Content-Length"] = file_object.size + response[ + "Content-Disposition" + ] = f'attachment; filename="{risk_acceptance.filename()}"' return response # These are technologies in the UI and the API! 
# Authorization: object-based -class AppAnalysisViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class AppAnalysisViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.AppAnalysisSerializer queryset = App_Analysis.objects.none() filter_backends = (DjangoFilterBackend,) filterset_class = ApiAppAnalysisFilter - swagger_schema = prefetch.get_prefetch_schema(["technologies_list", "technologies_read"], serializers.AppAnalysisSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasAppAnalysisPermission) + swagger_schema = prefetch.get_prefetch_schema( + ["technologies_list", "technologies_read"], + serializers.AppAnalysisSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasAppAnalysisPermission, + ) def get_queryset(self): return get_authorized_app_analysis(Permissions.Product_View) # Authorization: object-based -class CredentialsViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class CredentialsViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.CredentialSerializer queryset = Cred_User.objects.all() filter_backends = (DjangoFilterBackend,) - swagger_schema = prefetch.get_prefetch_schema(["credentials_list", "credentials_read"], serializers.CredentialSerializer).to_schema() + swagger_schema = prefetch.get_prefetch_schema( + ["credentials_list", "credentials_read"], + serializers.CredentialSerializer, + ).to_schema() permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) # Authorization: configuration -class CredentialsMappingViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class CredentialsMappingViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.CredentialMappingSerializer queryset = Cred_Mapping.objects.none() filter_backends = (DjangoFilterBackend,) filterset_class = ApiCredentialsFilter - swagger_schema = prefetch.get_prefetch_schema(["credential_mappings_list", "credential_mappings_read"], serializers.CredentialMappingSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasCredentialPermission) + swagger_schema = prefetch.get_prefetch_schema( + 
["credential_mappings_list", "credential_mappings_read"], + serializers.CredentialMappingSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasCredentialPermission, + ) def get_queryset(self): return get_authorized_cred_mappings(Permissions.Credential_View) # Authorization: configuration -class FindingTemplatesViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class FindingTemplatesViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.FindingTemplateSerializer queryset = Finding_Template.objects.all() filter_backends = (DjangoFilterBackend,) filterset_class = ApiTemplateFindingFilter - permission_classes = (permissions.UserHasConfigurationPermissionStaff, ) + permission_classes = (permissions.UserHasConfigurationPermissionStaff,) # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("related_fields", OpenApiTypes.BOOL, OpenApiParameter.QUERY, required=False, - description="Expand finding external relations (engagement, environment, product, \ - product_type, test, test_type)"), - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "related_fields", + OpenApiTypes.BOOL, + OpenApiParameter.QUERY, + required=False, + description="Expand finding external relations (engagement, environment, product, \ + product_type, test, test_type)", + ), + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "related_fields", + OpenApiTypes.BOOL, + OpenApiParameter.QUERY, + required=False, + description="Expand finding external relations (engagement, environment, product, \ + product_type, test, test_type)", + ), + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("related_fields", OpenApiTypes.BOOL, OpenApiParameter.QUERY, required=False, - description="Expand finding external relations (engagement, environment, product, \ - product_type, test, test_type)"), - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class FindingViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - ra_api.AcceptedFindingsMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class FindingViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + ra_api.AcceptedFindingsMixin, + viewsets.GenericViewSet, + 
dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.FindingSerializer queryset = Finding.objects.none() filter_backends = (DjangoFilterBackend,) filterset_class = ApiFindingFilter - permission_classes = (IsAuthenticated, permissions.UserHasFindingPermission) - - _related_field_parameters = [openapi.Parameter( - name="related_fields", - in_=openapi.IN_QUERY, - description="Expand finding external relations (engagement, environment, product, product_type, test, test_type)", - type=openapi.TYPE_BOOLEAN)] - swagger_schema = prefetch.get_prefetch_schema(["findings_list", "findings_read"], serializers.FindingSerializer). \ - composeWith(schema.ExtraParameters("findings_list", _related_field_parameters)). \ - composeWith(schema.ExtraParameters("findings_read", _related_field_parameters)). \ - to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasFindingPermission, + ) + + _related_field_parameters = [ + openapi.Parameter( + name="related_fields", + in_=openapi.IN_QUERY, + description="Expand finding external relations (engagement, environment, product, product_type, test, test_type)", + type=openapi.TYPE_BOOLEAN, + ) + ] + swagger_schema = ( + prefetch.get_prefetch_schema( + ["findings_list", "findings_read"], serializers.FindingSerializer + ) + .composeWith( + schema.ExtraParameters("findings_list", _related_field_parameters) + ) + .composeWith( + schema.ExtraParameters("findings_read", _related_field_parameters) + ) + .to_schema() + ) # Overriding mixins.UpdateModeMixin perform_update() method to grab push_to_jira # data and add that as a parameter to .save() def perform_update(self, serializer): # IF JIRA is enabled and this product has a JIRA configuration - push_to_jira = serializer.validated_data.get('push_to_jira') + push_to_jira = serializer.validated_data.get("push_to_jira") jira_project = jira_helper.get_jira_project(serializer.instance) - if get_system_setting('enable_jira') and jira_project: + if get_system_setting("enable_jira") and jira_project: push_to_jira = push_to_jira or jira_project.push_all_issues serializer.save(push_to_jira=push_to_jira) def get_queryset(self): - findings = get_authorized_findings(Permissions.Finding_View).prefetch_related('endpoints', - 'reviewers', - 'found_by', - 'notes', - 'risk_acceptance_set', - 'test', - 'tags', - 'jira_issue', - 'finding_group_set', - 'files', - 'burprawrequestresponse_set', - 'status_finding', - 'finding_meta', - 'test__test_type', - 'test__engagement', - 'test__environment', - 'test__engagement__product', - 'test__engagement__product__prod_type') + findings = get_authorized_findings( + Permissions.Finding_View + ).prefetch_related( + "endpoints", + "reviewers", + "found_by", + "notes", + "risk_acceptance_set", + "test", + "tags", + "jira_issue", + "finding_group_set", + "files", + "burprawrequestresponse_set", + "status_finding", + "finding_meta", + "test__test_type", + "test__engagement", + "test__environment", + "test__engagement__product", + "test__engagement__product__prod_type", + ) return findings.distinct() def get_serializer_class(self): - if self.request and self.request.method == 'POST': + if self.request and self.request.method == "POST": return serializers.FindingCreateSerializer else: return serializers.FindingSerializer @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.FindingCloseSerializer, - responses={status.HTTP_200_OK: serializers.FindingCloseSerializer} + responses={status.HTTP_200_OK: serializers.FindingCloseSerializer}, ) 
@swagger_auto_schema( - method='post', + method="post", request_body=serializers.FindingCloseSerializer, - responses={status.HTTP_200_OK: serializers.FindingCloseSerializer} + responses={status.HTTP_200_OK: serializers.FindingCloseSerializer}, ) @action(detail=True, methods=["post"]) def close(self, request, pk=None): finding = self.get_object() - if request.method == 'POST': - finding_close = serializers.FindingCloseSerializer(data=request.data) + if request.method == "POST": + finding_close = serializers.FindingCloseSerializer( + data=request.data + ) if finding_close.is_valid(): - finding.is_mitigated = finding_close.validated_data['is_mitigated'] + finding.is_mitigated = finding_close.validated_data[ + "is_mitigated" + ] if settings.EDITABLE_MITIGATED_DATA: - finding.mitigated = finding_close.validated_data['mitigated'] or timezone.now() + finding.mitigated = ( + finding_close.validated_data["mitigated"] + or timezone.now() + ) else: finding.mitigated = timezone.now() finding.mitigated_by = request.user finding.active = False - finding.false_p = finding_close.validated_data.get('false_p', False) - finding.duplicate = finding_close.validated_data.get('duplicate', False) - finding.out_of_scope = finding_close.validated_data.get('out_of_scope', False) + finding.false_p = finding_close.validated_data.get( + "false_p", False + ) + finding.duplicate = finding_close.validated_data.get( + "duplicate", False + ) + finding.out_of_scope = finding_close.validated_data.get( + "out_of_scope", False + ) endpoints_status = finding.status_finding.all() for e_status in endpoints_status: e_status.mitigated_by = request.user if settings.EDITABLE_MITIGATED_DATA: - e_status.mitigated_time = finding_close.validated_data["mitigated"] or timezone.now() + e_status.mitigated_time = ( + finding_close.validated_data["mitigated"] + or timezone.now() + ) else: e_status.mitigated_time = timezone.now() e_status.mitigated = True @@ -746,131 +1109,161 @@ def close(self, request, pk=None): e_status.save() finding.save() else: - return Response(finding_close.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + finding_close.errors, status=status.HTTP_400_BAD_REQUEST + ) serialized_finding = serializers.FindingCloseSerializer(finding) return Response(serialized_finding.data) @extend_schema( - methods=['GET'], - responses={status.HTTP_200_OK: serializers.TagSerializer} + methods=["GET"], + responses={status.HTTP_200_OK: serializers.TagSerializer}, ) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.TagSerializer, - responses={status.HTTP_201_CREATED: serializers.TagSerializer} + responses={status.HTTP_201_CREATED: serializers.TagSerializer}, ) @swagger_auto_schema( - method='get', - responses={status.HTTP_200_OK: serializers.TagSerializer} + method="get", responses={status.HTTP_200_OK: serializers.TagSerializer} ) @swagger_auto_schema( - method='post', + method="post", request_body=serializers.TagSerializer, - responses={status.HTTP_200_OK: serializers.TagSerializer} + responses={status.HTTP_200_OK: serializers.TagSerializer}, ) - @action(detail=True, methods=['get', 'post']) + @action(detail=True, methods=["get", "post"]) def tags(self, request, pk=None): finding = self.get_object() - if request.method == 'POST': + if request.method == "POST": new_tags = serializers.TagSerializer(data=request.data) if new_tags.is_valid(): all_tags = finding.tags - all_tags = serializers.TagSerializer({"tags": all_tags}).data['tags'] + all_tags = serializers.TagSerializer({"tags": all_tags}).data[ + 
"tags" + ] - for tag in tagulous.utils.parse_tags(new_tags.validated_data['tags']): + for tag in tagulous.utils.parse_tags( + new_tags.validated_data["tags"] + ): if tag not in all_tags: all_tags.append(tag) new_tags = tagulous.utils.render_tags(all_tags) finding.tags = new_tags finding.save() else: - return Response(new_tags.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + new_tags.errors, status=status.HTTP_400_BAD_REQUEST + ) tags = finding.tags serialized_tags = serializers.TagSerializer({"tags": tags}) return Response(serialized_tags.data) @extend_schema( - methods=['GET'], - responses={status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer} + methods=["GET"], + responses={ + status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer + }, ) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.BurpRawRequestResponseSerializer, - responses={status.HTTP_201_CREATED: serializers.BurpRawRequestResponseSerializer} + responses={ + status.HTTP_201_CREATED: serializers.BurpRawRequestResponseSerializer + }, ) @swagger_auto_schema( - method='get', - responses={status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer} + method="get", + responses={ + status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer + }, ) @swagger_auto_schema( - method='post', + method="post", request_body=serializers.BurpRawRequestResponseSerializer, - responses={status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer} + responses={ + status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer + }, ) - @action(detail=True, methods=['get', 'post']) + @action(detail=True, methods=["get", "post"]) def request_response(self, request, pk=None): finding = self.get_object() - if request.method == 'POST': - burps = serializers.BurpRawRequestResponseSerializer(data=request.data, many=isinstance(request.data, list)) + if request.method == "POST": + burps = serializers.BurpRawRequestResponseSerializer( + data=request.data, many=isinstance(request.data, list) + ) if burps.is_valid(): - for pair in burps.validated_data['req_resp']: + for pair in burps.validated_data["req_resp"]: burp_rr = BurpRawRequestResponse( - finding=finding, - burpRequestBase64=base64.b64encode(pair["request"].encode("utf-8")), - burpResponseBase64=base64.b64encode(pair["response"].encode("utf-8")), - ) + finding=finding, + burpRequestBase64=base64.b64encode( + pair["request"].encode("utf-8") + ), + burpResponseBase64=base64.b64encode( + pair["response"].encode("utf-8") + ), + ) burp_rr.clean() burp_rr.save() else: - return Response(burps.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + burps.errors, status=status.HTTP_400_BAD_REQUEST + ) burp_req_resp = BurpRawRequestResponse.objects.filter(finding=finding) burp_list = [] for burp in burp_req_resp: request = burp.get_request() response = burp.get_response() - burp_list.append({'request': request, 'response': response}) - serialized_burps = serializers.BurpRawRequestResponseSerializer({'req_resp': burp_list}) + burp_list.append({"request": request, "response": response}) + serialized_burps = serializers.BurpRawRequestResponseSerializer( + {"req_resp": burp_list} + ) return Response(serialized_burps.data) @extend_schema( - methods=['GET'], - responses={status.HTTP_200_OK: serializers.FindingToNotesSerializer} + methods=["GET"], + responses={status.HTTP_200_OK: serializers.FindingToNotesSerializer}, ) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.AddNewNoteOptionSerializer, - 
responses={status.HTTP_201_CREATED: serializers.NoteSerializer} + responses={status.HTTP_201_CREATED: serializers.NoteSerializer}, ) @swagger_auto_schema( - method='get', - responses={status.HTTP_200_OK: serializers.FindingToNotesSerializer} + method="get", + responses={status.HTTP_200_OK: serializers.FindingToNotesSerializer}, ) @swagger_auto_schema( - methods=['post'], + methods=["post"], request_body=serializers.AddNewNoteOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.NoteSerializer} + responses={status.HTTP_201_CREATED: serializers.NoteSerializer}, ) @action(detail=True, methods=["get", "post"]) def notes(self, request, pk=None): finding = self.get_object() - if request.method == 'POST': - new_note = serializers.AddNewNoteOptionSerializer(data=request.data) + if request.method == "POST": + new_note = serializers.AddNewNoteOptionSerializer( + data=request.data + ) if new_note.is_valid(): - entry = new_note.validated_data['entry'] - private = new_note.validated_data.get('private', False) - note_type = new_note.validated_data.get('note_type', None) + entry = new_note.validated_data["entry"] + private = new_note.validated_data.get("private", False) + note_type = new_note.validated_data.get("note_type", None) else: - return Response(new_note.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + new_note.errors, status=status.HTTP_400_BAD_REQUEST + ) author = request.user - note = Notes(entry=entry, author=author, private=private, note_type=note_type) + note = Notes( + entry=entry, + author=author, + private=private, + note_type=note_type, + ) note.save() finding.notes.add(note) @@ -879,176 +1272,233 @@ def notes(self, request, pk=None): elif finding.has_jira_group_issue: jira_helper.add_comment(finding.finding_group, note) - serialized_note = serializers.NoteSerializer({ - "author": author, "entry": entry, - "private": private - }) - result = serializers.FindingToNotesSerializer({ - "finding_id": finding, "notes": [serialized_note.data] - }) - return Response(serialized_note.data, - status=status.HTTP_201_CREATED) + serialized_note = serializers.NoteSerializer( + {"author": author, "entry": entry, "private": private} + ) + result = serializers.FindingToNotesSerializer( + {"finding_id": finding, "notes": [serialized_note.data]} + ) + return Response( + serialized_note.data, status=status.HTTP_201_CREATED + ) notes = finding.notes.all() - serialized_notes = serializers.FindingToNotesSerializer({ - "finding_id": finding, "notes": notes - }) - return Response(serialized_notes.data, - status=status.HTTP_200_OK) + serialized_notes = serializers.FindingToNotesSerializer( + {"finding_id": finding, "notes": notes} + ) + return Response(serialized_notes.data, status=status.HTTP_200_OK) @extend_schema( - methods=['GET'], - responses={status.HTTP_200_OK: serializers.FindingToFilesSerializer} + methods=["GET"], + responses={status.HTTP_200_OK: serializers.FindingToFilesSerializer}, ) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.AddNewFileOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.FileSerializer} + responses={status.HTTP_201_CREATED: serializers.FileSerializer}, ) @swagger_auto_schema( - method='get', - responses={status.HTTP_200_OK: serializers.FindingToFilesSerializer} + method="get", + responses={status.HTTP_200_OK: serializers.FindingToFilesSerializer}, ) @swagger_auto_schema( - method='post', + method="post", request_body=serializers.AddNewFileOptionSerializer, - responses={status.HTTP_201_CREATED: 
serializers.FileSerializer} + responses={status.HTTP_201_CREATED: serializers.FileSerializer}, + ) + @action( + detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,) ) - @action(detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,)) def files(self, request, pk=None): finding = self.get_object() - if request.method == 'POST': + if request.method == "POST": new_file = serializers.FileSerializer(data=request.data) if new_file.is_valid(): - title = new_file.validated_data['title'] - file = new_file.validated_data['file'] + title = new_file.validated_data["title"] + file = new_file.validated_data["file"] else: - return Response(new_file.errors, status=status.HTTP_400_BAD_REQUEST) + return Response( + new_file.errors, status=status.HTTP_400_BAD_REQUEST + ) file = FileUpload(title=title, file=file) file.save() finding.files.add(file) serialized_file = serializers.FileSerializer(file) - return Response(serialized_file.data, status=status.HTTP_201_CREATED) + return Response( + serialized_file.data, status=status.HTTP_201_CREATED + ) files = finding.files.all() - serialized_files = serializers.FindingToFilesSerializer({ - "finding_id": finding, "files": files - }) + serialized_files = serializers.FindingToFilesSerializer( + {"finding_id": finding, "files": files} + ) return Response(serialized_files.data, status=status.HTTP_200_OK) @extend_schema( - methods=['GET'], + methods=["GET"], responses={ status.HTTP_200_OK: serializers.RawFileSerializer, - } + }, ) @swagger_auto_schema( - method='get', + method="get", responses={ status.HTTP_200_OK: serializers.RawFileSerializer, - } + }, + ) + @action( + detail=True, + methods=["get"], + url_path=r"files/download/(?P\d+)", ) - @action(detail=True, methods=["get"], url_path=r'files/download/(?P\d+)') def download_file(self, request, file_id, pk=None): finding = self.get_object() # Get the file object file_object_qs = finding.files.filter(id=file_id) - file_object = file_object_qs.first() if len(file_object_qs) > 0 else None + file_object = ( + file_object_qs.first() if len(file_object_qs) > 0 else None + ) if file_object is None: - return Response({"error": "File ID not associated with Finding"}, status=status.HTTP_404_NOT_FOUND) + return Response( + {"error": "File ID not associated with Finding"}, + status=status.HTTP_404_NOT_FOUND, + ) # Get the path of the file in media root - file_path = f'{settings.MEDIA_ROOT}/{file_object.file.url.lstrip(settings.MEDIA_URL)}' + file_path = f"{settings.MEDIA_ROOT}/{file_object.file.url.lstrip(settings.MEDIA_URL)}" file_handle = open(file_path, "rb") # send file - response = FileResponse(file_handle, content_type=f'{mimetypes.guess_type(file_path)}', status=status.HTTP_200_OK) - response['Content-Length'] = file_object.file.size - response['Content-Disposition'] = f'attachment; filename="{file_object.file.name}"' + response = FileResponse( + file_handle, + content_type=f"{mimetypes.guess_type(file_path)}", + status=status.HTTP_200_OK, + ) + response["Content-Length"] = file_object.file.size + response[ + "Content-Disposition" + ] = f'attachment; filename="{file_object.file.name}"' return response @extend_schema( request=serializers.FindingNoteSerializer, - responses={status.HTTP_204_NO_CONTENT: ""} + responses={status.HTTP_204_NO_CONTENT: ""}, ) @swagger_auto_schema( request_body=serializers.FindingNoteSerializer, - responses={status.HTTP_204_NO_CONTENT: ""} + responses={status.HTTP_204_NO_CONTENT: ""}, ) @action(detail=True, methods=["patch"]) def remove_note(self, request, 
pk=None): """Remove Note From Finding Note""" finding = self.get_object() notes = finding.notes.all() - if request.data['note_id']: - note = get_object_or_404(Notes.objects, id=request.data['note_id']) + if request.data["note_id"]: + note = get_object_or_404(Notes.objects, id=request.data["note_id"]) if note not in notes: - return Response({"error": "Selected Note is not assigned to this Finding"}, - status=status.HTTP_400_BAD_REQUEST) + return Response( + {"error": "Selected Note is not assigned to this Finding"}, + status=status.HTTP_400_BAD_REQUEST, + ) else: - return Response({"error": "('note_id') parameter missing"}, - status=status.HTTP_400_BAD_REQUEST) - if note.author.username == request.user.username or request.user.is_superuser: + return Response( + {"error": "('note_id') parameter missing"}, + status=status.HTTP_400_BAD_REQUEST, + ) + if ( + note.author.username == request.user.username + or request.user.is_superuser + ): finding.notes.remove(note) note.delete() else: - return Response({"error": "Delete Failed, You are not the Note's author"}, - status=status.HTTP_400_BAD_REQUEST) + return Response( + {"error": "Delete Failed, You are not the Note's author"}, + status=status.HTTP_400_BAD_REQUEST, + ) - return Response({"Success": "Selected Note has been Removed successfully"}, - status=status.HTTP_204_NO_CONTENT) + return Response( + {"Success": "Selected Note has been Removed successfully"}, + status=status.HTTP_204_NO_CONTENT, + ) @extend_schema( - methods=['PUT', 'PATCH'], + methods=["PUT", "PATCH"], request=serializers.TagSerializer, responses={status.HTTP_204_NO_CONTENT: ""}, ) @swagger_auto_schema( - methods=['put', 'patch'], + methods=["put", "patch"], request_body=serializers.TagSerializer, responses={status.HTTP_204_NO_CONTENT: ""}, ) @action(detail=True, methods=["put", "patch"]) def remove_tags(self, request, pk=None): - """ Remove Tag(s) from finding list of tags """ + """Remove Tag(s) from finding list of tags""" finding = self.get_object() delete_tags = serializers.TagSerializer(data=request.data) if delete_tags.is_valid(): all_tags = finding.tags - all_tags = serializers.TagSerializer({"tags": all_tags}).data['tags'] + all_tags = serializers.TagSerializer({"tags": all_tags}).data[ + "tags" + ] # serializer turns it into a string, but we need a list - del_tags = tagulous.utils.parse_tags(delete_tags.validated_data['tags']) + del_tags = tagulous.utils.parse_tags( + delete_tags.validated_data["tags"] + ) if len(del_tags) < 1: - return Response({"error": "Empty Tag List Not Allowed"}, - status=status.HTTP_400_BAD_REQUEST) + return Response( + {"error": "Empty Tag List Not Allowed"}, + status=status.HTTP_400_BAD_REQUEST, + ) for tag in del_tags: if tag not in all_tags: - return Response({"error": "'{}' is not a valid tag in list".format(tag)}, - status=status.HTTP_400_BAD_REQUEST) + return Response( + { + "error": "'{}' is not a valid tag in list".format( + tag + ) + }, + status=status.HTTP_400_BAD_REQUEST, + ) all_tags.remove(tag) new_tags = tagulous.utils.render_tags(all_tags) finding.tags = new_tags finding.save() - return Response({"success": "Tag(s) Removed"}, - status=status.HTTP_204_NO_CONTENT) + return Response( + {"success": "Tag(s) Removed"}, + status=status.HTTP_204_NO_CONTENT, + ) else: - return Response(delete_tags.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + delete_tags.errors, status=status.HTTP_400_BAD_REQUEST + ) @extend_schema( - responses={status.HTTP_200_OK: serializers.FindingSerializer(many=True)} + responses={ + 
status.HTTP_200_OK: serializers.FindingSerializer(many=True) + } ) @swagger_auto_schema( - responses={status.HTTP_200_OK: serializers.FindingSerializer(many=True)} + responses={ + status.HTTP_200_OK: serializers.FindingSerializer(many=True) + } + ) + @action( + detail=True, + methods=["get"], + url_path=r"duplicate", + filter_backends=[], + pagination_class=None, ) - @action(detail=True, methods=['get'], url_path=r'duplicate', filter_backends=[], pagination_class=None) def get_duplicate_cluster(self, request, pk): finding = self.get_object() result = duplicate_cluster(request, finding) - serializer = serializers.FindingSerializer(instance=result, many=True, - context={"request": request}) + serializer = serializers.FindingSerializer( + instance=result, many=True, context={"request": request} + ) return Response(serializer.data, status=status.HTTP_200_OK) @extend_schema( @@ -1059,10 +1509,11 @@ def get_duplicate_cluster(self, request, pk): request_body=no_body, responses={status.HTTP_204_NO_CONTENT: ""}, ) - @action(detail=True, methods=['post'], url_path=r'duplicate/reset') + @action(detail=True, methods=["post"], url_path=r"duplicate/reset") def reset_finding_duplicate_status(self, request, pk): - finding = self.get_object() - checked_duplicate_id = reset_finding_duplicate_status_internal(request.user, pk) + checked_duplicate_id = reset_finding_duplicate_status_internal( + request.user, pk + ) if checked_duplicate_id is None: return Response(status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_204_NO_CONTENT) @@ -1070,17 +1521,19 @@ def reset_finding_duplicate_status(self, request, pk): @extend_schema( request=OpenApiTypes.NONE, parameters=[ - OpenApiParameter("new_fid", OpenApiTypes.INT, OpenApiParameter.PATH) + OpenApiParameter( + "new_fid", OpenApiTypes.INT, OpenApiParameter.PATH + ) ], responses={status.HTTP_204_NO_CONTENT: ""}, ) @swagger_auto_schema( - responses={status.HTTP_204_NO_CONTENT: ""}, - request_body=no_body + responses={status.HTTP_204_NO_CONTENT: ""}, request_body=no_body + ) + @action( + detail=True, methods=["post"], url_path=r"original/(?P\d+)" ) - @action(detail=True, methods=['post'], url_path=r'original/(?P\d+)') def set_finding_as_original(self, request, pk, new_fid): - finding = self.get_object() success = set_finding_as_original_internal(request.user, pk, new_fid) if not success: return Response(status=status.HTTP_400_BAD_REQUEST) @@ -1094,20 +1547,33 @@ def set_finding_as_original(self, request, pk, new_fid): request_body=serializers.ReportGenerateOptionSerializer, responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer}, ) - @action(detail=False, methods=['post'], permission_classes=[IsAuthenticated]) + @action( + detail=False, methods=["post"], permission_classes=[IsAuthenticated] + ) def generate_report(self, request): findings = self.get_queryset() options = {} # prepare post data - report_options = serializers.ReportGenerateOptionSerializer(data=request.data) + report_options = serializers.ReportGenerateOptionSerializer( + data=request.data + ) if report_options.is_valid(): - options['include_finding_notes'] = report_options.validated_data['include_finding_notes'] - options['include_finding_images'] = report_options.validated_data['include_finding_images'] - options['include_executive_summary'] = report_options.validated_data['include_executive_summary'] - options['include_table_of_contents'] = report_options.validated_data['include_table_of_contents'] + options["include_finding_notes"] = report_options.validated_data[ + 
"include_finding_notes" + ] + options["include_finding_images"] = report_options.validated_data[ + "include_finding_images" + ] + options[ + "include_executive_summary" + ] = report_options.validated_data["include_executive_summary"] + options[ + "include_table_of_contents" + ] = report_options.validated_data["include_table_of_contents"] else: - return Response(report_options.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + report_options.errors, status=status.HTTP_400_BAD_REQUEST + ) data = report_generate(request, findings, options) report = serializers.ReportGenerateSerializer(data) @@ -1115,27 +1581,34 @@ def generate_report(self, request): def _get_metadata(self, request, finding): metadata = DojoMeta.objects.filter(finding=finding) - serializer = serializers.FindingMetaSerializer(instance=metadata, many=True) + serializer = serializers.FindingMetaSerializer( + instance=metadata, many=True + ) return Response(serializer.data, status=status.HTTP_200_OK) def _edit_metadata(self, request, finding): metadata_name = request.query_params.get("name", None) if metadata_name is None: - return Response("Metadata name is required", status=status.HTTP_400_BAD_REQUEST) + return Response( + "Metadata name is required", status=status.HTTP_400_BAD_REQUEST + ) try: DojoMeta.objects.update_or_create( - name=metadata_name, finding=finding, + name=metadata_name, + finding=finding, defaults={ "name": request.data.get("name"), - "value": request.data.get("value") - } + "value": request.data.get("value"), + }, ) return Response(data=request.data, status=status.HTTP_200_OK) except IntegrityError: - return Response("Update failed because the new name already exists", - status=status.HTTP_400_BAD_REQUEST) + return Response( + "Update failed because the new name already exists", + status=status.HTTP_400_BAD_REQUEST, + ) def _add_metadata(self, request, finding): metadata_data = serializers.FindingMetaSerializer(data=request.data) @@ -1149,41 +1622,63 @@ def _add_metadata(self, request, finding): metadata.validate_unique() metadata.save() except ValidationError: - return Response("Create failed probably because the name of the metadata already exists", status=status.HTTP_400_BAD_REQUEST) + return Response( + "Create failed probably because the name of the metadata already exists", + status=status.HTTP_400_BAD_REQUEST, + ) return Response(data=metadata_data.data, status=status.HTTP_200_OK) else: - return Response(metadata_data.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + metadata_data.errors, status=status.HTTP_400_BAD_REQUEST + ) def _remove_metadata(self, request, finding): name = request.query_params.get("name", None) if name is None: - return Response("A metadata name must be provided", status=status.HTTP_400_BAD_REQUEST) + return Response( + "A metadata name must be provided", + status=status.HTTP_400_BAD_REQUEST, + ) - metadata = get_object_or_404(DojoMeta.objects, finding=finding, name=name) + metadata = get_object_or_404( + DojoMeta.objects, finding=finding, name=name + ) metadata.delete() return Response("Metadata deleted", status=status.HTTP_200_OK) @extend_schema( - methods=['GET'], + methods=["GET"], responses={ status.HTTP_200_OK: serializers.FindingMetaSerializer(many=True), - status.HTTP_404_NOT_FOUND: OpenApiResponse(description="Returned if finding does not exist"), + status.HTTP_404_NOT_FOUND: OpenApiResponse( + description="Returned if finding does not exist" + ), }, ) @extend_schema( - methods=['DELETE'], + methods=["DELETE"], parameters=[ - 
OpenApiParameter("name", OpenApiTypes.INT, OpenApiParameter.QUERY, required=True, - description="name of the metadata to retrieve. If name is empty, return all the \ - metadata associated with the finding") + OpenApiParameter( + "name", + OpenApiTypes.INT, + OpenApiParameter.QUERY, + required=True, + description="name of the metadata to retrieve. If name is empty, return all the \ + metadata associated with the finding", + ) ], responses={ - status.HTTP_200_OK: OpenApiResponse(description="Returned if the metadata was correctly deleted"), - status.HTTP_404_NOT_FOUND: OpenApiResponse(description="Returned if finding does not exist"), - status.HTTP_400_BAD_REQUEST: OpenApiResponse(description="Returned if there was a problem with the metadata information"), + status.HTTP_200_OK: OpenApiResponse( + description="Returned if the metadata was correctly deleted" + ), + status.HTTP_404_NOT_FOUND: OpenApiResponse( + description="Returned if finding does not exist" + ), + status.HTTP_400_BAD_REQUEST: OpenApiResponse( + description="Returned if there was a problem with the metadata information" + ), }, # manual_parameters=[openapi.Parameter( # name="name", in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, @@ -1191,68 +1686,92 @@ def _remove_metadata(self, request, finding): # metadata associated with the finding")] ) @extend_schema( - methods=['PUT'], + methods=["PUT"], request=serializers.FindingMetaSerializer, responses={ status.HTTP_200_OK: serializers.FindingMetaSerializer, - status.HTTP_404_NOT_FOUND: OpenApiResponse(description="Returned if finding does not exist"), - status.HTTP_400_BAD_REQUEST: OpenApiResponse(description="Returned if there was a problem with the metadata information"), + status.HTTP_404_NOT_FOUND: OpenApiResponse( + description="Returned if finding does not exist" + ), + status.HTTP_400_BAD_REQUEST: OpenApiResponse( + description="Returned if there was a problem with the metadata information" + ), }, # manual_parameters=[openapi.Parameter( # name="name", in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING, # description="name of the metadata to edit")], ) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.FindingMetaSerializer, responses={ status.HTTP_200_OK: serializers.FindingMetaSerializer, - status.HTTP_404_NOT_FOUND: OpenApiResponse(description="Returned if finding does not exist"), - status.HTTP_400_BAD_REQUEST: OpenApiResponse(description="Returned if there was a problem with the metadata information"), + status.HTTP_404_NOT_FOUND: OpenApiResponse( + description="Returned if finding does not exist" + ), + status.HTTP_400_BAD_REQUEST: OpenApiResponse( + description="Returned if there was a problem with the metadata information" + ), }, ) @swagger_auto_schema( responses={ status.HTTP_200_OK: serializers.FindingMetaSerializer(many=True), - status.HTTP_404_NOT_FOUND: "Returned if finding does not exist" + status.HTTP_404_NOT_FOUND: "Returned if finding does not exist", }, - methods=['get'] + methods=["get"], ) @swagger_auto_schema( responses={ status.HTTP_200_OK: "Returned if the metadata was correctly deleted", status.HTTP_404_NOT_FOUND: "Returned if finding does not exist", - status.HTTP_400_BAD_REQUEST: "Returned if there was a problem with the metadata information" + status.HTTP_400_BAD_REQUEST: "Returned if there was a problem with the metadata information", }, - methods=['delete'], - manual_parameters=[openapi.Parameter( - name="name", in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING, - description="name of the metadata 
to retrieve. If name is empty, return all the \ - metadata associated with the finding")] + methods=["delete"], + manual_parameters=[ + openapi.Parameter( + name="name", + in_=openapi.IN_QUERY, + required=True, + type=openapi.TYPE_STRING, + description="name of the metadata to retrieve. If name is empty, return all the \ + metadata associated with the finding", + ) + ], ) @swagger_auto_schema( responses={ status.HTTP_200_OK: serializers.FindingMetaSerializer, status.HTTP_404_NOT_FOUND: "Returned if finding does not exist", - status.HTTP_400_BAD_REQUEST: "Returned if there was a problem with the metadata information" + status.HTTP_400_BAD_REQUEST: "Returned if there was a problem with the metadata information", }, - methods=['put'], - manual_parameters=[openapi.Parameter( - name="name", in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING, - description="name of the metadata to edit")], - request_body=serializers.FindingMetaSerializer + methods=["put"], + manual_parameters=[ + openapi.Parameter( + name="name", + in_=openapi.IN_QUERY, + required=True, + type=openapi.TYPE_STRING, + description="name of the metadata to edit", + ) + ], + request_body=serializers.FindingMetaSerializer, ) @swagger_auto_schema( responses={ status.HTTP_200_OK: serializers.FindingMetaSerializer, status.HTTP_404_NOT_FOUND: "Returned if finding does not exist", - status.HTTP_400_BAD_REQUEST: "Returned if there was a problem with the metadata information" + status.HTTP_400_BAD_REQUEST: "Returned if there was a problem with the metadata information", }, - methods=['post'], - request_body=serializers.FindingMetaSerializer + methods=["post"], + request_body=serializers.FindingMetaSerializer, + ) + @action( + detail=True, + methods=["post", "put", "delete", "get"], + filter_backends=[], + pagination_class=None, ) - @action(detail=True, methods=["post", "put", "delete", "get"], - filter_backends=[], pagination_class=None) def metadata(self, request, pk=None): finding = self.get_object() @@ -1267,148 +1786,238 @@ def metadata(self, request, pk=None): elif request.method == "DELETE": return self._remove_metadata(request, finding) - return Response({"error", "unsupported method"}, status=status.HTTP_400_BAD_REQUEST) + return Response( + {"error", "unsupported method"}, status=status.HTTP_400_BAD_REQUEST + ) # Authorization: configuration -class JiraInstanceViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class JiraInstanceViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.JIRAInstanceSerializer queryset = JIRA_Instance.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'url'] - permission_classes = (permissions.UserHasConfigurationPermissionSuperuser, ) + filterset_fields = ["id", "url"] + permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) # Authorization: object-based -class JiraIssuesViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class JiraIssuesViewSet( + prefetch.PrefetchListMixin, + 
prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.JIRAIssueSerializer queryset = JIRA_Issue.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'jira_id', 'jira_key', 'finding', 'engagement', 'finding_group'] - swagger_schema = prefetch.get_prefetch_schema(["jira_finding_mappings_list", "jira_finding_mappings_read"], serializers.JIRAIssueSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasJiraIssuePermission) + filterset_fields = [ + "id", + "jira_id", + "jira_key", + "finding", + "engagement", + "finding_group", + ] + swagger_schema = prefetch.get_prefetch_schema( + ["jira_finding_mappings_list", "jira_finding_mappings_read"], + serializers.JIRAIssueSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasJiraIssuePermission, + ) def get_queryset(self): return get_authorized_jira_issues(Permissions.Product_View) # Authorization: object-based -class JiraProjectViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class JiraProjectViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.JIRAProjectSerializer queryset = JIRA_Project.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = [ - 'id', 'jira_instance', 'product', 'engagement', 'component', 'project_key', - 'push_all_issues', 'enable_engagement_epic_mapping', 'push_notes'] - swagger_schema = prefetch.get_prefetch_schema(["jira_projects_list", "jira_projects_read"], serializers.JIRAProjectSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasJiraProductPermission) + "id", + "jira_instance", + "product", + "engagement", + "component", + "project_key", + "push_all_issues", + "enable_engagement_epic_mapping", + "push_notes", + ] + swagger_schema = prefetch.get_prefetch_schema( + ["jira_projects_list", "jira_projects_read"], + serializers.JIRAProjectSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasJiraProductPermission, + ) def get_queryset(self): return get_authorized_jira_projects(Permissions.Product_View) # Authorization: superuser -class SonarqubeIssueViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class SonarqubeIssueViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.SonarqubeIssueSerializer queryset = Sonarqube_Issue.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'key', 'status', 'type'] + filterset_fields = ["id", "key", "status", "type"] permission_classes = 
(permissions.IsSuperUser, DjangoModelPermissions) # Authorization: superuser -class SonarqubeIssueTransitionViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class SonarqubeIssueTransitionViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.SonarqubeIssueTransitionSerializer queryset = Sonarqube_Issue_Transition.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'sonarqube_issue', 'finding_status', 'sonarqube_status', 'transitions'] + filterset_fields = [ + "id", + "sonarqube_issue", + "finding_status", + "sonarqube_status", + "transitions", + ] permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) # Authorization: object-based -class ProductAPIScanConfigurationViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ProductAPIScanConfigurationViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ProductAPIScanConfigurationSerializer queryset = Product_API_Scan_Configuration.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'product', 'tool_configuration', 'service_key_1', 'service_key_2', 'service_key_3'] - swagger_schema = prefetch.get_prefetch_schema(["product_api_scan_configurations_list", "product_api_scan_configurations_read"], serializers.ProductAPIScanConfigurationSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasProductAPIScanConfigurationPermission) + filterset_fields = [ + "id", + "product", + "tool_configuration", + "service_key_1", + "service_key_2", + "service_key_3", + ] + swagger_schema = prefetch.get_prefetch_schema( + [ + "product_api_scan_configurations_list", + "product_api_scan_configurations_read", + ], + serializers.ProductAPIScanConfigurationSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasProductAPIScanConfigurationPermission, + ) def get_queryset(self): - return get_authorized_product_api_scan_configurations(Permissions.Product_API_Scan_Configuration_View) + return get_authorized_product_api_scan_configurations( + Permissions.Product_API_Scan_Configuration_View + ) # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + 
OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class DojoMetaViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class DojoMetaViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.MetaSerializer queryset = DojoMeta.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'product', 'endpoint', 'finding', 'name', 'value'] - permission_classes = (IsAuthenticated, permissions.UserHasDojoMetaPermission) - swagger_schema = prefetch.get_prefetch_schema(["metadata_list", "metadata_read"], - serializers.MetaSerializer).to_schema() + filterset_fields = [ + "id", + "product", + "endpoint", + "finding", + "name", + "value", + ] + permission_classes = ( + IsAuthenticated, + permissions.UserHasDojoMetaPermission, + ) + swagger_schema = prefetch.get_prefetch_schema( + ["metadata_list", "metadata_read"], serializers.MetaSerializer + ).to_schema() def get_queryset(self): return get_authorized_dojo_meta(Permissions.Product_View) @@ -1418,10 +2027,16 @@ def get_queryset(self): class DjangoFilterDescriptionInspector(CoreAPICompatInspector): def get_filter_parameters(self, filter_backend): if isinstance(filter_backend, DjangoFilterBackend): - result = super(DjangoFilterDescriptionInspector, self).get_filter_parameters(filter_backend) + result = super( + DjangoFilterDescriptionInspector, self + ).get_filter_parameters(filter_backend) for param in result: - if not param.get('description', ''): - param.description = "Filter the returned list by {field_name}".format(field_name=param.name) + if not param.get("description", ""): + param.description = ( + "Filter the returned list by {field_name}".format( + field_name=param.name + ) + ) return result @@ -1429,36 +2044,57 @@ def get_filter_parameters(self, filter_backend): @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), +) +@method_decorator( + name="list", + decorator=swagger_auto_schema( + filter_inspectors=[DjangoFilterDescriptionInspector] ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, 
OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -@method_decorator(name='list', decorator=swagger_auto_schema( - filter_inspectors=[DjangoFilterDescriptionInspector] -)) -class ProductViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ProductViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ProductSerializer # TODO: prefetch queryset = Product.objects.none() filter_backends = (DjangoFilterBackend,) filterset_class = ApiProductFilter - swagger_schema = prefetch.get_prefetch_schema(["products_list", "products_read"], serializers.ProductSerializer). \ - to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasProductPermission) + swagger_schema = prefetch.get_prefetch_schema( + ["products_list", "products_read"], serializers.ProductSerializer + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasProductPermission, + ) def get_queryset(self): return get_authorized_products(Permissions.Product_View).distinct() @@ -1487,21 +2123,34 @@ def destroy(self, request, *args, **kwargs): request_body=serializers.ReportGenerateOptionSerializer, responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer}, ) - @action(detail=True, methods=['post'], permission_classes=[IsAuthenticated]) + @action( + detail=True, methods=["post"], permission_classes=[IsAuthenticated] + ) def generate_report(self, request, pk=None): product = self.get_object() options = {} # prepare post data - report_options = serializers.ReportGenerateOptionSerializer(data=request.data) + report_options = serializers.ReportGenerateOptionSerializer( + data=request.data + ) if report_options.is_valid(): - options['include_finding_notes'] = report_options.validated_data['include_finding_notes'] - options['include_finding_images'] = report_options.validated_data['include_finding_images'] - options['include_executive_summary'] = report_options.validated_data['include_executive_summary'] - options['include_table_of_contents'] = report_options.validated_data['include_table_of_contents'] + options["include_finding_notes"] = report_options.validated_data[ + "include_finding_notes" + ] + options["include_finding_images"] = report_options.validated_data[ + "include_finding_images" + ] + options[ + "include_executive_summary" + ] = report_options.validated_data["include_executive_summary"] + options[ + "include_table_of_contents" + ] = report_options.validated_data["include_table_of_contents"] else: - return Response(report_options.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + report_options.errors, status=status.HTTP_400_BAD_REQUEST + ) data = report_generate(request, product, options) report = serializers.ReportGenerateSerializer(data) @@ -1510,36 +2159,57 @@ def generate_report(self, request, pk=None): # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + 
parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class ProductMemberViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ProductMemberViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ProductMemberSerializer queryset = Product_Member.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'product_id', 'user_id'] - swagger_schema = prefetch.get_prefetch_schema(["product_members_list", "product_members_read"], - serializers.ProductMemberSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasProductMemberPermission) + filterset_fields = ["id", "product_id", "user_id"] + swagger_schema = prefetch.get_prefetch_schema( + ["product_members_list", "product_members_read"], + serializers.ProductMemberSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasProductMemberPermission, + ) def get_queryset(self): - return get_authorized_product_members(Permissions.Product_View).distinct() + return get_authorized_product_members( + Permissions.Product_View + ).distinct() @extend_schema( request=OpenApiTypes.NONE, @@ -1551,42 +2221,63 @@ def get_queryset(self): ) def partial_update(self, request, pk=None): # Object authorization won't work if not all data is provided - response = {'message': 'Patch function is not offered in this path.'} + response = {"message": "Patch function is not offered in this path."} return Response(response, status=status.HTTP_405_METHOD_NOT_ALLOWED) # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - 
description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class ProductGroupViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ProductGroupViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ProductGroupSerializer queryset = Product_Group.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'product_id', 'group_id'] - swagger_schema = prefetch.get_prefetch_schema(["product_groups_list", "product_groups_read"], - serializers.ProductGroupSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasProductGroupPermission) + filterset_fields = ["id", "product_id", "group_id"] + swagger_schema = prefetch.get_prefetch_schema( + ["product_groups_list", "product_groups_read"], + serializers.ProductGroupSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasProductGroupPermission, + ) def get_queryset(self): - return get_authorized_product_groups(Permissions.Product_Group_View).distinct() + return get_authorized_product_groups( + Permissions.Product_Group_View + ).distinct() @extend_schema( request=OpenApiTypes.NONE, @@ -1598,49 +2289,77 @@ def get_queryset(self): ) def partial_update(self, request, pk=None): # Object authorization won't work if not all data is provided - response = {'message': 'Patch function is not offered in this path.'} + response = {"message": "Patch function is not offered in this path."} return Response(response, status=status.HTTP_405_METHOD_NOT_ALLOWED) # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class ProductTypeViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ProductTypeViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + 
mixins.DestroyModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ProductTypeSerializer queryset = Product_Type.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'name', 'critical_product', 'key_product', 'created', 'updated'] - swagger_schema = prefetch.get_prefetch_schema(["product_types_list", "product_types_read"], - serializers.ProductTypeSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasProductTypePermission) + filterset_fields = [ + "id", + "name", + "critical_product", + "key_product", + "created", + "updated", + ] + swagger_schema = prefetch.get_prefetch_schema( + ["product_types_list", "product_types_read"], + serializers.ProductTypeSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasProductTypePermission, + ) def get_queryset(self): - return get_authorized_product_types(Permissions.Product_Type_View).distinct() + return get_authorized_product_types( + Permissions.Product_Type_View + ).distinct() # Overwrite perfom_create of CreateModelMixin to add current user as owner def perform_create(self, serializer): serializer.save() product_type_data = serializer.data - product_type_data.pop('authorization_groups') - product_type_data.pop('members') + product_type_data.pop("authorization_groups") + product_type_data.pop("members") member = Product_Type_Member() member.user = self.request.user member.product_type = Product_Type(**product_type_data) @@ -1664,21 +2383,34 @@ def destroy(self, request, *args, **kwargs): request_body=serializers.ReportGenerateOptionSerializer, responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer}, ) - @action(detail=True, methods=['post'], permission_classes=[IsAuthenticated]) + @action( + detail=True, methods=["post"], permission_classes=[IsAuthenticated] + ) def generate_report(self, request, pk=None): product_type = self.get_object() options = {} # prepare post data - report_options = serializers.ReportGenerateOptionSerializer(data=request.data) + report_options = serializers.ReportGenerateOptionSerializer( + data=request.data + ) if report_options.is_valid(): - options['include_finding_notes'] = report_options.validated_data['include_finding_notes'] - options['include_finding_images'] = report_options.validated_data['include_finding_images'] - options['include_executive_summary'] = report_options.validated_data['include_executive_summary'] - options['include_table_of_contents'] = report_options.validated_data['include_table_of_contents'] + options["include_finding_notes"] = report_options.validated_data[ + "include_finding_notes" + ] + options["include_finding_images"] = report_options.validated_data[ + "include_finding_images" + ] + options[ + "include_executive_summary" + ] = report_options.validated_data["include_executive_summary"] + options[ + "include_table_of_contents" + ] = report_options.validated_data["include_table_of_contents"] else: - return Response(report_options.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + report_options.errors, status=status.HTTP_400_BAD_REQUEST + ) data = report_generate(request, product_type, options) report = serializers.ReportGenerateSerializer(data) @@ -1687,43 +2419,69 @@ def generate_report(self, request, pk=None): # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of 
fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class ProductTypeMemberViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ProductTypeMemberViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ProductTypeMemberSerializer queryset = Product_Type_Member.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'product_type_id', 'user_id'] - swagger_schema = prefetch.get_prefetch_schema(["product_type_members_list", "product_type_members_read"], - serializers.ProductTypeMemberSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasProductTypeMemberPermission) + filterset_fields = ["id", "product_type_id", "user_id"] + swagger_schema = prefetch.get_prefetch_schema( + ["product_type_members_list", "product_type_members_read"], + serializers.ProductTypeMemberSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasProductTypeMemberPermission, + ) def get_queryset(self): - return get_authorized_product_type_members(Permissions.Product_Type_View).distinct() + return get_authorized_product_type_members( + Permissions.Product_Type_View + ).distinct() def destroy(self, request, *args, **kwargs): instance = self.get_object() if instance.role.is_owner: - owners = Product_Type_Member.objects.filter(product_type=instance.product_type, role__is_owner=True).count() + owners = Product_Type_Member.objects.filter( + product_type=instance.product_type, role__is_owner=True + ).count() if owners <= 1: - return Response('There must be at least one owner', status=status.HTTP_400_BAD_REQUEST) + return Response( + "There must be at least one owner", + status=status.HTTP_400_BAD_REQUEST, + ) self.perform_destroy(instance) return Response(status=status.HTTP_204_NO_CONTENT) @@ -1737,42 +2495,63 @@ def destroy(self, request, *args, **kwargs): ) def partial_update(self, request, pk=None): # Object authorization won't work if not all data is provided - response = {'message': 'Patch function is not offered in this path.'} + response = {"message": "Patch function is not offered in this path."} return Response(response, status=status.HTTP_405_METHOD_NOT_ALLOWED) # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, 
OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class ProductTypeGroupViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ProductTypeGroupViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ProductTypeGroupSerializer queryset = Product_Type_Group.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'product_type_id', 'group_id'] - swagger_schema = prefetch.get_prefetch_schema(["product_type_groups_list", "product_type_groups_read"], - serializers.ProductTypeGroupSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasProductTypeGroupPermission) + filterset_fields = ["id", "product_type_id", "group_id"] + swagger_schema = prefetch.get_prefetch_schema( + ["product_type_groups_list", "product_type_groups_read"], + serializers.ProductTypeGroupSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasProductTypeGroupPermission, + ) def get_queryset(self): - return get_authorized_product_type_groups(Permissions.Product_Type_Group_View).distinct() + return get_authorized_product_type_groups( + Permissions.Product_Type_Group_View + ).distinct() @extend_schema( request=OpenApiTypes.NONE, @@ -1784,45 +2563,57 @@ def get_queryset(self): ) def partial_update(self, request, pk=None): # Object authorization won't work if not all data is provided - response = {'message': 'Patch function is not offered in this path.'} + response = {"message": "Patch function is not offered in this path."} return Response(response, status=status.HTTP_405_METHOD_NOT_ALLOWED) # Authorization: object-based -class StubFindingsViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class StubFindingsViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = 
serializers.StubFindingSerializer queryset = Stub_Finding.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'title', 'date', 'severity', 'description'] - swagger_schema = prefetch.get_prefetch_schema(["stub_findings_list", "stub_findings_read"], serializers.StubFindingSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasFindingPermission) + filterset_fields = ["id", "title", "date", "severity", "description"] + swagger_schema = prefetch.get_prefetch_schema( + ["stub_findings_list", "stub_findings_read"], + serializers.StubFindingSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasFindingPermission, + ) def get_queryset(self): - return get_authorized_stub_findings(Permissions.Finding_View).distinct() + return get_authorized_stub_findings( + Permissions.Finding_View + ).distinct() def get_serializer_class(self): - if self.request and self.request.method == 'POST': + if self.request and self.request.method == "POST": return serializers.StubFindingCreateSerializer else: return serializers.StubFindingSerializer # Authorization: authenticated, configuration -class DevelopmentEnvironmentViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class DevelopmentEnvironmentViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.DevelopmentEnvironmentSerializer queryset = Development_Environment.objects.all() filter_backends = (DjangoFilterBackend,) @@ -1830,21 +2621,25 @@ class DevelopmentEnvironmentViewSet(mixins.ListModelMixin, # Authorization: object-based -class TestsViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - ra_api.AcceptedRisksMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class TestsViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + ra_api.AcceptedRisksMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.TestSerializer queryset = Test.objects.none() filter_backends = (DjangoFilterBackend,) filterset_class = ApiTestFilter - swagger_schema = prefetch.get_prefetch_schema(["tests_list", "tests_read"], serializers.TestSerializer).to_schema() + swagger_schema = prefetch.get_prefetch_schema( + ["tests_list", "tests_read"], serializers.TestSerializer + ).to_schema() permission_classes = (IsAuthenticated, permissions.UserHasTestPermission) @property @@ -1852,9 +2647,11 @@ def risk_application_model_class(self): return Test def get_queryset(self): - return get_authorized_tests(Permissions.Test_View).prefetch_related( - 'notes', - 'files').distinct() + return ( + get_authorized_tests(Permissions.Test_View) + .prefetch_related("notes", "files") + .distinct() + ) def destroy(self, request, *args, **kwargs): instance = self.get_object() @@ -1866,8 +2663,8 @@ def destroy(self, request, *args, **kwargs): return Response(status=status.HTTP_204_NO_CONTENT) def 
get_serializer_class(self): - if self.request and self.request.method == 'POST': - if self.action == 'accept_risks': + if self.request and self.request.method == "POST": + if self.action == "accept_risks": return ra_api.AcceptedRiskSerializer return serializers.TestCreateSerializer else: @@ -1881,380 +2678,524 @@ def get_serializer_class(self): request_body=serializers.ReportGenerateOptionSerializer, responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer}, ) - @action(detail=True, methods=['post'], permission_classes=[IsAuthenticated]) + @action( + detail=True, methods=["post"], permission_classes=[IsAuthenticated] + ) def generate_report(self, request, pk=None): test = self.get_object() options = {} # prepare post data - report_options = serializers.ReportGenerateOptionSerializer(data=request.data) + report_options = serializers.ReportGenerateOptionSerializer( + data=request.data + ) if report_options.is_valid(): - options['include_finding_notes'] = report_options.validated_data['include_finding_notes'] - options['include_finding_images'] = report_options.validated_data['include_finding_images'] - options['include_executive_summary'] = report_options.validated_data['include_executive_summary'] - options['include_table_of_contents'] = report_options.validated_data['include_table_of_contents'] + options["include_finding_notes"] = report_options.validated_data[ + "include_finding_notes" + ] + options["include_finding_images"] = report_options.validated_data[ + "include_finding_images" + ] + options[ + "include_executive_summary" + ] = report_options.validated_data["include_executive_summary"] + options[ + "include_table_of_contents" + ] = report_options.validated_data["include_table_of_contents"] else: - return Response(report_options.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + report_options.errors, status=status.HTTP_400_BAD_REQUEST + ) data = report_generate(request, test, options) report = serializers.ReportGenerateSerializer(data) return Response(report.data) @extend_schema( - methods=['GET'], - responses={status.HTTP_200_OK: serializers.TestToNotesSerializer} + methods=["GET"], + responses={status.HTTP_200_OK: serializers.TestToNotesSerializer}, ) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.AddNewNoteOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.NoteSerializer} + responses={status.HTTP_201_CREATED: serializers.NoteSerializer}, ) @swagger_auto_schema( - method='get', - responses={status.HTTP_200_OK: serializers.TestToNotesSerializer} + method="get", + responses={status.HTTP_200_OK: serializers.TestToNotesSerializer}, ) @swagger_auto_schema( - methods=['post'], + methods=["post"], request_body=serializers.AddNewNoteOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.NoteSerializer} + responses={status.HTTP_201_CREATED: serializers.NoteSerializer}, ) @action(detail=True, methods=["get", "post"]) def notes(self, request, pk=None): test = self.get_object() - if request.method == 'POST': - new_note = serializers.AddNewNoteOptionSerializer(data=request.data) + if request.method == "POST": + new_note = serializers.AddNewNoteOptionSerializer( + data=request.data + ) if new_note.is_valid(): - entry = new_note.validated_data['entry'] - private = new_note.validated_data.get('private', False) - note_type = new_note.validated_data.get('note_type', None) + entry = new_note.validated_data["entry"] + private = new_note.validated_data.get("private", False) + note_type = 
new_note.validated_data.get("note_type", None) else: - return Response(new_note.errors, - status=status.HTTP_400_BAD_REQUEST) + return Response( + new_note.errors, status=status.HTTP_400_BAD_REQUEST + ) author = request.user - note = Notes(entry=entry, author=author, private=private, note_type=note_type) + note = Notes( + entry=entry, + author=author, + private=private, + note_type=note_type, + ) note.save() test.notes.add(note) - serialized_note = serializers.NoteSerializer({ - "author": author, "entry": entry, - "private": private - }) - result = serializers.TestToNotesSerializer({ - "test_id": test, "notes": [serialized_note.data] - }) - return Response(serialized_note.data, - status=status.HTTP_201_CREATED) + serialized_note = serializers.NoteSerializer( + {"author": author, "entry": entry, "private": private} + ) + result = serializers.TestToNotesSerializer( + {"test_id": test, "notes": [serialized_note.data]} + ) + return Response( + serialized_note.data, status=status.HTTP_201_CREATED + ) notes = test.notes.all() - serialized_notes = serializers.TestToNotesSerializer({ - "test_id": test, "notes": notes - }) - return Response(serialized_notes.data, - status=status.HTTP_200_OK) + serialized_notes = serializers.TestToNotesSerializer( + {"test_id": test, "notes": notes} + ) + return Response(serialized_notes.data, status=status.HTTP_200_OK) @extend_schema( - methods=['GET'], - responses={status.HTTP_200_OK: serializers.TestToFilesSerializer} + methods=["GET"], + responses={status.HTTP_200_OK: serializers.TestToFilesSerializer}, ) @extend_schema( - methods=['POST'], + methods=["POST"], request=serializers.AddNewFileOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.FileSerializer} + responses={status.HTTP_201_CREATED: serializers.FileSerializer}, ) @swagger_auto_schema( - method='get', - responses={status.HTTP_200_OK: serializers.TestToFilesSerializer} + method="get", + responses={status.HTTP_200_OK: serializers.TestToFilesSerializer}, ) @swagger_auto_schema( - method='post', + method="post", request_body=serializers.AddNewFileOptionSerializer, - responses={status.HTTP_201_CREATED: serializers.FileSerializer} + responses={status.HTTP_201_CREATED: serializers.FileSerializer}, + ) + @action( + detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,) ) - @action(detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,)) def files(self, request, pk=None): test = self.get_object() - if request.method == 'POST': + if request.method == "POST": new_file = serializers.FileSerializer(data=request.data) if new_file.is_valid(): - title = new_file.validated_data['title'] - file = new_file.validated_data['file'] + title = new_file.validated_data["title"] + file = new_file.validated_data["file"] else: - return Response(new_file.errors, status=status.HTTP_400_BAD_REQUEST) + return Response( + new_file.errors, status=status.HTTP_400_BAD_REQUEST + ) file = FileUpload(title=title, file=file) file.save() test.files.add(file) serialized_file = serializers.FileSerializer(file) - return Response(serialized_file.data, status=status.HTTP_201_CREATED) + return Response( + serialized_file.data, status=status.HTTP_201_CREATED + ) files = test.files.all() - serialized_files = serializers.TestToFilesSerializer({ - "test_id": test, "files": files - }) + serialized_files = serializers.TestToFilesSerializer( + {"test_id": test, "files": files} + ) return Response(serialized_files.data, status=status.HTTP_200_OK) @extend_schema( - methods=['GET'], + methods=["GET"], 
responses={ status.HTTP_200_OK: serializers.RawFileSerializer, - } + }, ) @swagger_auto_schema( - method='get', + method="get", responses={ status.HTTP_200_OK: serializers.RawFileSerializer, - } + }, + ) + @action( + detail=True, + methods=["get"], + url_path=r"files/download/(?P\d+)", ) - @action(detail=True, methods=["get"], url_path=r'files/download/(?P\d+)') def download_file(self, request, file_id, pk=None): test = self.get_object() # Get the file object file_object_qs = test.files.filter(id=file_id) - file_object = file_object_qs.first() if len(file_object_qs) > 0 else None + file_object = ( + file_object_qs.first() if len(file_object_qs) > 0 else None + ) if file_object is None: - return Response({"error": "File ID not associated with Test"}, status=status.HTTP_404_NOT_FOUND) + return Response( + {"error": "File ID not associated with Test"}, + status=status.HTTP_404_NOT_FOUND, + ) # Get the path of the file in media root - file_path = f'{settings.MEDIA_ROOT}/{file_object.file.url.lstrip(settings.MEDIA_URL)}' + file_path = f"{settings.MEDIA_ROOT}/{file_object.file.url.lstrip(settings.MEDIA_URL)}" file_handle = open(file_path, "rb") # send file - response = FileResponse(file_handle, content_type=f'{mimetypes.guess_type(file_path)}', status=status.HTTP_200_OK) - response['Content-Length'] = file_object.file.size - response['Content-Disposition'] = f'attachment; filename="{file_object.file.name}"' + response = FileResponse( + file_handle, + content_type=f"{mimetypes.guess_type(file_path)}", + status=status.HTTP_200_OK, + ) + response["Content-Length"] = file_object.file.size + response[ + "Content-Disposition" + ] = f'attachment; filename="{file_object.file.name}"' return response # Authorization: authenticated, configuration -class TestTypesViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet): +class TestTypesViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, +): serializer_class = serializers.TestTypeSerializer queryset = Test_Type.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['name', ] + filterset_fields = [ + "name", + ] permission_classes = (IsAuthenticated, DjangoModelPermissions) @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class TestImportViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, 
- viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class TestImportViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.TestImportSerializer queryset = Test_Import.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['test', 'findings_affected', 'version', 'branch_tag', 'build_id', 'commit_hash', 'test_import_finding_action__action', 'test_import_finding_action__finding', 'test_import_finding_action__created'] - swagger_schema = prefetch.get_prefetch_schema(["test_imports_list", "test_imports_read"], serializers.TestImportSerializer). \ - to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasTestImportPermission) + filterset_fields = [ + "test", + "findings_affected", + "version", + "branch_tag", + "build_id", + "commit_hash", + "test_import_finding_action__action", + "test_import_finding_action__finding", + "test_import_finding_action__created", + ] + swagger_schema = prefetch.get_prefetch_schema( + ["test_imports_list", "test_imports_read"], + serializers.TestImportSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasTestImportPermission, + ) def get_queryset(self): - return get_authorized_test_imports(Permissions.Test_View).prefetch_related( - 'test_import_finding_action_set', - 'findings_affected', - 'findings_affected__endpoints', - 'findings_affected__status_finding', - 'findings_affected__finding_meta', - 'findings_affected__jira_issue', - 'findings_affected__burprawrequestresponse_set', - 'findings_affected__jira_issue', - 'findings_affected__jira_issue', - 'findings_affected__jira_issue', - 'findings_affected__reviewers', - 'findings_affected__notes', - 'findings_affected__notes__author', - 'findings_affected__notes__history', - 'findings_affected__files', - 'findings_affected__found_by', - 'findings_affected__tags', - 'findings_affected__risk_acceptance_set', - 'test', - 'test__tags', - 'test__notes', - 'test__notes__author', - 'test__files', - 'test__test_type', - 'test__engagement', - 'test__environment', - 'test__engagement__product', - 'test__engagement__product__prod_type') + return get_authorized_test_imports( + Permissions.Test_View + ).prefetch_related( + "test_import_finding_action_set", + "findings_affected", + "findings_affected__endpoints", + "findings_affected__status_finding", + "findings_affected__finding_meta", + "findings_affected__jira_issue", + "findings_affected__burprawrequestresponse_set", + "findings_affected__jira_issue", + "findings_affected__jira_issue", + "findings_affected__jira_issue", + "findings_affected__reviewers", + "findings_affected__notes", + "findings_affected__notes__author", + "findings_affected__notes__history", + "findings_affected__files", + "findings_affected__found_by", + "findings_affected__tags", + "findings_affected__risk_acceptance_set", + "test", + "test__tags", + "test__notes", + "test__notes__author", + "test__files", + "test__test_type", + "test__engagement", + "test__environment", + "test__engagement__product", + "test__engagement__product__prod_type", + ) # Authorization: configurations -class ToolConfigurationsViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - 
mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ToolConfigurationsViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ToolConfigurationSerializer queryset = Tool_Configuration.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'name', 'tool_type', 'url', 'authentication_type'] - swagger_schema = prefetch.get_prefetch_schema(["tool_configurations_list", "tool_configurations_read"], serializers.ToolConfigurationSerializer).to_schema() - permission_classes = (permissions.UserHasConfigurationPermissionSuperuser, ) + filterset_fields = [ + "id", + "name", + "tool_type", + "url", + "authentication_type", + ] + swagger_schema = prefetch.get_prefetch_schema( + ["tool_configurations_list", "tool_configurations_read"], + serializers.ToolConfigurationSerializer, + ).to_schema() + permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) # Authorization: object-based -class ToolProductSettingsViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ToolProductSettingsViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ToolProductSettingsSerializer queryset = Tool_Product_Settings.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'name', 'product', 'tool_configuration', 'tool_project_id', 'url'] - swagger_schema = prefetch.get_prefetch_schema(["tool_configurations_list", "tool_configurations_read"], serializers.ToolConfigurationSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasToolProductSettingsPermission) + filterset_fields = [ + "id", + "name", + "product", + "tool_configuration", + "tool_project_id", + "url", + ] + swagger_schema = prefetch.get_prefetch_schema( + ["tool_configurations_list", "tool_configurations_read"], + serializers.ToolConfigurationSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasToolProductSettingsPermission, + ) def get_queryset(self): return get_authorized_tool_product_settings(Permissions.Product_View) # Authorization: configuration -class ToolTypesViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class ToolTypesViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.ToolTypeSerializer queryset = Tool_Type.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'name', 'description'] - permission_classes = 
(permissions.UserHasConfigurationPermissionSuperuser, ) + filterset_fields = ["id", "name", "description"] + permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) # Authorization: authenticated, configuration -class RegulationsViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class RegulationsViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.RegulationSerializer queryset = Regulation.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'name', 'description'] + filterset_fields = ["id", "name", "description"] permission_classes = (IsAuthenticated, DjangoModelPermissions) # Authorization: configuration -class UsersViewSet(mixins.CreateModelMixin, - mixins.UpdateModelMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class UsersViewSet( + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.UserSerializer queryset = User.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'username', 'first_name', 'last_name', 'email', 'is_active', 'is_superuser'] - permission_classes = (permissions.UserHasConfigurationPermissionSuperuser, ) + filterset_fields = [ + "id", + "username", + "first_name", + "last_name", + "email", + "is_active", + "is_superuser", + ] + permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) def destroy(self, request, *args, **kwargs): instance = self.get_object() if request.user == instance: - return Response('Users may not delete themselves', status=status.HTTP_400_BAD_REQUEST) + return Response( + "Users may not delete themselves", + status=status.HTTP_400_BAD_REQUEST, + ) self.perform_destroy(instance) return Response(status=status.HTTP_204_NO_CONTENT) # Authorization: superuser @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class UserContactInfoViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - mixins.ListModelMixin, - 
mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class UserContactInfoViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.UserContactInfoSerializer queryset = UserContactInfo.objects.all() - swagger_schema = prefetch.get_prefetch_schema(["user_contact_infos_list", "user_contact_infos_read"], - serializers.UserContactInfoSerializer).to_schema() + swagger_schema = prefetch.get_prefetch_schema( + ["user_contact_infos_list", "user_contact_infos_read"], + serializers.UserContactInfoSerializer, + ).to_schema() filter_backends = (DjangoFilterBackend,) - filterset_fields = '__all__' + filterset_fields = "__all__" permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) # Authorization: authenticated users class UserProfileView(GenericAPIView): - permission_classes = (IsAuthenticated, ) + permission_classes = (IsAuthenticated,) pagination_class = None serializer_class = serializers.UserProfileSerializer @swagger_auto_schema( - method='get', - responses={status.HTTP_200_OK: serializers.UserProfileSerializer} + method="get", + responses={status.HTTP_200_OK: serializers.UserProfileSerializer}, + ) + @action( + detail=True, methods=["get"], filter_backends=[], pagination_class=None ) - @action(detail=True, methods=["get"], - filter_backends=[], pagination_class=None) def get(self, request, format=None): user = get_current_user() - user_contact_info = user.usercontactinfo if hasattr(user, 'usercontactinfo') else None - global_role = user.global_role if hasattr(user, 'global_role') else None + user_contact_info = ( + user.usercontactinfo if hasattr(user, "usercontactinfo") else None + ) + global_role = ( + user.global_role if hasattr(user, "global_role") else None + ) dojo_group_member = Dojo_Group_Member.objects.filter(user=user) product_type_member = Product_Type_Member.objects.filter(user=user) product_member = Product_Member.objects.filter(user=user) serializer = serializers.UserProfileSerializer( - {"user": user, - "user_contact_info": user_contact_info, - "global_role": global_role, - "dojo_group_member": dojo_group_member, - "product_type_member": product_type_member, - "product_member": product_member}, many=False) + { + "user": user, + "user_contact_info": user_contact_info, + "global_role": global_role, + "dojo_group_member": dojo_group_member, + "product_type_member": product_type_member, + "product_member": product_member, + }, + many=False, + ) return Response(serializer.data) # Authorization: authenticated users, DjangoModelPermissions -class ImportScanView(mixins.CreateModelMixin, - viewsets.GenericViewSet): +class ImportScanView(mixins.CreateModelMixin, viewsets.GenericViewSet): """ Imports a scan report into an engagement or product. @@ -2280,25 +3221,48 @@ class ImportScanView(mixins.CreateModelMixin, When `auto_create_context` is set to `True` you can use `deduplication_on_engagement` to restrict deduplication for imported Findings to the newly created Engagement. 
""" + serializer_class = serializers.ImportScanSerializer parser_classes = [MultiPartParser] queryset = Test.objects.none() permission_classes = (IsAuthenticated, permissions.UserHasImportPermission) def perform_create(self, serializer): - _, _, _, engagement_id, engagement_name, product_name, product_type_name, auto_create_context, deduplication_on_engagement, do_not_reactivate = serializers.get_import_meta_data_from_dict(serializer.validated_data) + ( + _, + _, + _, + engagement_id, + engagement_name, + product_name, + product_type_name, + auto_create_context, + deduplication_on_engagement, + do_not_reactivate, + ) = serializers.get_import_meta_data_from_dict( + serializer.validated_data + ) product = get_target_product_if_exists(product_name) - engagement = get_target_engagement_if_exists(engagement_id, engagement_name, product) - - # when using auto_create_context, the engagement or product may not have been created yet - jira_driver = engagement if engagement else product if product else None - jira_project = jira_helper.get_jira_project(jira_driver) if jira_driver else None - - push_to_jira = serializer.validated_data.get('push_to_jira') - if get_system_setting('enable_jira') and jira_project: + engagement = get_target_engagement_if_exists( + engagement_id, engagement_name, product + ) + + # when using auto_create_context, the engagement or product may not + # have been created yet + jira_driver = ( + engagement if engagement else product if product else None + ) + jira_project = ( + jira_helper.get_jira_project(jira_driver) if jira_driver else None + ) + + push_to_jira = serializer.validated_data.get("push_to_jira") + if get_system_setting("enable_jira") and jira_project: push_to_jira = push_to_jira or jira_project.push_all_issues - logger.debug('push_to_jira: %s', serializer.validated_data.get('push_to_jira')) + logger.debug( + "push_to_jira: %s", serializer.validated_data.get("push_to_jira") + ) serializer.save(push_to_jira=push_to_jira) def get_queryset(self): @@ -2306,8 +3270,9 @@ def get_queryset(self): # Authorization: authenticated users, DjangoModelPermissions -class EndpointMetaImporterView(mixins.CreateModelMixin, - viewsets.GenericViewSet): +class EndpointMetaImporterView( + mixins.CreateModelMixin, viewsets.GenericViewSet +): """ Imports a CSV file into a product to propagate arbitrary meta and tags on endpoints. @@ -2319,10 +3284,14 @@ class EndpointMetaImporterView(mixins.CreateModelMixin, In this scenario Defect Dojo will look up the product by the provided details. 
""" + serializer_class = serializers.EndpointMetaImporterSerializer parser_classes = [MultiPartParser] queryset = Product.objects.all() - permission_classes = (IsAuthenticated, permissions.UserHasMetaImportPermission) + permission_classes = ( + IsAuthenticated, + permissions.UserHasMetaImportPermission, + ) def perform_create(self, serializer): serializer.save() @@ -2332,69 +3301,90 @@ def get_queryset(self): # Authorization: configuration -class LanguageTypeViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.CreateModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class LanguageTypeViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.CreateModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.LanguageTypeSerializer queryset = Language_Type.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'language', 'color'] - permission_classes = (permissions.UserHasConfigurationPermissionStaff, ) + filterset_fields = ["id", "language", "color"] + permission_classes = (permissions.UserHasConfigurationPermissionStaff,) # Authorization: object-based @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class LanguageViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.UpdateModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class LanguageViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.UpdateModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.LanguageSerializer queryset = Languages.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'language', 'product'] - swagger_schema = prefetch.get_prefetch_schema(["languages_list", "languages_read"], - serializers.LanguageSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasLanguagePermission) + filterset_fields = ["id", "language", "product"] + swagger_schema = prefetch.get_prefetch_schema( + ["languages_list", "languages_read"], serializers.LanguageSerializer + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasLanguagePermission, + 
) def get_queryset(self): return get_authorized_languages(Permissions.Language_View).distinct() # Authorization: object-based -class ImportLanguagesView(mixins.CreateModelMixin, - viewsets.GenericViewSet): +class ImportLanguagesView(mixins.CreateModelMixin, viewsets.GenericViewSet): serializer_class = serializers.ImportLanguagesSerializer parser_classes = [MultiPartParser] queryset = Product.objects.none() - permission_classes = (IsAuthenticated, permissions.UserHasLanguagePermission) + permission_classes = ( + IsAuthenticated, + permissions.UserHasLanguagePermission, + ) def get_queryset(self): return get_authorized_products(Permissions.Language_Add) # Authorization: object-based -class ReImportScanView(mixins.CreateModelMixin, - viewsets.GenericViewSet): +class ReImportScanView(mixins.CreateModelMixin, viewsets.GenericViewSet): """ Reimports a scan report into an existing test. @@ -2421,56 +3411,110 @@ class ReImportScanView(mixins.CreateModelMixin, When `auto_create_context` is set to `True` you can use `deduplication_on_engagement` to restrict deduplication for imported Findings to the newly created Engagement. """ + serializer_class = serializers.ReImportScanSerializer parser_classes = [MultiPartParser] queryset = Test.objects.none() - permission_classes = (IsAuthenticated, permissions.UserHasReimportPermission) + permission_classes = ( + IsAuthenticated, + permissions.UserHasReimportPermission, + ) def get_queryset(self): return get_authorized_tests(Permissions.Import_Scan_Result) def perform_create(self, serializer): - test_id, test_title, scan_type, _, engagement_name, product_name, product_type_name, auto_create_context, deduplication_on_engagement, do_not_reactivate = serializers.get_import_meta_data_from_dict(serializer.validated_data) + ( + test_id, + test_title, + scan_type, + _, + engagement_name, + product_name, + product_type_name, + auto_create_context, + deduplication_on_engagement, + do_not_reactivate, + ) = serializers.get_import_meta_data_from_dict( + serializer.validated_data + ) product = get_target_product_if_exists(product_name) - engagement = get_target_engagement_if_exists(None, engagement_name, product) - test = get_target_test_if_exists(test_id, test_title, scan_type, engagement) - - # when using auto_create_context, the engagement or product may not have been created yet - jira_driver = test if test else engagement if engagement else product if product else None - jira_project = jira_helper.get_jira_project(jira_driver) if jira_driver else None - - push_to_jira = serializer.validated_data.get('push_to_jira') - if get_system_setting('enable_jira') and jira_project: + engagement = get_target_engagement_if_exists( + None, engagement_name, product + ) + test = get_target_test_if_exists( + test_id, test_title, scan_type, engagement + ) + + # when using auto_create_context, the engagement or product may not + # have been created yet + jira_driver = ( + test + if test + else engagement + if engagement + else product + if product + else None + ) + jira_project = ( + jira_helper.get_jira_project(jira_driver) if jira_driver else None + ) + + push_to_jira = serializer.validated_data.get("push_to_jira") + if get_system_setting("enable_jira") and jira_project: push_to_jira = push_to_jira or jira_project.push_all_issues - logger.debug('push_to_jira: %s', serializer.validated_data.get('push_to_jira')) + logger.debug( + "push_to_jira: %s", serializer.validated_data.get("push_to_jira") + ) serializer.save(push_to_jira=push_to_jira) # Authorization: configuration -class 
NoteTypeViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class NoteTypeViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.NoteTypeSerializer queryset = Note_Type.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'name', 'description', 'is_single', 'is_active', 'is_mandatory'] - permission_classes = (permissions.UserHasConfigurationPermissionSuperuser, ) + filterset_fields = [ + "id", + "name", + "description", + "is_single", + "is_active", + "is_mandatory", + ] + permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) # Authorization: superuser -class NotesViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet): +class NotesViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, +): serializer_class = serializers.NoteSerializer queryset = Notes.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'entry', 'author', 'private', 'date', 'edited', 'edit_time', 'editor'] + filterset_fields = [ + "id", + "entry", + "author", + "private", + "date", + "edited", + "edit_time", + "editor", + ] permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) @@ -2482,15 +3526,7 @@ def report_generate(request, obj, options): test = None endpoint = None endpoints = None - endpoint_all_findings = None endpoint_monthly_counts = None - endpoint_active_findings = None - accepted_findings = None - open_findings = None - closed_findings = None - verified_findings = None - report_title = None - report_subtitle = None include_finding_notes = False include_finding_images = False @@ -2498,34 +3534,46 @@ def report_generate(request, obj, options): include_table_of_contents = False report_info = "Generated By %s on %s" % ( - user.get_full_name(), (timezone.now().strftime("%m/%d/%Y %I:%M%p %Z"))) + user.get_full_name(), + (timezone.now().strftime("%m/%d/%Y %I:%M%p %Z")), + ) # generate = "_generate" in request.GET report_name = str(obj) - report_type = type(obj).__name__ - include_finding_notes = options.get('include_finding_notes', False) - include_finding_images = options.get('include_finding_images', False) - include_executive_summary = options.get('include_executive_summary', False) - include_table_of_contents = options.get('include_table_of_contents', False) + include_finding_notes = options.get("include_finding_notes", False) + include_finding_images = options.get("include_finding_images", False) + include_executive_summary = options.get("include_executive_summary", False) + include_table_of_contents = options.get("include_table_of_contents", False) if type(obj).__name__ == "Product_Type": product_type = obj report_name = "Product Type Report: " + str(product_type) - report_title = "Product Type Report" - report_subtitle = str(product_type) - - findings = ReportFindingFilter(request.GET, prod_type=product_type, queryset=prefetch_related_findings_for_report(Finding.objects.filter( - test__engagement__product__prod_type=product_type))) - products = Product.objects.filter(prod_type=product_type, - engagement__test__finding__in=findings.qs).distinct() - 
engagements = Engagement.objects.filter(product__prod_type=product_type, - test__finding__in=findings.qs).distinct() - tests = Test.objects.filter(engagement__product__prod_type=product_type, - finding__in=findings.qs).distinct() + + findings = ReportFindingFilter( + request.GET, + prod_type=product_type, + queryset=prefetch_related_findings_for_report( + Finding.objects.filter( + test__engagement__product__prod_type=product_type + ) + ), + ) + products = Product.objects.filter( + prod_type=product_type, engagement__test__finding__in=findings.qs + ).distinct() + engagements = Engagement.objects.filter( + product__prod_type=product_type, test__finding__in=findings.qs + ).distinct() + tests = Test.objects.filter( + engagement__product__prod_type=product_type, + finding__in=findings.qs, + ).distinct() if len(findings.qs) > 0: - start_date = timezone.make_aware(datetime.combine(findings.qs.last().date, datetime.min.time())) + start_date = timezone.make_aware( + datetime.combine(findings.qs.last().date, datetime.min.time()) + ) else: start_date = timezone.now() @@ -2536,116 +3584,125 @@ def report_generate(request, obj, options): # include current month months_between += 1 - endpoint_monthly_counts = get_period_counts_legacy(findings.qs.order_by('numerical_severity'), findings.qs.order_by('numerical_severity'), None, - months_between, start_date, - relative_delta='months') + endpoint_monthly_counts = get_period_counts_legacy( + findings.qs.order_by("numerical_severity"), + findings.qs.order_by("numerical_severity"), + None, + months_between, + start_date, + relative_delta="months", + ) elif type(obj).__name__ == "Product": product = obj report_name = "Product Report: " + str(product) - report_title = "Product Report" - report_subtitle = str(product) - findings = ReportFindingFilter(request.GET, product=product, queryset=prefetch_related_findings_for_report(Finding.objects.filter( - test__engagement__product=product))) + + findings = ReportFindingFilter( + request.GET, + product=product, + queryset=prefetch_related_findings_for_report( + Finding.objects.filter(test__engagement__product=product) + ), + ) ids = set(finding.id for finding in findings.qs) - engagements = Engagement.objects.filter(test__finding__id__in=ids).distinct() + engagements = Engagement.objects.filter( + test__finding__id__in=ids + ).distinct() tests = Test.objects.filter(finding__id__in=ids).distinct() - ids = get_endpoint_ids(Endpoint.objects.filter(product=product).distinct()) + ids = get_endpoint_ids( + Endpoint.objects.filter(product=product).distinct() + ) endpoints = Endpoint.objects.filter(id__in=ids) elif type(obj).__name__ == "Engagement": engagement = obj - findings = ReportFindingFilter(request.GET, engagement=engagement, - queryset=prefetch_related_findings_for_report(Finding.objects.filter(test__engagement=engagement))) + findings = ReportFindingFilter( + request.GET, + engagement=engagement, + queryset=prefetch_related_findings_for_report( + Finding.objects.filter(test__engagement=engagement) + ), + ) report_name = "Engagement Report: " + str(engagement) - report_title = "Engagement Report" - report_subtitle = str(engagement) - ids = set(finding.id for finding in findings.qs) tests = Test.objects.filter(finding__id__in=ids).distinct() - ids = get_endpoint_ids(Endpoint.objects.filter(product=engagement.product).distinct()) + ids = get_endpoint_ids( + Endpoint.objects.filter(product=engagement.product).distinct() + ) endpoints = Endpoint.objects.filter(id__in=ids) elif type(obj).__name__ == "Test": test = obj - 
findings = ReportFindingFilter(request.GET, engagement=test.engagement, - queryset=prefetch_related_findings_for_report(Finding.objects.filter(test=test))) - filename = "test_finding_report.pdf" - template = "dojo/test_pdf_report.html" + findings = ReportFindingFilter( + request.GET, + engagement=test.engagement, + queryset=prefetch_related_findings_for_report( + Finding.objects.filter(test=test) + ), + ) report_name = "Test Report: " + str(test) - report_title = "Test Report" - report_subtitle = str(test) elif type(obj).__name__ == "Endpoint": endpoint = obj host = endpoint.host report_name = "Endpoint Report: " + host - report_type = "Endpoint" - endpoints = Endpoint.objects.filter(host=host, - product=endpoint.product).distinct() - report_title = "Endpoint Report" - report_subtitle = host - findings = ReportFindingFilter(request.GET, - queryset=prefetch_related_findings_for_report(Finding.objects.filter(endpoints__in=endpoints))) + endpoints = Endpoint.objects.filter( + host=host, product=endpoint.product + ).distinct() + findings = ReportFindingFilter( + request.GET, + queryset=prefetch_related_findings_for_report( + Finding.objects.filter(endpoints__in=endpoints) + ), + ) elif type(obj).__name__ == "CastTaggedQuerySet": - findings = ReportFindingFilter(request.GET, - queryset=prefetch_related_findings_for_report(obj).distinct()) + findings = ReportFindingFilter( + request.GET, + queryset=prefetch_related_findings_for_report(obj).distinct(), + ) - report_name = 'Finding' - report_type = 'Finding' - report_title = "Finding Report" - report_subtitle = '' + report_name = "Finding" else: raise Http404() result = { - 'product_type': product_type, - 'product': product, - 'engagement': engagement, - 'report_name': report_name, - 'report_info': report_info, - 'test': test, - 'endpoint': endpoint, - 'endpoints': endpoints, - 'findings': findings.qs.order_by('numerical_severity'), - 'include_table_of_contents': include_table_of_contents, - 'user': user, - 'team_name': settings.TEAM_NAME, - 'title': 'Generate Report', - 'user_id': request.user.id, - 'host': report_url_resolver(request), + "product_type": product_type, + "product": product, + "engagement": engagement, + "report_name": report_name, + "report_info": report_info, + "test": test, + "endpoint": endpoint, + "endpoints": endpoints, + "findings": findings.qs.order_by("numerical_severity"), + "include_table_of_contents": include_table_of_contents, + "user": user, + "team_name": settings.TEAM_NAME, + "title": "Generate Report", + "user_id": request.user.id, + "host": report_url_resolver(request), } finding_notes = [] finding_files = [] if include_finding_images: - for finding in findings.qs.order_by('numerical_severity'): + for finding in findings.qs.order_by("numerical_severity"): files = finding.files.all() if files: - finding_files.append( - { - "finding_id": finding, - "files": files - } - ) - result['finding_files'] = finding_files + finding_files.append({"finding_id": finding, "files": files}) + result["finding_files"] = finding_files if include_finding_notes: - for finding in findings.qs.order_by('numerical_severity'): + for finding in findings.qs.order_by("numerical_severity"): notes = finding.notes.filter(private=False) if notes: - finding_notes.append( - { - "finding_id": finding, - "notes": notes - } - ) - result['finding_notes'] = finding_notes + finding_notes.append({"finding_id": finding, "notes": notes}) + result["finding_notes"] = finding_notes # Generating Executive summary based on obj type if include_executive_summary and 
type(obj).__name__ != "Endpoint": @@ -2754,28 +3811,29 @@ def report_generate(request, obj, options): pass # do nothing executive_summary = { - 'engagement_name': engagement_name, - 'engagement_target_start': engagement_target_start, - 'engagement_target_end': engagement_target_end, - 'test_type_name': test_type_name, - 'test_target_start': test_target_start, - 'test_target_end': test_target_end, - 'test_environment_name': test_environment_name, - 'test_strategy_ref': test_strategy_ref, - 'total_findings': total_findings + "engagement_name": engagement_name, + "engagement_target_start": engagement_target_start, + "engagement_target_end": engagement_target_end, + "test_type_name": test_type_name, + "test_target_start": test_target_start, + "test_target_end": test_target_end, + "test_environment_name": test_environment_name, + "test_strategy_ref": test_strategy_ref, + "total_findings": total_findings, } # End of executive summary generation - result['executive_summary'] = executive_summary + result["executive_summary"] = executive_summary return result # Authorization: superuser -class SystemSettingsViewSet(mixins.ListModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet): - """ Basic control over System Settings. Use 'id' 1 for PUT, PATCH operations """ +class SystemSettingsViewSet( + mixins.ListModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet +): + """Basic control over System Settings. Use 'id' 1 for PUT, PATCH operations""" + permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) serializer_class = serializers.SystemSettingsSerializer queryset = System_Settings.objects.all() @@ -2783,156 +3841,221 @@ class SystemSettingsViewSet(mixins.ListModelMixin, # Authorization: superuser @extend_schema_view( - list=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], + list=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + retrieve=extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], ), - retrieve=extend_schema(parameters=[ - OpenApiParameter("prefetch", OpenApiTypes.STR, OpenApiParameter.QUERY, required=False, - description="List of fields for which to prefetch model instances and add those to the response"), - ], - ) ) -class NotificationsViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - mixins.UpdateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class NotificationsViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + mixins.UpdateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.NotificationsSerializer queryset = Notifications.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'user', 'product', 'template'] + filterset_fields = ["id", 
"user", "product", "template"] permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) - swagger_schema = prefetch.get_prefetch_schema(["notifications_list", "notifications_read"], - serializers.NotificationsSerializer).to_schema() - - -class EngagementPresetsViewset(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): + swagger_schema = prefetch.get_prefetch_schema( + ["notifications_list", "notifications_read"], + serializers.NotificationsSerializer, + ).to_schema() + + +class EngagementPresetsViewset( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.EngagementPresetsSerializer queryset = Engagement_Presets.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'title', 'product'] - swagger_schema = prefetch.get_prefetch_schema(["engagement_presets_list", "engagement_presets_read"], serializers.EngagementPresetsSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasEngagementPresetPermission) + filterset_fields = ["id", "title", "product"] + swagger_schema = prefetch.get_prefetch_schema( + ["engagement_presets_list", "engagement_presets_read"], + serializers.EngagementPresetsSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasEngagementPresetPermission, + ) def get_queryset(self): return get_authorized_engagement_presets(Permissions.Product_View) -class EngagementCheckListViewset(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class EngagementCheckListViewset( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.EngagementCheckListSerializer queryset = Check_List.objects.none() filter_backends = (DjangoFilterBackend,) - swagger_schema = prefetch.get_prefetch_schema(["engagement_checklists_list", "engagement_checklists_read"], serializers.EngagementCheckListSerializer).to_schema() - permission_classes = (IsAuthenticated, permissions.UserHasEngagementPermission) + swagger_schema = prefetch.get_prefetch_schema( + ["engagement_checklists_list", "engagement_checklists_read"], + serializers.EngagementCheckListSerializer, + ).to_schema() + permission_classes = ( + IsAuthenticated, + permissions.UserHasEngagementPermission, + ) def get_queryset(self): return get_authorized_engagement_checklists(Permissions.Product_View) -class NetworkLocationsViewset(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet, - dojo_mixins.DeletePreviewModelMixin): +class NetworkLocationsViewset( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + 
mixins.DestroyModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, + dojo_mixins.DeletePreviewModelMixin, +): serializer_class = serializers.NetworkLocationsSerializer queryset = Network_Locations.objects.all() filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'location'] + filterset_fields = ["id", "location"] permission_classes = (IsAuthenticated, DjangoModelPermissions) # Authorization: superuser -class ConfigurationPermissionViewSet(mixins.RetrieveModelMixin, - mixins.ListModelMixin, - viewsets.GenericViewSet): +class ConfigurationPermissionViewSet( + mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet +): serializer_class = serializers.ConfigurationPermissionSerializer - queryset = Permission.objects.filter(codename__in=get_configuration_permissions_codenames()) + queryset = Permission.objects.filter( + codename__in=get_configuration_permissions_codenames() + ) filter_backends = (DjangoFilterBackend,) - filterset_fields = ['id', 'name', 'codename'] + filterset_fields = ["id", "name", "codename"] permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) -class SLAConfigurationViewset(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - mixins.UpdateModelMixin, - mixins.DestroyModelMixin, - mixins.CreateModelMixin, - viewsets.GenericViewSet): +class SLAConfigurationViewset( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + mixins.UpdateModelMixin, + mixins.DestroyModelMixin, + mixins.CreateModelMixin, + viewsets.GenericViewSet, +): serializer_class = serializers.SLAConfigurationSerializer queryset = SLA_Configuration.objects.all() filter_backends = (DjangoFilterBackend,) permission_classes = (IsAuthenticated, DjangoModelPermissions) -class QuestionnaireQuestionViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - viewsets.GenericViewSet, - dojo_mixins.QuestionSubClassFieldsMixin): +class QuestionnaireQuestionViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + viewsets.GenericViewSet, + dojo_mixins.QuestionSubClassFieldsMixin, +): serializer_class = serializers.QuestionnaireQuestionSerializer queryset = Question.objects.all() filter_backends = (DjangoFilterBackend,) - permission_classes = (permissions.UserHasEngagementPermission, DjangoModelPermissions) + permission_classes = ( + permissions.UserHasEngagementPermission, + DjangoModelPermissions, + ) -class QuestionnaireAnswerViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - viewsets.GenericViewSet, - dojo_mixins.AnswerSubClassFieldsMixin): +class QuestionnaireAnswerViewSet( + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + viewsets.GenericViewSet, + dojo_mixins.AnswerSubClassFieldsMixin, +): serializer_class = serializers.QuestionnaireAnswerSerializer queryset = Answer.objects.all() filter_backends = (DjangoFilterBackend,) - permission_classes = (permissions.UserHasEngagementPermission, DjangoModelPermissions) + permission_classes = ( + permissions.UserHasEngagementPermission, + DjangoModelPermissions, + ) -class QuestionnaireGeneralSurveyViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - viewsets.GenericViewSet): +class QuestionnaireGeneralSurveyViewSet( + mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet +): serializer_class = serializers.QuestionnaireGeneralSurveySerializer queryset = General_Survey.objects.all() filter_backends = (DjangoFilterBackend,) - permission_classes = (permissions.UserHasEngagementPermission, DjangoModelPermissions) + permission_classes = ( + 
permissions.UserHasEngagementPermission, + DjangoModelPermissions, + ) -class QuestionnaireEngagementSurveyViewSet(mixins.ListModelMixin, - mixins.RetrieveModelMixin, - viewsets.GenericViewSet): +class QuestionnaireEngagementSurveyViewSet( + mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet +): serializer_class = serializers.QuestionnaireEngagementSurveySerializer queryset = Engagement_Survey.objects.all() filter_backends = (DjangoFilterBackend,) - permission_classes = (permissions.UserHasEngagementPermission, DjangoModelPermissions) + permission_classes = ( + permissions.UserHasEngagementPermission, + DjangoModelPermissions, + ) -class QuestionnaireAnsweredSurveyViewSet(prefetch.PrefetchListMixin, - prefetch.PrefetchRetrieveMixin, - mixins.ListModelMixin, - mixins.RetrieveModelMixin, - viewsets.GenericViewSet): +class QuestionnaireAnsweredSurveyViewSet( + prefetch.PrefetchListMixin, + prefetch.PrefetchRetrieveMixin, + mixins.ListModelMixin, + mixins.RetrieveModelMixin, + viewsets.GenericViewSet, +): serializer_class = serializers.QuestionnaireAnsweredSurveySerializer queryset = Answered_Survey.objects.all() filter_backends = (DjangoFilterBackend,) - permission_classes = (permissions.UserHasEngagementPermission, DjangoModelPermissions) - swagger_schema = prefetch.get_prefetch_schema(["questionnaire_answered_questionnaires_list", "questionnaire_answered_questionnaires_read"], serializers.QuestionnaireAnsweredSurveySerializer).to_schema() + permission_classes = ( + permissions.UserHasEngagementPermission, + DjangoModelPermissions, + ) + swagger_schema = prefetch.get_prefetch_schema( + [ + "questionnaire_answered_questionnaires_list", + "questionnaire_answered_questionnaires_read", + ], + serializers.QuestionnaireAnsweredSurveySerializer, + ).to_schema() diff --git a/dojo/authorization/authorization.py b/dojo/authorization/authorization.py index b59fa84c92..69f3884a4c 100644 --- a/dojo/authorization/authorization.py +++ b/dojo/authorization/authorization.py @@ -1,13 +1,34 @@ from django.core.exceptions import PermissionDenied from dojo.request_cache import cache_for_request -from dojo.authorization.roles_permissions import Permissions, Roles, get_global_roles_with_permissions, get_roles_with_permissions -from dojo.models import Product_Type, Product_Type_Member, Product, Product_Member, Engagement, \ - Test, Finding, Endpoint, Finding_Group, Product_Group, Product_Type_Group, Dojo_Group, Dojo_Group_Member, \ - Languages, App_Analysis, Stub_Finding, Product_API_Scan_Configuration, Cred_Mapping +from dojo.authorization.roles_permissions import ( + Permissions, + Roles, + get_global_roles_with_permissions, + get_roles_with_permissions, +) +from dojo.models import ( + Product_Type, + Product_Type_Member, + Product, + Product_Member, + Engagement, + Test, + Finding, + Endpoint, + Finding_Group, + Product_Group, + Product_Type_Group, + Dojo_Group, + Dojo_Group_Member, + Languages, + App_Analysis, + Stub_Finding, + Product_API_Scan_Configuration, + Cred_Mapping, +) def user_has_configuration_permission(user, permission): - if not user: return False @@ -25,92 +46,168 @@ def user_has_permission(user, obj, permission): return True if isinstance(obj, Product_Type) or isinstance(obj, Product): - # Global roles are only relevant for product types, products and their dependent objects + # Global roles are only relevant for product types, products and their + # dependent objects if user_has_global_permission(user, permission): return True if isinstance(obj, Product_Type): - # Check 
if the user has a role for the product type with the requested permissions + # Check if the user has a role for the product type with the requested + # permissions member = get_product_type_member(user, obj) - if member is not None and role_has_permission(member.role.id, permission): + if member is not None and role_has_permission( + member.role.id, permission + ): return True - # Check if the user is in a group with a role for the product type with the requested permissions + # Check if the user is in a group with a role for the product type with + # the requested permissions for product_type_group in get_product_type_groups(user, obj): if role_has_permission(product_type_group.role.id, permission): return True return False - elif (isinstance(obj, Product) and - permission.value >= Permissions.Product_View.value): + elif ( + isinstance(obj, Product) + and permission.value >= Permissions.Product_View.value + ): # Products inherit permissions of their product type if user_has_permission(user, obj.prod_type, permission): return True - # Check if the user has a role for the product with the requested permissions + # Check if the user has a role for the product with the requested + # permissions member = get_product_member(user, obj) - if member is not None and role_has_permission(member.role.id, permission): + if member is not None and role_has_permission( + member.role.id, permission + ): return True - # Check if the user is in a group with a role for the product with the requested permissions + # Check if the user is in a group with a role for the product with the + # requested permissions for product_group in get_product_groups(user, obj): if role_has_permission(product_group.role.id, permission): return True return False - elif isinstance(obj, Engagement) and permission in Permissions.get_engagement_permissions(): + elif ( + isinstance(obj, Engagement) + and permission in Permissions.get_engagement_permissions() + ): return user_has_permission(user, obj.product, permission) - elif isinstance(obj, Test) and permission in Permissions.get_test_permissions(): + elif ( + isinstance(obj, Test) + and permission in Permissions.get_test_permissions() + ): return user_has_permission(user, obj.engagement.product, permission) - elif (isinstance(obj, Finding) or isinstance(obj, Stub_Finding)) and permission in Permissions.get_finding_permissions(): - return user_has_permission(user, obj.test.engagement.product, permission) - elif isinstance(obj, Finding_Group) and permission in Permissions.get_finding_group_permissions(): - return user_has_permission(user, obj.test.engagement.product, permission) - elif isinstance(obj, Endpoint) and permission in Permissions.get_endpoint_permissions(): + elif ( + isinstance(obj, Finding) or isinstance(obj, Stub_Finding) + ) and permission in Permissions.get_finding_permissions(): + return user_has_permission( + user, obj.test.engagement.product, permission + ) + elif ( + isinstance(obj, Finding_Group) + and permission in Permissions.get_finding_group_permissions() + ): + return user_has_permission( + user, obj.test.engagement.product, permission + ) + elif ( + isinstance(obj, Endpoint) + and permission in Permissions.get_endpoint_permissions() + ): return user_has_permission(user, obj.product, permission) - elif isinstance(obj, Languages) and permission in Permissions.get_language_permissions(): + elif ( + isinstance(obj, Languages) + and permission in Permissions.get_language_permissions() + ): return user_has_permission(user, obj.product, permission) - elif 
isinstance(obj, App_Analysis) and permission in Permissions.get_technology_permissions(): + elif ( + isinstance(obj, App_Analysis) + and permission in Permissions.get_technology_permissions() + ): return user_has_permission(user, obj.product, permission) - elif isinstance(obj, Product_API_Scan_Configuration) and permission in Permissions.get_product_api_scan_configuration_permissions(): + elif ( + isinstance(obj, Product_API_Scan_Configuration) + and permission + in Permissions.get_product_api_scan_configuration_permissions() + ): return user_has_permission(user, obj.product, permission) - elif isinstance(obj, Product_Type_Member) and permission in Permissions.get_product_type_member_permissions(): + elif ( + isinstance(obj, Product_Type_Member) + and permission in Permissions.get_product_type_member_permissions() + ): if permission == Permissions.Product_Type_Member_Delete: # Every member is allowed to remove himself - return obj.user == user or user_has_permission(user, obj.product_type, permission) + return obj.user == user or user_has_permission( + user, obj.product_type, permission + ) else: return user_has_permission(user, obj.product_type, permission) - elif isinstance(obj, Product_Member) and permission in Permissions.get_product_member_permissions(): + elif ( + isinstance(obj, Product_Member) + and permission in Permissions.get_product_member_permissions() + ): if permission == Permissions.Product_Member_Delete: # Every member is allowed to remove himself - return obj.user == user or user_has_permission(user, obj.product, permission) + return obj.user == user or user_has_permission( + user, obj.product, permission + ) else: return user_has_permission(user, obj.product, permission) - elif isinstance(obj, Product_Type_Group) and permission in Permissions.get_product_type_group_permissions(): + elif ( + isinstance(obj, Product_Type_Group) + and permission in Permissions.get_product_type_group_permissions() + ): return user_has_permission(user, obj.product_type, permission) - elif isinstance(obj, Product_Group) and permission in Permissions.get_product_group_permissions(): + elif ( + isinstance(obj, Product_Group) + and permission in Permissions.get_product_group_permissions() + ): return user_has_permission(user, obj.product, permission) - elif isinstance(obj, Dojo_Group) and permission in Permissions.get_group_permissions(): - # Check if the user has a role for the group with the requested permissions + elif ( + isinstance(obj, Dojo_Group) + and permission in Permissions.get_group_permissions() + ): + # Check if the user has a role for the group with the requested + # permissions group_member = get_group_member(user, obj) - return group_member is not None and role_has_permission(group_member.role.id, permission) - elif isinstance(obj, Dojo_Group_Member) and permission in Permissions.get_group_member_permissions(): + return group_member is not None and role_has_permission( + group_member.role.id, permission + ) + elif ( + isinstance(obj, Dojo_Group_Member) + and permission in Permissions.get_group_member_permissions() + ): if permission == Permissions.Group_Member_Delete: # Every user is allowed to remove himself - return obj.user == user or user_has_permission(user, obj.group, permission) + return obj.user == user or user_has_permission( + user, obj.group, permission + ) else: return user_has_permission(user, obj.group, permission) - elif isinstance(obj, Cred_Mapping) and permission in Permissions.get_credential_permissions(): + elif ( + isinstance(obj, Cred_Mapping) + and 
permission in Permissions.get_credential_permissions() + ): if obj.product: return user_has_permission(user, obj.product, permission) if obj.engagement: - return user_has_permission(user, obj.engagement.product, permission) + return user_has_permission( + user, obj.engagement.product, permission + ) if obj.test: - return user_has_permission(user, obj.test.engagement.product, permission) + return user_has_permission( + user, obj.test.engagement.product, permission + ) if obj.finding: - return user_has_permission(user, obj.finding.test.engagement.product, permission) + return user_has_permission( + user, obj.finding.test.engagement.product, permission + ) else: - raise NoAuthorizationImplementedError(f'No authorization implemented for class {type(obj).__name__} and permission {permission}') + raise NoAuthorizationImplementedError( + f"No authorization implemented for class {type(obj).__name__} and permission {permission}" + ) def user_has_global_permission(user, permission): - if not user: return False @@ -121,14 +218,24 @@ def user_has_global_permission(user, permission): return True if permission == Permissions.Product_Type_Add: - if user_has_configuration_permission(user, 'dojo.add_product_type'): + if user_has_configuration_permission(user, "dojo.add_product_type"): return True - if hasattr(user, 'global_role') and user.global_role.role is not None and role_has_global_permission(user.global_role.role.id, permission): + if ( + hasattr(user, "global_role") + and user.global_role.role is not None + and role_has_global_permission(user.global_role.role.id, permission) + ): return True for group in get_groups(user): - if hasattr(group, 'global_role') and group.global_role.role is not None and role_has_global_permission(group.global_role.role.id, permission): + if ( + hasattr(group, "global_role") + and group.global_role.role is not None + and role_has_global_permission( + group.global_role.role.id, permission + ) + ): return True return False @@ -151,7 +258,9 @@ def user_has_global_permission_or_403(user, permission): def get_roles_for_permission(permission): if not Permissions.has_value(permission): - raise PermissionDoesNotExistError('Permission {} does not exist'.format(permission)) + raise PermissionDoesNotExistError( + "Permission {} does not exist".format(permission) + ) roles_for_permissions = set() roles = get_roles_with_permissions() for role in roles: @@ -165,7 +274,7 @@ def role_has_permission(role, permission): if role is None: return False if not Roles.has_value(role): - raise RoleDoesNotExistError('Role {} does not exist'.format(role)) + raise RoleDoesNotExistError("Role {} does not exist".format(role)) roles = get_roles_with_permissions() permissions = roles.get(role) if not permissions: @@ -177,7 +286,7 @@ def role_has_global_permission(role, permission): if role is None: return False if not Roles.has_value(role): - raise RoleDoesNotExistError('Role {} does not exist'.format(role)) + raise RoleDoesNotExistError("Role {} does not exist".format(role)) roles = get_global_roles_with_permissions() permissions = roles.get(role) if permissions and permission in permissions: @@ -207,7 +316,11 @@ def get_product_member(user, product): @cache_for_request def get_product_member_dict(user): pm_dict = {} - for product_member in Product_Member.objects.select_related('product').select_related('role').filter(user=user): + for product_member in ( + Product_Member.objects.select_related("product") + .select_related("role") + .filter(user=user) + ): pm_dict[product_member.product.id] = 
product_member return pm_dict @@ -219,7 +332,11 @@ def get_product_type_member(user, product_type): @cache_for_request def get_product_type_member_dict(user): ptm_dict = {} - for product_type_member in Product_Type_Member.objects.select_related('product_type').select_related('role').filter(user=user): + for product_type_member in ( + Product_Type_Member.objects.select_related("product_type") + .select_related("role") + .filter(user=user) + ): ptm_dict[product_type_member.product_type.id] = product_type_member return ptm_dict @@ -231,7 +348,11 @@ def get_product_groups(user, product): @cache_for_request def get_product_groups_dict(user): pg_dict = {} - for product_group in Product_Group.objects.select_related('product').select_related('role').filter(group__users=user): + for product_group in ( + Product_Group.objects.select_related("product") + .select_related("role") + .filter(group__users=user) + ): if pg_dict.get(product_group.product.id) is None: pgu_list = [] else: @@ -248,7 +369,11 @@ def get_product_type_groups(user, product_type): @cache_for_request def get_product_type_groups_dict(user): pgt_dict = {} - for product_type_group in Product_Type_Group.objects.select_related('product_type').select_related('role').filter(group__users=user): + for product_type_group in ( + Product_Type_Group.objects.select_related("product_type") + .select_related("role") + .filter(group__users=user) + ): if pgt_dict.get(product_type_group.product_type.id) is None: pgtu_list = [] else: @@ -260,7 +385,7 @@ def get_product_type_groups_dict(user): @cache_for_request def get_groups(user): - return Dojo_Group.objects.select_related('global_role').filter(users=user) + return Dojo_Group.objects.select_related("global_role").filter(users=user) def get_group_member(user, group): @@ -270,6 +395,10 @@ def get_group_member(user, group): @cache_for_request def get_group_members_dict(user): gu_dict = {} - for group_member in Dojo_Group_Member.objects.select_related('group').select_related('role').filter(user=user): + for group_member in ( + Dojo_Group_Member.objects.select_related("group") + .select_related("role") + .filter(user=user) + ): gu_dict[group_member.group.id] = group_member return gu_dict diff --git a/dojo/authorization/authorization_decorators.py b/dojo/authorization/authorization_decorators.py index 70e5a225a4..ec2c26aefd 100644 --- a/dojo/authorization/authorization_decorators.py +++ b/dojo/authorization/authorization_decorators.py @@ -1,15 +1,20 @@ import functools from django.core.exceptions import PermissionDenied from django.shortcuts import get_object_or_404 -from dojo.authorization.authorization import user_has_global_permission_or_403, user_has_permission_or_403, user_has_configuration_permission +from dojo.authorization.authorization import ( + user_has_global_permission_or_403, + user_has_permission_or_403, + user_has_configuration_permission, +) def user_is_authorized(model, permission, arg, lookup="pk", func=None): - """Decorator for functions that ensures the user has permission on an object. 
- """ + """Decorator for functions that ensures the user has permission on an object.""" if func is None: - return functools.partial(user_is_authorized, model, permission, arg, lookup) + return functools.partial( + user_is_authorized, model, permission, arg, lookup + ) @functools.wraps(func) def _wrapped(request, *args, **kwargs): @@ -33,8 +38,7 @@ def _wrapped(request, *args, **kwargs): def user_has_global_permission(permission, func=None): - """Decorator for functions that ensures the user has a (global) permission - """ + """Decorator for functions that ensures the user has a (global) permission""" if func is None: return functools.partial(user_has_global_permission, permission) @@ -57,7 +61,6 @@ def user_is_configuration_authorized(permission, func=None): @functools.wraps(func) def _wrapped(request, *args, **kwargs): - if not user_has_configuration_permission(request.user, permission): raise PermissionDenied return func(request, *args, **kwargs) diff --git a/dojo/authorization/roles_permissions.py b/dojo/authorization/roles_permissions.py index 5e8fca4872..779463258f 100644 --- a/dojo/authorization/roles_permissions.py +++ b/dojo/authorization/roles_permissions.py @@ -19,7 +19,8 @@ def has_value(cls, value): def django_enum(cls): # decorator needed to enable enums in django templates - # see https://stackoverflow.com/questions/35953132/how-to-access-enum-types-in-django-templates + # see + # https://stackoverflow.com/questions/35953132/how-to-access-enum-types-in-django-templates cls.do_not_call_in_templates = True return cls @@ -144,8 +145,8 @@ def get_engagement_permissions(cls): Permissions.Note_Add, Permissions.Note_Delete, Permissions.Note_Edit, - Permissions.Note_View_History} \ - .union(cls.get_test_permissions()) + Permissions.Note_View_History, + }.union(cls.get_test_permissions()) @classmethod def get_test_permissions(cls): @@ -158,8 +159,8 @@ def get_test_permissions(cls): Permissions.Note_Add, Permissions.Note_Delete, Permissions.Note_Edit, - Permissions.Note_View_History} \ - .union(cls.get_finding_permissions()) + Permissions.Note_View_History, + }.union(cls.get_finding_permissions()) @classmethod def get_finding_permissions(cls): @@ -173,50 +174,56 @@ def get_finding_permissions(cls): Permissions.Risk_Acceptance, Permissions.Note_Delete, Permissions.Note_Edit, - Permissions.Note_View_History} \ - .union(cls.get_finding_group_permissions()) + Permissions.Note_View_History, + }.union(cls.get_finding_group_permissions()) @classmethod def get_finding_group_permissions(cls): return { Permissions.Finding_Group_View, Permissions.Finding_Group_Edit, - Permissions.Finding_Group_Delete} + Permissions.Finding_Group_Delete, + } @classmethod def get_endpoint_permissions(cls): return { Permissions.Endpoint_View, Permissions.Endpoint_Edit, - Permissions.Endpoint_Delete} + Permissions.Endpoint_Delete, + } @classmethod def get_product_member_permissions(cls): return { Permissions.Product_View, Permissions.Product_Manage_Members, - Permissions.Product_Member_Delete} + Permissions.Product_Member_Delete, + } @classmethod def get_product_type_member_permissions(cls): return { Permissions.Product_Type_View, Permissions.Product_Type_Manage_Members, - Permissions.Product_Type_Member_Delete} + Permissions.Product_Type_Member_Delete, + } @classmethod def get_product_group_permissions(cls): return { Permissions.Product_Group_View, Permissions.Product_Group_Edit, - Permissions.Product_Group_Delete} + Permissions.Product_Group_Delete, + } @classmethod def get_product_type_group_permissions(cls): 
return { Permissions.Product_Type_Group_View, Permissions.Product_Type_Group_Edit, - Permissions.Product_Type_Group_Delete} + Permissions.Product_Type_Group_Delete, + } @classmethod def get_group_permissions(cls): @@ -226,35 +233,40 @@ def get_group_permissions(cls): Permissions.Group_Manage_Members, Permissions.Group_Add_Owner, Permissions.Group_Edit, - Permissions.Group_Delete} + Permissions.Group_Delete, + } @classmethod def get_group_member_permissions(cls): return { Permissions.Group_View, Permissions.Group_Manage_Members, - Permissions.Group_Member_Delete} + Permissions.Group_Member_Delete, + } @classmethod def get_language_permissions(cls): return { Permissions.Language_View, Permissions.Language_Edit, - Permissions.Language_Delete} + Permissions.Language_Delete, + } @classmethod def get_technology_permissions(cls): return { Permissions.Technology_View, Permissions.Technology_Edit, - Permissions.Technology_Delete} + Permissions.Technology_Delete, + } @classmethod def get_product_api_scan_configuration_permissions(cls): return { Permissions.Product_API_Scan_Configuration_View, Permissions.Product_API_Scan_Configuration_Edit, - Permissions.Product_API_Scan_Configuration_Delete} + Permissions.Product_API_Scan_Configuration_Delete, + } @classmethod def get_credential_permissions(cls): @@ -262,7 +274,8 @@ def get_credential_permissions(cls): Permissions.Credential_View, Permissions.Credential_Add, Permissions.Credential_Edit, - Permissions.Credential_Delete} + Permissions.Credential_Delete, + } def get_roles_with_permissions(): @@ -306,57 +319,42 @@ def get_roles_with_permissions(): }, Roles.Writer: { Permissions.Product_Type_View, - Permissions.Product_View, - Permissions.Engagement_View, Permissions.Engagement_Add, Permissions.Engagement_Edit, Permissions.Risk_Acceptance, - Permissions.Test_View, Permissions.Test_Add, Permissions.Test_Edit, - Permissions.Finding_View, Permissions.Finding_Add, Permissions.Import_Scan_Result, Permissions.Finding_Edit, - Permissions.Finding_Group_View, Permissions.Finding_Group_Add, Permissions.Finding_Group_Edit, Permissions.Finding_Group_Delete, - Permissions.Endpoint_View, Permissions.Endpoint_Add, Permissions.Endpoint_Edit, - Permissions.Benchmark_Edit, - Permissions.Component_View, - Permissions.Note_View_History, Permissions.Note_Edit, Permissions.Note_Add, - Permissions.Product_Group_View, Permissions.Product_Type_Group_View, Permissions.Group_View, - Permissions.Language_View, Permissions.Language_Add, Permissions.Language_Edit, Permissions.Language_Delete, - Permissions.Technology_View, Permissions.Technology_Add, Permissions.Technology_Edit, - Permissions.Product_API_Scan_Configuration_View, - Permissions.Product_Tracking_Files_View, - Permissions.Credential_View, Permissions.Credential_Add, Permissions.Credential_Edit, @@ -367,85 +365,68 @@ def get_roles_with_permissions(): Permissions.Product_Type_Member_Delete, Permissions.Product_Type_Manage_Members, Permissions.Product_Type_Edit, - Permissions.Product_View, Permissions.Product_Member_Delete, Permissions.Product_Manage_Members, Permissions.Product_Configure_Notifications, Permissions.Product_Edit, - Permissions.Engagement_View, Permissions.Engagement_Add, Permissions.Engagement_Edit, Permissions.Engagement_Delete, Permissions.Risk_Acceptance, - Permissions.Test_View, Permissions.Test_Add, Permissions.Test_Edit, Permissions.Test_Delete, - Permissions.Finding_View, Permissions.Finding_Add, Permissions.Import_Scan_Result, Permissions.Finding_Edit, Permissions.Finding_Delete, - 
Permissions.Finding_Group_View, Permissions.Finding_Group_Add, Permissions.Finding_Group_Edit, Permissions.Finding_Group_Delete, - Permissions.Endpoint_View, Permissions.Endpoint_Add, Permissions.Endpoint_Edit, Permissions.Endpoint_Delete, - Permissions.Benchmark_Edit, Permissions.Benchmark_Delete, - Permissions.Component_View, - Permissions.Note_View_History, Permissions.Note_Edit, Permissions.Note_Add, Permissions.Note_Delete, - Permissions.Product_Group_View, Permissions.Product_Group_Add, Permissions.Product_Group_Edit, Permissions.Product_Group_Delete, - Permissions.Product_Type_Group_View, Permissions.Product_Type_Group_Add, Permissions.Product_Type_Group_Edit, Permissions.Product_Type_Group_Delete, - Permissions.Group_View, Permissions.Group_Edit, Permissions.Group_Manage_Members, Permissions.Group_Member_Delete, - Permissions.Language_View, Permissions.Language_Add, Permissions.Language_Edit, Permissions.Language_Delete, - Permissions.Technology_View, Permissions.Technology_Add, Permissions.Technology_Edit, Permissions.Technology_Delete, - Permissions.Product_API_Scan_Configuration_View, Permissions.Product_API_Scan_Configuration_Add, Permissions.Product_API_Scan_Configuration_Edit, Permissions.Product_API_Scan_Configuration_Delete, - Permissions.Product_Tracking_Files_View, Permissions.Product_Tracking_Files_Add, Permissions.Product_Tracking_Files_Edit, Permissions.Product_Tracking_Files_Delete, - Permissions.Credential_View, Permissions.Credential_Add, Permissions.Credential_Edit, @@ -459,7 +440,6 @@ def get_roles_with_permissions(): Permissions.Product_Type_Member_Add_Owner, Permissions.Product_Type_Edit, Permissions.Product_Type_Delete, - Permissions.Product_View, Permissions.Product_Member_Delete, Permissions.Product_Manage_Members, @@ -467,88 +447,72 @@ def get_roles_with_permissions(): Permissions.Product_Configure_Notifications, Permissions.Product_Edit, Permissions.Product_Delete, - Permissions.Engagement_View, Permissions.Engagement_Add, Permissions.Engagement_Edit, Permissions.Engagement_Delete, Permissions.Risk_Acceptance, - Permissions.Test_View, Permissions.Test_Add, Permissions.Test_Edit, Permissions.Test_Delete, - Permissions.Finding_View, Permissions.Finding_Add, Permissions.Import_Scan_Result, Permissions.Finding_Edit, Permissions.Finding_Delete, - Permissions.Finding_Group_View, Permissions.Finding_Group_Add, Permissions.Finding_Group_Edit, Permissions.Finding_Group_Delete, - Permissions.Endpoint_View, Permissions.Endpoint_Add, Permissions.Endpoint_Edit, Permissions.Endpoint_Delete, - Permissions.Benchmark_Edit, Permissions.Benchmark_Delete, - Permissions.Component_View, - Permissions.Note_View_History, Permissions.Note_Edit, Permissions.Note_Add, Permissions.Note_Delete, - Permissions.Product_Group_View, Permissions.Product_Group_Add, Permissions.Product_Group_Add_Owner, Permissions.Product_Group_Edit, Permissions.Product_Group_Delete, - Permissions.Product_Type_Group_View, Permissions.Product_Type_Group_Add, Permissions.Product_Type_Group_Add_Owner, Permissions.Product_Type_Group_Edit, Permissions.Product_Type_Group_Delete, - Permissions.Group_View, Permissions.Group_Edit, Permissions.Group_Manage_Members, Permissions.Group_Member_Delete, Permissions.Group_Add_Owner, Permissions.Group_Delete, - Permissions.Language_View, Permissions.Language_Add, Permissions.Language_Edit, Permissions.Language_Delete, - Permissions.Technology_View, Permissions.Technology_Add, Permissions.Technology_Edit, Permissions.Technology_Delete, - 
Permissions.Product_API_Scan_Configuration_View, Permissions.Product_API_Scan_Configuration_Add, Permissions.Product_API_Scan_Configuration_Edit, Permissions.Product_API_Scan_Configuration_Delete, - Permissions.Product_Tracking_Files_View, Permissions.Product_Tracking_Files_Add, Permissions.Product_Tracking_Files_Edit, Permissions.Product_Tracking_Files_Delete, - Permissions.Credential_View, Permissions.Credential_Add, Permissions.Credential_Edit, Permissions.Credential_Delete, - } + }, } @@ -557,10 +521,6 @@ def get_global_roles_with_permissions(): Extra permissions for global roles, on top of the permissions granted to the "normal" roles above. """ return { - Roles.Maintainer: { - Permissions.Product_Type_Add - }, - Roles.Owner: { - Permissions.Product_Type_Add - } + Roles.Maintainer: {Permissions.Product_Type_Add}, + Roles.Owner: {Permissions.Product_Type_Add}, } diff --git a/dojo/banner/urls.py b/dojo/banner/urls.py index e39b26c801..4b99585db9 100644 --- a/dojo/banner/urls.py +++ b/dojo/banner/urls.py @@ -2,6 +2,7 @@ from dojo.banner import views urlpatterns = [ - re_path(r'^configure_banner$', views.configure_banner, - name='configure_banner'), + re_path( + r"^configure_banner$", views.configure_banner, name="configure_banner" + ), ] diff --git a/dojo/banner/views.py b/dojo/banner/views.py index 497b55b14f..bfd4616062 100644 --- a/dojo/banner/views.py +++ b/dojo/banner/views.py @@ -8,36 +8,43 @@ from dojo.forms import LoginBanner from dojo.models import BannerConf -from dojo.authorization.authorization_decorators import user_is_configuration_authorized +from dojo.authorization.authorization_decorators import ( + user_is_configuration_authorized, +) logger = logging.getLogger(__name__) -@user_is_configuration_authorized('dojo.change_bannerconf') +@user_is_configuration_authorized("dojo.change_bannerconf") def configure_banner(request): banner_config = get_object_or_404(BannerConf, id=1) - if request.method == 'POST': + if request.method == "POST": form = LoginBanner(request.POST) if form.is_valid(): - banner_config.banner_enable = form.cleaned_data['banner_enable'] - banner_config.banner_message = form.cleaned_data['banner_message'] + banner_config.banner_enable = form.cleaned_data["banner_enable"] + banner_config.banner_message = form.cleaned_data["banner_message"] banner_config.save() messages.add_message( request, messages.SUCCESS, - 'Banner updated successfully.', + "Banner updated successfully.", extra_tags="alert-success", ) return HttpResponseRedirect(reverse("configure_banner")) else: # List the banner configuration - form = LoginBanner(initial={ - 'banner_enable': banner_config.banner_enable, - 'banner_message': banner_config.banner_message - }) + form = LoginBanner( + initial={ + "banner_enable": banner_config.banner_enable, + "banner_message": banner_config.banner_message, + } + ) - add_breadcrumb(title="Banner Configuration", top_level=True, request=request) - return render(request, 'dojo/banner.html', { - 'form': form, - 'banner_message': banner_config.banner_message - }) + add_breadcrumb( + title="Banner Configuration", top_level=True, request=request + ) + return render( + request, + "dojo/banner.html", + {"form": form, "banner_message": banner_config.banner_message}, + ) diff --git a/dojo/benchmark/urls.py b/dojo/benchmark/urls.py index 2d137d6ca6..f30e3fa8c8 100644 --- a/dojo/benchmark/urls.py +++ b/dojo/benchmark/urls.py @@ -2,10 +2,34 @@ from . 
import views
 
 urlpatterns = [
-    re_path(r'^benchmark/(?P<pid>\d+)/type/(?P<type>\d+)$', views.benchmark_view, name='view_product_benchmark'),
-    re_path(r'^benchmark/(?P<pid>\d+)/type/(?P<type>\d+)/category/(?P<cat>\d+)$', views.benchmark_view, name='view_product_benchmark'),
-    re_path(r'^benchmark/(?P<pid>\d+)/type/(?P<type>\d+)/category/(?P<cat>\d+)/edit/(?P\d+)$', views.benchmark_view, name='edit_benchmark'),
-    re_path(r'^benchmark/(?P<pid>\d+)/type/(?P<type>\d+)/delete$', views.delete, name='delete_product_benchmark'),
-    re_path(r'^benchmark/(?P<pid>\d+)/type/(?P<_type>\d+)/update$', views.update_benchmark, name='update_product_benchmark'),
-    re_path(r'^benchmark/(?P<pid>\d+)/type/(?P<_type>\d+)/summary/(?P<summary>
    \d+)/update$', views.update_benchmark_summary, name='update_product_benchmark_summary'), + re_path( + r"^benchmark/(?P\d+)/type/(?P\d+)$", + views.benchmark_view, + name="view_product_benchmark", + ), + re_path( + r"^benchmark/(?P\d+)/type/(?P\d+)/category/(?P\d+)$", + views.benchmark_view, + name="view_product_benchmark", + ), + re_path( + r"^benchmark/(?P\d+)/type/(?P\d+)/category/(?P\d+)/edit/(?P\d+)$", + views.benchmark_view, + name="edit_benchmark", + ), + re_path( + r"^benchmark/(?P\d+)/type/(?P\d+)/delete$", + views.delete, + name="delete_product_benchmark", + ), + re_path( + r"^benchmark/(?P\d+)/type/(?P<_type>\d+)/update$", + views.update_benchmark, + name="update_product_benchmark", + ), + re_path( + r"^benchmark/(?P\d+)/type/(?P<_type>\d+)/summary/(?P\d+)/update$", + views.update_benchmark_summary, + name="update_product_benchmark_summary", + ), ] diff --git a/dojo/benchmark/views.py b/dojo/benchmark/views.py index b4dd1340bd..86961e66fe 100644 --- a/dojo/benchmark/views.py +++ b/dojo/benchmark/views.py @@ -7,8 +7,19 @@ from django.utils.translation import gettext as _ from dojo.forms import Benchmark_Product_SummaryForm, DeleteBenchmarkForm -from dojo.models import Benchmark_Type, Benchmark_Category, Benchmark_Requirement, Benchmark_Product, Product, Benchmark_Product_Summary -from dojo.utils import add_breadcrumb, Product_Tab, redirect_to_return_url_or_else +from dojo.models import ( + Benchmark_Type, + Benchmark_Category, + Benchmark_Requirement, + Benchmark_Product, + Product, + Benchmark_Product_Summary, +) +from dojo.utils import ( + add_breadcrumb, + Product_Tab, + redirect_to_return_url_or_else, +) from dojo.authorization.authorization_decorators import user_is_authorized from dojo.authorization.roles_permissions import Permissions from dojo.templatetags.display_tags import asvs_level @@ -28,71 +39,79 @@ def add_benchmark(queryset, product): try: Benchmark_Product.objects.bulk_create(requirements) - except: + except Exception: pass def update_benchmark(request, pid, _type): - if request.method == 'POST': - bench_id = request.POST.get('bench_id') - field = request.POST.get('field') - value = request.POST.get('value') - value = {'true': True, 'false': False}.get(value, value) - - if field in ['enabled', 'pass_fail', 'notes', 'get_notes', 'delete_notes']: + if request.method == "POST": + bench_id = request.POST.get("bench_id") + field = request.POST.get("field") + value = request.POST.get("value") + value = {"true": True, "false": False}.get(value, value) + + if field in [ + "enabled", + "pass_fail", + "notes", + "get_notes", + "delete_notes", + ]: bench = Benchmark_Product.objects.get(id=bench_id) - if field == 'enabled': + if field == "enabled": bench.enabled = value - elif field == 'pass_fail': + elif field == "pass_fail": bench.pass_fail = value - elif field in ['notes', 'get_notes', 'delete_notes']: - if field == 'notes': + elif field in ["notes", "get_notes", "delete_notes"]: + if field == "notes": bench.notes.create(entry=value, author=get_current_user()) - if field == 'delete_notes': + if field == "delete_notes": bench.notes.remove(value) - notes = bench.notes.order_by('id') - return JsonResponse({ - 'notes': [{ - 'id': n.id, - 'entry': n.entry, - 'author': n.author.get_full_name(), - 'date': n.date.ctime() - } for n in notes] - }) + notes = bench.notes.order_by("id") + return JsonResponse( + { + "notes": [ + { + "id": n.id, + "entry": n.entry, + "author": n.author.get_full_name(), + "date": n.date.ctime(), + } + for n in notes + ] + } + ) bench.save() - return 
JsonResponse({ - field: value - }) + return JsonResponse({field: value}) - return redirect_to_return_url_or_else(request, reverse('view_product_benchmark', args=(pid, _type))) + return redirect_to_return_url_or_else( + request, reverse("view_product_benchmark", args=(pid, _type)) + ) def update_benchmark_summary(request, pid, _type, summary): - if request.method == 'POST': - field = request.POST.get('field') - value = request.POST.get('value') - value = {'true': True, 'false': False}.get(value, value) + if request.method == "POST": + field = request.POST.get("field") + value = request.POST.get("value") + value = {"true": True, "false": False}.get(value, value) - if field in ['publish', 'desired_level']: + if field in ["publish", "desired_level"]: summary = Benchmark_Product_Summary.objects.get(id=summary) data = {} - if field == 'publish': + if field == "publish": summary.publish = value - data = { - 'publish': value - } - elif field == 'desired_level': + data = {"publish": value} + elif field == "desired_level": summary.desired_level = value - data = { - 'desired_level': value, - 'text': asvs_level(summary) - } + data = {"desired_level": value, "text": asvs_level(summary)} summary.save() return JsonResponse(data) - return redirect_to_return_url_or_else(request, reverse('view_product_benchmark', args=(pid, _type))) + return redirect_to_return_url_or_else( + request, reverse("view_product_benchmark", args=(pid, _type)) + ) def return_score(queryset): @@ -101,24 +120,67 @@ def return_score(queryset): for item in queryset: if item["pass_fail"]: asvs_level_1_score = item["pass_fail__count"] - asvs_level_1_benchmark = asvs_level_1_benchmark + item["pass_fail__count"] + asvs_level_1_benchmark = ( + asvs_level_1_benchmark + item["pass_fail__count"] + ) return asvs_level_1_benchmark, asvs_level_1_score def score_asvs(product, benchmark_type): # Compliant to ASVS level 1 benchmarks - asvs_level_1 = Benchmark_Product.objects.filter(enabled=True, control__enabled=True, product=product, control__category__type=benchmark_type, control__category__enabled=True, control__level_1=True).values('pass_fail').annotate(Count('pass_fail')).order_by() + asvs_level_1 = ( + Benchmark_Product.objects.filter( + enabled=True, + control__enabled=True, + product=product, + control__category__type=benchmark_type, + control__category__enabled=True, + control__level_1=True, + ) + .values("pass_fail") + .annotate(Count("pass_fail")) + .order_by() + ) asvs_level_1_benchmark, asvs_level_1_score = return_score(asvs_level_1) # Compliant to ASVS level 2 benchmarks - asvs_level_2 = Benchmark_Product.objects.filter(~Q(control__level_1=True), enabled=True, control__enabled=True, product=product, control__category__type=benchmark_type, control__category__enabled=True, control__level_2=True).values('pass_fail').annotate(Count('pass_fail')).order_by() + asvs_level_2 = ( + Benchmark_Product.objects.filter( + ~Q(control__level_1=True), + enabled=True, + control__enabled=True, + product=product, + control__category__type=benchmark_type, + control__category__enabled=True, + control__level_2=True, + ) + .values("pass_fail") + .annotate(Count("pass_fail")) + .order_by() + ) asvs_level_2_benchmark, asvs_level_2_score = return_score(asvs_level_2) # Compliant to ASVS level 3 benchmarks - asvs_level_3 = Benchmark_Product.objects.filter(~Q(control__level_1=True), ~Q(control__level_2=True), enabled=True, control__enabled=True, control__category__enabled=True, product=product, control__category__type=benchmark_type, 
control__level_3=True).values('pass_fail').annotate(Count('pass_fail')).order_by() + asvs_level_3 = ( + Benchmark_Product.objects.filter( + ~Q(control__level_1=True), + ~Q(control__level_2=True), + enabled=True, + control__enabled=True, + control__category__enabled=True, + product=product, + control__category__type=benchmark_type, + control__level_3=True, + ) + .values("pass_fail") + .annotate(Count("pass_fail")) + .order_by() + ) asvs_level_3_benchmark, asvs_level_3_score = return_score(asvs_level_3) - benchmark_product_summary = Benchmark_Product_Summary.objects.get(product=product, benchmark_type=benchmark_type) + benchmark_product_summary = Benchmark_Product_Summary.objects.get( + product=product, benchmark_type=benchmark_type + ) benchmark_product_summary.asvs_level_1_benchmark = asvs_level_1_benchmark benchmark_product_summary.asvs_level_1_score = asvs_level_1_score @@ -130,78 +192,140 @@ def score_asvs(product, benchmark_type): benchmark_product_summary.save() -@user_is_authorized(Product, Permissions.Benchmark_Edit, 'pid') +@user_is_authorized(Product, Permissions.Benchmark_Edit, "pid") def benchmark_view(request, pid, type, cat=None): product = get_object_or_404(Product, id=pid) benchmark_type = get_object_or_404(Benchmark_Type, id=type) - benchmark_category = Benchmark_Category.objects.filter(type=type, enabled=True).order_by('name') + benchmark_category = Benchmark_Category.objects.filter( + type=type, enabled=True + ).order_by("name") # Add requirements to the product - new_benchmarks = Benchmark_Requirement.objects.filter(category__type=type, category__type__enabled=True, - enabled=True).exclude( - id__in=Benchmark_Product.objects.filter(product=product).values_list('control_id', flat=True)) + new_benchmarks = Benchmark_Requirement.objects.filter( + category__type=type, category__type__enabled=True, enabled=True + ).exclude( + id__in=Benchmark_Product.objects.filter(product=product).values_list( + "control_id", flat=True + ) + ) add_benchmark(new_benchmarks, product) # Create the benchmark summary category try: - benchmark_product_summary = Benchmark_Product_Summary.objects.get(product=product, benchmark_type=benchmark_type) - except: - benchmark_product_summary = Benchmark_Product_Summary(product=product, benchmark_type=benchmark_type) + benchmark_product_summary = Benchmark_Product_Summary.objects.get( + product=product, benchmark_type=benchmark_type + ) + except Exception: + benchmark_product_summary = Benchmark_Product_Summary( + product=product, benchmark_type=benchmark_type + ) benchmark_product_summary.save() if cat: - benchmarks = Benchmark_Product.objects.select_related('control', 'control__category').filter(product=product.id, control__category=cat, control__category__enabled=True, control__category__type=type, control__enabled=True).all().order_by('control__objective_number') + benchmarks = ( + Benchmark_Product.objects.select_related( + "control", "control__category" + ) + .filter( + product=product.id, + control__category=cat, + control__category__enabled=True, + control__category__type=type, + control__enabled=True, + ) + .all() + .order_by("control__objective_number") + ) else: - benchmarks = Benchmark_Product.objects.select_related('control', 'control__category').filter(product=product.id, control__category__enabled=True, control__category__type=type, control__enabled=True).all().order_by('control__category__name', 'control__objective_number') - - benchmark_summary_form = Benchmark_Product_SummaryForm(instance=benchmark_product_summary) - - noted_benchmarks = 
benchmarks.filter(notes__isnull=False).order_by('id').distinct() + benchmarks = ( + Benchmark_Product.objects.select_related( + "control", "control__category" + ) + .filter( + product=product.id, + control__category__enabled=True, + control__category__type=type, + control__enabled=True, + ) + .all() + .order_by("control__category__name", "control__objective_number") + ) + + benchmark_summary_form = Benchmark_Product_SummaryForm( + instance=benchmark_product_summary + ) + + noted_benchmarks = ( + benchmarks.filter(notes__isnull=False).order_by("id").distinct() + ) for bench in benchmarks: if bench.id in [b.id for b in noted_benchmarks]: bench.noted = True else: bench.noted = False - benchmarks = sorted(benchmarks, key=lambda x: [int(_) for _ in x.control.objective_number.split('.')]) - benchmark_category = sorted(benchmark_category, key=lambda x: int(x.name[:3].strip('V: '))) + benchmarks = sorted( + benchmarks, + key=lambda x: [int(_) for _ in x.control.objective_number.split(".")], + ) + benchmark_category = sorted( + benchmark_category, key=lambda x: int(x.name[:3].strip("V: ")) + ) product_tab = Product_Tab(product, title=_("Benchmarks"), tab="benchmarks") add_breadcrumb(title=_("Benchmarks"), top_level=False, request=request) - return render(request, 'dojo/benchmark.html', - {'benchmarks': benchmarks, - 'active_tab': 'benchmarks', - 'product_tab': product_tab, - 'benchmark_product_summary': benchmark_product_summary, - 'benchmark_summary_form': benchmark_summary_form, - 'benchmark_type': benchmark_type, - 'product': product, - 'benchmark_category': benchmark_category}) - - -@user_is_authorized(Product, Permissions.Benchmark_Delete, 'pid') + return render( + request, + "dojo/benchmark.html", + { + "benchmarks": benchmarks, + "active_tab": "benchmarks", + "product_tab": product_tab, + "benchmark_product_summary": benchmark_product_summary, + "benchmark_summary_form": benchmark_summary_form, + "benchmark_type": benchmark_type, + "product": product, + "benchmark_category": benchmark_category, + }, + ) + + +@user_is_authorized(Product, Permissions.Benchmark_Delete, "pid") def delete(request, pid, type): product = get_object_or_404(Product, id=pid) - benchmark_product_summary = Benchmark_Product_Summary.objects.filter(product=product, benchmark_type=type).first() + benchmark_product_summary = Benchmark_Product_Summary.objects.filter( + product=product, benchmark_type=type + ).first() form = DeleteBenchmarkForm(instance=benchmark_product_summary) - if request.method == 'POST': - if 'id' in request.POST and str(benchmark_product_summary.id) == request.POST['id']: - form = DeleteBenchmarkForm(request.POST, instance=benchmark_product_summary) + if request.method == "POST": + if ( + "id" in request.POST + and str(benchmark_product_summary.id) == request.POST["id"] + ): + form = DeleteBenchmarkForm( + request.POST, instance=benchmark_product_summary + ) if form.is_valid(): - benchmark_product = Benchmark_Product.objects.filter(product=product, control__category__type=type) + benchmark_product = Benchmark_Product.objects.filter( + product=product, control__category__type=type + ) benchmark_product.delete() benchmark_product_summary.delete() - messages.add_message(request, - messages.SUCCESS, - _('Benchmarks removed.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('product')) - - product_tab = Product_Tab(product, title=_("Delete Benchmarks"), tab="benchmarks") - return render(request, 'dojo/delete_benchmark.html', { - 'product': product, - 'form': form, - 'product_tab': 
product_tab - }) + messages.add_message( + request, + messages.SUCCESS, + _("Benchmarks removed."), + extra_tags="alert-success", + ) + return HttpResponseRedirect(reverse("product")) + + product_tab = Product_Tab( + product, title=_("Delete Benchmarks"), tab="benchmarks" + ) + return render( + request, + "dojo/delete_benchmark.html", + {"product": product, "form": form, "product_tab": product_tab}, + ) diff --git a/dojo/components/sql_group_concat.py b/dojo/components/sql_group_concat.py index b86d417d12..5aa8f10d64 100644 --- a/dojo/components/sql_group_concat.py +++ b/dojo/components/sql_group_concat.py @@ -2,26 +2,34 @@ class Sql_GroupConcat(Aggregate): - function = 'GROUP_CONCAT' + function = "GROUP_CONCAT" allow_distinct = True - def __init__(self, expression, separator, distinct=False, ordering=None, **extra): + def __init__( + self, expression, separator, distinct=False, ordering=None, **extra + ): self.separator = separator - super(Sql_GroupConcat, self).__init__(expression, - distinct='DISTINCT ' if distinct else '', - ordering=' ORDER BY %s' % ordering if ordering is not None else '', - separator=' SEPARATOR "%s"' % separator, - output_field=CharField(), - **extra) + super(Sql_GroupConcat, self).__init__( + expression, + distinct="DISTINCT " if distinct else "", + ordering=" ORDER BY %s" % ordering if ordering is not None else "", + separator=' SEPARATOR "%s"' % separator, + output_field=CharField(), + **extra + ) def as_mysql(self, compiler, connection): - return super().as_sql(compiler, - connection, - template='%(function)s(%(distinct)s%(expressions)s%(ordering)s%(separator)s)', - separator=' SEPARATOR \'%s\'' % self.separator) + return super().as_sql( + compiler, + connection, + template="%(function)s(%(distinct)s%(expressions)s%(ordering)s%(separator)s)", + separator=" SEPARATOR '%s'" % self.separator, + ) def as_sql(self, compiler, connection, **extra): - return super().as_sql(compiler, - connection, - template='%(function)s(%(distinct)s%(expressions)s%(ordering)s)', - **extra) + return super().as_sql( + compiler, + connection, + template="%(function)s(%(distinct)s%(expressions)s%(ordering)s)", + **extra + ) diff --git a/dojo/components/urls.py b/dojo/components/urls.py index 7b3acf73d1..0183e3e807 100644 --- a/dojo/components/urls.py +++ b/dojo/components/urls.py @@ -2,6 +2,5 @@ from dojo.components import views urlpatterns = [ - re_path(r'^components$', views.components, - name='components'), + re_path(r"^components$", views.components, name="components"), ] diff --git a/dojo/components/views.py b/dojo/components/views.py index 7cc5787149..2a8f226f83 100644 --- a/dojo/components/views.py +++ b/dojo/components/views.py @@ -10,34 +10,61 @@ def components(request): - add_breadcrumb(title='Components', top_level=True, request=request) - separator = ', ' - # Get components ordered by component_name and concat component versions to the same row + add_breadcrumb(title="Components", top_level=True, request=request) + separator = ", " + # Get components ordered by component_name and concat component versions + # to the same row component_query = get_authorized_findings(Permissions.Finding_View) - if connection.vendor == 'postgresql': - component_query = component_query.values("component_name").order_by('component_name').annotate( - component_version=StringAgg('component_version', delimiter=separator, distinct=True)) + if connection.vendor == "postgresql": + component_query = ( + component_query.values("component_name") + .order_by("component_name") + .annotate( + 
component_version=StringAgg( + "component_version", delimiter=separator, distinct=True + ) + ) + ) else: - component_query = component_query.values("component_name").order_by('component_name') - component_query = component_query.annotate(component_version=Sql_GroupConcat( - 'component_version', separator=separator, distinct=True)) + component_query = component_query.values("component_name").order_by( + "component_name" + ) + component_query = component_query.annotate( + component_version=Sql_GroupConcat( + "component_version", separator=separator, distinct=True + ) + ) # Append counts - component_query = component_query.annotate(total=Count('id')).order_by('component_name') - component_query = component_query.annotate(active=Count('id', filter=Q(active=True))) - component_query = component_query.annotate(duplicate=(Count('id', filter=Q(duplicate=True)))) - component_query = component_query.order_by('-total') # Default sort by total descending + component_query = component_query.annotate(total=Count("id")).order_by( + "component_name" + ) + component_query = component_query.annotate( + active=Count("id", filter=Q(active=True)) + ) + component_query = component_query.annotate( + duplicate=(Count("id", filter=Q(duplicate=True))) + ) + component_query = component_query.order_by( + "-total" + ) # Default sort by total descending comp_filter = ComponentFilter(request.GET, queryset=component_query) result = get_page_items(request, comp_filter.qs, 25) # Filter out None values for auto-complete - component_words = component_query.exclude(component_name__isnull=True).values_list('component_name', flat=True) + component_words = component_query.exclude( + component_name__isnull=True + ).values_list("component_name", flat=True) - return render(request, 'dojo/components.html', { - 'filter': comp_filter, - 'result': result, - 'component_words': sorted(set(component_words)) - }) + return render( + request, + "dojo/components.html", + { + "filter": comp_filter, + "result": result, + "component_words": sorted(set(component_words)), + }, + ) From 6a8f2a15d4def5f22256860bdf2fc7832e1594cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Jul 2023 09:34:21 -0500 Subject: [PATCH 33/85] Bump boto3 from 1.28.1 to 1.28.3 (#8381) Bumps [boto3](https://github.com/boto/boto3) from 1.28.1 to 1.28.3. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.1...1.28.3) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a4210867fc..2c6e3a3bfb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.1 # Required for Celery Broker AWS (SQS) support +boto3==1.28.3 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From 85e8f0665b23727d48e629ac57367876a7cbae40 Mon Sep 17 00:00:00 2001 From: Alejandro Tortolero Date: Mon, 17 Jul 2023 11:11:20 -0500 Subject: [PATCH 34/85] Update files with PEP8 standards in folder dojo/tools #003 (#8302) * Update files in folder dojo/tools/gitleaks with PEP8 standars. * Update files in folder dojo/tools/gosec with PEP8 standars. * Update files in folder dojo/tools/govulncheck with PEP8 standars. * Update files in folder dojo/tools/h1 with PEP8 standars. * Update files in folder dojo/tools/h1 with PEP8 standars. * Update files in folder dojo/tools/hadolint with PEP8 standars. * Update files in folder dojo/tools/harbor_vulnerability with PEP8 standars. * Update files in folder dojo/tools/horusec with PEP8 standars. * Change BaseException to Exception * Change BaseException to Exception --- dojo/tools/gitleaks/parser.py | 94 +++++++++++++++-------- dojo/tools/gosec/parser.py | 52 +++++++------ dojo/tools/govulncheck/parser.py | 78 +++++++++++++------ dojo/tools/h1/parser.py | 93 +++++++++++++++------- dojo/tools/hadolint/parser.py | 31 +++++--- dojo/tools/harbor_vulnerability/parser.py | 32 ++++---- dojo/tools/horusec/parser.py | 27 +++++-- 7 files changed, 264 insertions(+), 143 deletions(-) diff --git a/dojo/tools/gitleaks/parser.py b/dojo/tools/gitleaks/parser.py index 61c2f7d09c..513d43dd75 100644 --- a/dojo/tools/gitleaks/parser.py +++ b/dojo/tools/gitleaks/parser.py @@ -30,12 +30,12 @@ def get_findings(self, filename, test): dupes = dict() for issue in issues: - if issue.get('rule'): + if issue.get("rule"): self.get_finding_legacy(issue, test, dupes) - elif issue.get('Description'): + elif issue.get("Description"): self.get_finding_current(issue, test, dupes) else: - raise ValueError('Format is not recognized for Gitleaks') + raise ValueError("Format is not recognized for Gitleaks") return list(dupes.values()) @@ -44,10 +44,19 @@ def get_finding_legacy(self, issue, test, dupes): file_path = issue["file"] reason = issue["rule"] titleText = "Hard Coded " + reason - description = "**Commit:** " + issue["commitMessage"].rstrip("\n") + "\n" + description = ( + "**Commit:** " + issue["commitMessage"].rstrip("\n") + "\n" + ) description += "**Commit Hash:** " + issue["commit"] + "\n" description += "**Commit Date:** " + issue["date"] + "\n" - description += "**Author:** " + issue["author"] + " <" + issue["email"] + ">" + "\n" + description += ( + "**Author:** " + + issue["author"] + + " <" + + issue["email"] + + ">" + + "\n" + ) description += "**Reason:** " + reason + "\n" description += "**Path:** " + file_path + "\n" if "lineNumber" in issue: @@ -56,8 +65,18 @@ def get_finding_legacy(self, issue, test, dupes): if "operation" in issue: description += "**Operation:** " + issue["operation"] + "\n" if "leakURL" in issue: - description += "**Leak URL:** [" + issue["leakURL"] + "](" + issue["leakURL"] + ")\n" - description += "\n**String 
Found:**\n\n```\n" + issue["line"].replace(issue["offender"], "REDACTED") + "\n```" + description += ( + "**Leak URL:** [" + + issue["leakURL"] + + "](" + + issue["leakURL"] + + ")\n" + ) + description += ( + "\n**String Found:**\n\n```\n" + + issue["line"].replace(issue["offender"], "REDACTED") + + "\n```" + ) severity = "High" if "Github" in reason or "AWS" in reason or "Heroku" in reason: @@ -75,60 +94,71 @@ def get_finding_legacy(self, issue, test, dupes): static_finding=True, ) # manage tags - finding.unsaved_tags = issue.get("tags", "").split(', ') + finding.unsaved_tags = issue.get("tags", "").split(", ") - dupe_key = hashlib.sha256((issue["offender"] + file_path + str(line)).encode("utf-8")).hexdigest() + dupe_key = hashlib.sha256( + (issue["offender"] + file_path + str(line)).encode("utf-8") + ).hexdigest() if dupe_key not in dupes: dupes[dupe_key] = finding def get_finding_current(self, issue, test, dupes): - reason = issue.get('Description') - line = issue.get('StartLine') + reason = issue.get("Description") + line = issue.get("StartLine") if line: line = int(line) else: line = 0 - match = issue.get('Match') - secret = issue.get('Secret') - file_path = issue.get('File') - commit = issue.get('Commit') + match = issue.get("Match") + secret = issue.get("Secret") + file_path = issue.get("File") + commit = issue.get("Commit") # Author and email will not be used because of GDPR # author = issue.get('Author') # email = issue.get('Email') - date = issue.get('Date') - message = issue.get('Message') - tags = issue.get('Tags') - ruleId = issue.get('RuleID') + date = issue.get("Date") + message = issue.get("Message") + tags = issue.get("Tags") + ruleId = issue.get("RuleID") - title = f'Hard coded {reason} found in {file_path}' + title = f"Hard coded {reason} found in {file_path}" - description = '' + description = "" if secret: - description += f'**Secret:** {secret}\n' + description += f"**Secret:** {secret}\n" if match: - description += f'**Match:** {match}\n' + description += f"**Match:** {match}\n" if message: if len(message.split("\n")) > 1: - description += "**Commit message:**" + "\n```\n" + message.replace('```', '\\`\\`\\`') + "\n```\n" + description += ( + "**Commit message:**" + + "\n```\n" + + message.replace("```", "\\`\\`\\`") + + "\n```\n" + ) else: - description += f'**Commit message:** {message}\n' + description += f"**Commit message:** {message}\n" if commit: - description += f'**Commit hash:** {commit}\n' + description += f"**Commit hash:** {commit}\n" if date: - description += f'**Commit date:** {date}\n' + description += f"**Commit date:** {date}\n" if ruleId: - description += f'**Rule Id:** {ruleId}' - if description[-1] == '\n': + description += f"**Rule Id:** {ruleId}" + if description[-1] == "\n": description = description[:-1] severity = "High" - dupe_key = hashlib.md5((title + secret + str(line)).encode("utf-8")).hexdigest() + dupe_key = hashlib.md5( + (title + secret + str(line)).encode("utf-8") + ).hexdigest() if dupe_key in dupes: finding = dupes[dupe_key] - finding.description = finding.description + '\n\n***\n\n' + description + finding.description = ( + finding.description + "\n\n***\n\n" + description + ) finding.nb_occurences += 1 dupes[dupe_key] = finding else: @@ -142,7 +172,7 @@ def get_finding_current(self, issue, test, dupes): line=line, dynamic_finding=False, static_finding=True, - nb_occurences=1, + nb_occurences=1 ) if tags: finding.unsaved_tags = tags diff --git a/dojo/tools/gosec/parser.py b/dojo/tools/gosec/parser.py index b55c70e6c5..4d3824913b 
100644 --- a/dojo/tools/gosec/parser.py +++ b/dojo/tools/gosec/parser.py @@ -4,7 +4,6 @@ class GosecParser(object): - def get_scan_types(self): return ["Gosec Scanner"] @@ -17,32 +16,37 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): tree = filename.read() try: - data = json.loads(str(tree, 'utf-8')) - except: + data = json.loads(str(tree, "utf-8")) + except Exception: data = json.loads(tree) dupes = dict() for item in data["Issues"]: - impact = '' - references = '' - findingdetail = '' - title = '' + impact = "" + references = "" + findingdetail = "" + title = "" filename = item.get("file") line = item.get("line") scanner_confidence = item.get("confidence") title = item["details"] + " - rule " + item["rule_id"] -# Finding details information + # Finding details information findingdetail += "Filename: {}\n\n".format(filename) findingdetail += "Line number: {}\n\n".format(str(line)) - findingdetail += "Issue Confidence: {}\n\n".format(scanner_confidence) + findingdetail += "Issue Confidence: {}\n\n".format( + scanner_confidence + ) findingdetail += "Code:\n\n" findingdetail += "```{}```".format(item["code"]) sev = item["severity"] - # Best attempt at ongoing documentation provided by gosec, based on rule id - references = "https://securego.io/docs/rules/{}.html".format(item['rule_id']).lower() + # Best attempt at ongoing documentation provided by gosec, based on + # rule id + references = "https://securego.io/docs/rules/{}.html".format( + item["rule_id"] + ).lower() if scanner_confidence: # Assign integer value to confidence. @@ -53,9 +57,9 @@ def get_findings(self, filename, test): elif scanner_confidence == "LOW": scanner_confidence = 7 - if '-' in line: + if "-" in line: # if this is a range, only point to the beginning. 
- line = line.split('-', 1)[0] + line = line.split("-", 1)[0] if line.isdigit(): line = int(line) else: @@ -68,16 +72,18 @@ def get_findings(self, filename, test): else: dupes[dupe_key] = True - find = Finding(title=title, - test=test, - description=findingdetail, - severity=sev.title(), - impact=impact, - references=references, - file_path=filename, - line=line, - scanner_confidence=scanner_confidence, - static_finding=True) + find = Finding( + title=title, + test=test, + description=findingdetail, + severity=sev.title(), + impact=impact, + references=references, + file_path=filename, + line=line, + scanner_confidence=scanner_confidence, + static_finding=True + ) dupes[dupe_key] = find diff --git a/dojo/tools/govulncheck/parser.py b/dojo/tools/govulncheck/parser.py index eb5979c1d8..21c4a90576 100644 --- a/dojo/tools/govulncheck/parser.py +++ b/dojo/tools/govulncheck/parser.py @@ -6,11 +6,10 @@ logger = logging.getLogger(__name__) -SEVERITY = 'Info' +SEVERITY = "Info" class GovulncheckParser: - def get_scan_types(self): return ["Govulncheck Scanner"] @@ -22,44 +21,73 @@ def get_description_for_scan_types(self, scan_type): @staticmethod def get_location(data, node): - while data['Calls']['Functions'][str(node)]['CallSites'][0]['Parent'] != 1: - node = data['Calls']['Functions'][str(node)]['CallSites'][0]['Parent'] - return [f"{x['Pos']['Filename']}:{x['Pos']['Line']}:{x['Pos']['Column']}" for x in - data['Calls']['Functions'][str(node)]['CallSites']] + while ( + data["Calls"]["Functions"][str(node)]["CallSites"][0]["Parent"] + != 1 + ): + node = data["Calls"]["Functions"][str(node)]["CallSites"][0][ + "Parent" + ] + return [ + f"{x['Pos']['Filename']}:{x['Pos']['Line']}:{x['Pos']['Column']}" + for x in data["Calls"]["Functions"][str(node)]["CallSites"] + ] @staticmethod def get_version(data, node): - return data['Requires']['Modules'][str(node)]['Version'] + return data["Requires"]["Modules"][str(node)]["Version"] def get_findings(self, scan_file, test): findings = [] try: data = json.load(scan_file) - except Exception as e: + except Exception: raise ValueError("Invalid JSON format") else: - if data['Vulns']: - list_vulns = data['Vulns'] - for cve, elems in groupby(list_vulns, key=lambda vuln: vuln['OSV']['aliases'][0]): + if data["Vulns"]: + list_vulns = data["Vulns"] + for cve, elems in groupby( + list_vulns, key=lambda vuln: vuln["OSV"]["aliases"][0] + ): first_elem = list(islice(elems, 1)) d = { - 'cve': cve, - 'severity': SEVERITY, - 'title': first_elem[0]['OSV']['id'], - 'component_name': first_elem[0]['OSV']['affected'][0]['package']['name'], - 'component_version': self.get_version(data, first_elem[0]['RequireSink']), + "cve": cve, + "severity": SEVERITY, + "title": first_elem[0]["OSV"]["id"], + "component_name": first_elem[0]["OSV"]["affected"][0][ + "package" + ]["name"], + "component_version": self.get_version( + data, first_elem[0]["RequireSink"] + ), } - d['references'] = first_elem[0]['OSV']['references'][0]['url'] - d['url'] = first_elem[0]['OSV']['affected'][0]['database_specific']['url'] - d['unique_id_from_tool'] = first_elem[0]['OSV']['id'] + d["references"] = first_elem[0]["OSV"]["references"][0][ + "url" + ] + d["url"] = first_elem[0]["OSV"]["affected"][0][ + "database_specific" + ]["url"] + d["unique_id_from_tool"] = first_elem[0]["OSV"]["id"] vuln_methods = set( - first_elem[0]['OSV']['affected'][0]['ecosystem_specific']['imports'][0]['symbols']) - impact = set(self.get_location(data, first_elem[0]['CallSink'])) + first_elem[0]["OSV"]["affected"][0][ + 
"ecosystem_specific" + ]["imports"][0]["symbols"] + ) + impact = set( + self.get_location(data, first_elem[0]["CallSink"]) + ) for elem in elems: - impact.update(self.get_location(data, elem['CallSink'])) + impact.update( + self.get_location(data, elem["CallSink"]) + ) vuln_methods.update( - elem['OSV']['affected'][0]['ecosystem_specific']['imports'][0]['symbols']) - d['impact'] = '; '.join(impact) if impact else None - d['description'] = f"Vulnerable functions: {'; '.join(vuln_methods)}" + elem["OSV"]["affected"][0]["ecosystem_specific"][ + "imports" + ][0]["symbols"] + ) + d["impact"] = "; ".join(impact) if impact else None + d[ + "description" + ] = f"Vulnerable functions: {'; '.join(vuln_methods)}" findings.append(Finding(**d)) return findings diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py index e14913f5c8..8d3409799f 100644 --- a/dojo/tools/h1/parser.py +++ b/dojo/tools/h1/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -__author__ = 'Kirill Gotsman' +__author__ = "Kirill Gotsman" class H1Parser(object): @@ -29,15 +29,17 @@ def get_findings(self, file, test): # Load the contents of the JSON file into a dictionary data = file.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) # Convert JSON report to DefectDojo format dupes = dict() for content in tree["data"]: # Get all relevant data - date = content['attributes']['created_at'] - date = datetime.strftime(datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d") + date = content["attributes"]["created_at"] + date = datetime.strftime( + datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d" + ) # Build the title of the Dojo finding title = "#" + content["id"] + " " + content["attributes"]["title"] @@ -45,21 +47,31 @@ def get_findings(self, file, test): # References try: - issue_tracker_id = content['attributes']['issue_tracker_reference_id'] - issue_tracker_url = content['attributes']['issue_tracker_reference_url'] - references = "[{}]({})\n".format(issue_tracker_id, issue_tracker_url) - except: + issue_tracker_id = content["attributes"][ + "issue_tracker_reference_id" + ] + issue_tracker_url = content["attributes"][ + "issue_tracker_reference_url" + ] + references = "[{}]({})\n".format( + issue_tracker_id, issue_tracker_url + ) + except Exception: references = "" # Build the severity of the Dojo finding try: - severity = content["relationships"]["severity"]["data"]["attributes"]["rating"].capitalize() + severity = content["relationships"]["severity"]["data"][ + "attributes" + ]["rating"].capitalize() if severity not in ["Low", "Medium", "High", "Critical"]: severity = "Info" - except: + except Exception: severity = "Info" # Build the references of the Dojo finding - ref_link = "https://hackerone.com/reports/{}".format(content.get("id")) + ref_link = "https://hackerone.com/reports/{}".format( + content.get("id") + ) references += "[{}]({})".format(ref_link, ref_link) # Set active state of the Dojo finding @@ -70,11 +82,17 @@ def get_findings(self, file, test): # Set CWE of the Dojo finding try: - cwe = int(content["relationships"]["weakness"]["data"]["attributes"]["external_id"][4:]) - except: + cwe = int( + content["relationships"]["weakness"]["data"]["attributes"][ + "external_id" + ][4:] + ) + except Exception: cwe = 0 - dupe_key = hashlib.md5(str(references + title).encode('utf-8')).hexdigest() + dupe_key = hashlib.md5( + str(references + title).encode("utf-8") + ).hexdigest() if dupe_key in dupes: 
finding = dupes[dupe_key] if finding.references: @@ -96,43 +114,60 @@ def get_findings(self, file, test): impact="No impact provided", references=references, cwe=cwe, - dynamic_finding=False,) + dynamic_finding=False + ) finding.unsaved_endpoints = list() dupes[dupe_key] = finding return dupes.values() def build_description(self, content): - date = content['attributes']['created_at'] - date = datetime.strftime(datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d") - reporter = content['relationships']['reporter']['data']['attributes']['username'] - triaged_date = content['attributes']['triaged_at'] + date = content["attributes"]["created_at"] + date = datetime.strftime( + datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d" + ) + reporter = content["relationships"]["reporter"]["data"]["attributes"][ + "username" + ] + triaged_date = content["attributes"]["triaged_at"] # Build the description of the Dojo finding - description = "#" + content['attributes']['title'] + description = "#" + content["attributes"]["title"] description += "\nSubmitted: {}\nBy: {}\n".format(date, reporter) # Add triaged date if triaged_date is not None: triaged_date = datetime.strftime( - datetime.strptime(triaged_date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d") + datetime.strptime(triaged_date, "%Y-%m-%dT%H:%M:%S.%fZ"), + "%Y-%m-%d", + ) description += "Triaged: {}\n".format(triaged_date) # Try to grab CVSS try: - cvss = content['relationships']['severity']['data']['attributes']['score'] + cvss = content["relationships"]["severity"]["data"]["attributes"][ + "score" + ] description += "CVSS: {}\n".format(cvss) - except: + except Exception: pass # Build rest of description meat - description += "##Report: \n{}\n".format(content["attributes"]["vulnerability_information"]) + description += "##Report: \n{}\n".format( + content["attributes"]["vulnerability_information"] + ) # Try to grab weakness if it's there try: - weakness_title = content['relationships']['weakness']['data']['attributes']['name'] - weakness_desc = content['relationships']['weakness']['data']['attributes']['description'] - description += "\n##Weakness: {}\n{}".format(weakness_title, weakness_desc) - except: + weakness_title = content["relationships"]["weakness"]["data"][ + "attributes" + ]["name"] + weakness_desc = content["relationships"]["weakness"]["data"][ + "attributes" + ]["description"] + description += "\n##Weakness: {}\n{}".format( + weakness_title, weakness_desc + ) + except Exception: pass return description diff --git a/dojo/tools/hadolint/parser.py b/dojo/tools/hadolint/parser.py index a2132ae096..9e907160fc 100644 --- a/dojo/tools/hadolint/parser.py +++ b/dojo/tools/hadolint/parser.py @@ -4,7 +4,6 @@ class HadolintParser(object): - def get_scan_types(self): return ["Hadolint Dockerfile check"] @@ -22,35 +21,45 @@ def get_items(self, tree, test): items = {} for node in tree: item = get_item(node, test) - unique_key = str(node['line']) + "-" + str(node['column']) + node['code'] + node['file'] + unique_key = ( + str(node["line"]) + + "-" + + str(node["column"]) + + node["code"] + + node["file"] + ) items[unique_key] = item return items.values() def get_item(vulnerability, test): - if 'level' in vulnerability: + if "level" in vulnerability: # If we're dealing with a license finding, there will be no cvssScore - if vulnerability['level'] == "error": + if vulnerability["level"] == "error": severity = "Critical" - elif vulnerability['level'] == "warning": + elif vulnerability["level"] == "warning": severity = "High" else: severity = 
"Info" - # TODO: some seem to not have anything. Needs UNKNOWN new status in the model. Some vuln do not yet have cvss assigned. + # TODO: some seem to not have anything. Needs UNKNOWN new status in the + # model. Some vuln do not yet have cvss assigned. else: severity = "Info" # create the finding object, with 'static' type finding = Finding( - title=vulnerability['code'] + ": " + vulnerability['message'], + title=vulnerability["code"] + ": " + vulnerability["message"], test=test, severity=severity, - file_path=vulnerability['file'], - line=vulnerability['line'], - description="Vulnerability ID: {}\nDetails: {}\n".format(vulnerability['code'], vulnerability['message']), + file_path=vulnerability["file"], + line=vulnerability["line"], + description="Vulnerability ID: {}\nDetails: {}\n".format( + vulnerability["code"], vulnerability["message"] + ), static_finding=True, - dynamic_finding=False) + dynamic_finding=False, + ) finding.description = finding.description.strip() diff --git a/dojo/tools/harbor_vulnerability/parser.py b/dojo/tools/harbor_vulnerability/parser.py index 393f246f99..3bfdf132a8 100644 --- a/dojo/tools/harbor_vulnerability/parser.py +++ b/dojo/tools/harbor_vulnerability/parser.py @@ -18,11 +18,10 @@ def get_description_for_scan_types(self, scan_type): return "Import vulnerabilities from Harbor API." def get_findings(self, filename, test): - tree = filename.read() try: data = json.loads(str(tree, "utf-8")) - except: + except Exception: data = json.loads(tree) # When doing dictionary, we can detect duplications @@ -39,31 +38,30 @@ def get_findings(self, filename, test): return list() for item in vulnerability: - - id = item.get('id') - package_name = item.get('package') - package_version = item.get('version') - description = item.get('description', 'No description found') - severity = item.get('severity') - fix_version = item.get('fix_version') - links = item.get('links') - - title = f'{id} - {package_name} ({package_version})' + id = item.get("id") + package_name = item.get("package") + package_version = item.get("version") + description = item.get("description", "No description found") + severity = item.get("severity") + fix_version = item.get("fix_version") + links = item.get("links") + + title = f"{id} - {package_name} ({package_version})" severity = transpose_severity(severity) if fix_version: - mitigation = f'Upgrade {package_name} to version {fix_version}' + mitigation = f"Upgrade {package_name} to version {fix_version}" else: mitigation = None if links: - references = '' + references = "" for link in links: - references += f'{link}\n' + references += f"{link}\n" else: references = None - if id and id.startswith('CVE'): + if id and id.startswith("CVE"): vulnerability_id = id else: vulnerability_id = None @@ -97,4 +95,4 @@ def transpose_severity(severity): if severity in Finding.SEVERITIES: return severity else: - return 'Info' + return "Info" diff --git a/dojo/tools/horusec/parser.py b/dojo/tools/horusec/parser.py index 6d5df3d9c6..8eeecc1dbc 100644 --- a/dojo/tools/horusec/parser.py +++ b/dojo/tools/horusec/parser.py @@ -27,13 +27,20 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): data = json.load(filename) - report_date = datetime.strptime(data.get("createdAt")[0:10], "%Y-%m-%d") - return [self._get_finding(node, report_date) for node in data.get("analysisVulnerabilities")] + report_date = datetime.strptime( + data.get("createdAt")[0:10], "%Y-%m-%d" + ) + return [ + self._get_finding(node, report_date) + for node in 
data.get("analysisVulnerabilities") + ] def get_tests(self, scan_type, scan): data = json.load(scan) report_date = parse(data.get("createdAt")) - test = ParserTest(name=self.ID, type=self.ID, version=data.get("version").lstrip("v")) # remove the v in vX.Y.Z + test = ParserTest( + name=self.ID, type=self.ID, version=data.get("version").lstrip("v") + ) # remove the v in vX.Y.Z test.description = "\n".join( [ f"**Status:** {data.get('status')}", @@ -43,7 +50,10 @@ def get_tests(self, scan_type, scan): "```", ] ) - test.findings = [self._get_finding(node, report_date) for node in data.get("analysisVulnerabilities")] + test.findings = [ + self._get_finding(node, report_date) + for node in data.get("analysisVulnerabilities") + ] return [test] def _get_finding(self, data, date): @@ -62,9 +72,14 @@ def _get_finding(self, data, date): severity=data["vulnerabilities"]["severity"].title(), description=description, file_path=data["vulnerabilities"]["file"], - scanner_confidence=self.CONDIFDENCE[data["vulnerabilities"]["confidence"]], + scanner_confidence=self.CONDIFDENCE[ + data["vulnerabilities"]["confidence"] + ], ) # sometimes the attribute 'line' is empty - if data["vulnerabilities"].get("line") and data["vulnerabilities"]["line"].isdigit(): + if ( + data["vulnerabilities"].get("line") + and data["vulnerabilities"]["line"].isdigit() + ): finding.line = int(data["vulnerabilities"]["line"]) return finding From f21d957c9be0515f0068447d929e6a20571182de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 07:52:39 -0500 Subject: [PATCH 35/85] Bump asteval from 0.9.30 to 0.9.31 (#8387) Bumps [asteval](https://github.com/newville/asteval) from 0.9.30 to 0.9.31. - [Release notes](https://github.com/newville/asteval/releases) - [Commits](https://github.com/newville/asteval/compare/0.9.30...0.9.31) --- updated-dependencies: - dependency-name: asteval dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2c6e3a3bfb..4b96b88e53 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # requirements.txt for DefectDojo using Python 3.x -asteval==0.9.30 +asteval==0.9.31 bleach==6.0.0 bleach[css] celery==5.3.1 From 04437d6ba0145779e8f08f477294bd78dd967aff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 07:53:04 -0500 Subject: [PATCH 36/85] Bump sqlalchemy from 2.0.18 to 2.0.19 (#8388) Bumps [sqlalchemy](https://github.com/sqlalchemy/sqlalchemy) from 2.0.18 to 2.0.19. - [Release notes](https://github.com/sqlalchemy/sqlalchemy/releases) - [Changelog](https://github.com/sqlalchemy/sqlalchemy/blob/main/CHANGES.rst) - [Commits](https://github.com/sqlalchemy/sqlalchemy/commits) --- updated-dependencies: - dependency-name: sqlalchemy dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4b96b88e53..363277bb89 100644 --- a/requirements.txt +++ b/requirements.txt @@ -43,7 +43,7 @@ python-dateutil==2.8.2 pytz==2023.3 redis==4.6.0 requests==2.31.0 -sqlalchemy==2.0.18 # Required by Celery broker transport +sqlalchemy==2.0.19 # Required by Celery broker transport supervisor==4.2.5 urllib3==1.26.11 uWSGI==2.0.21 From cbac92fd0420e35b65ae2faeb41e07814b9b7876 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Tue, 18 Jul 2023 15:16:05 +0000 Subject: [PATCH 37/85] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index f7d640dd11..4e8aea8365 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.24.2", + "version": "2.25.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 57f04bb300..4c1f6f5856 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa -__version__ = '2.24.2' +__version__ = '2.25.0-dev' __url__ = 'https://github.com/DefectDojo/django-DefectDojo' __docs__ = 'https://documentation.defectdojo.com' diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 14cedc4a02..bc2050d815 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.24.2" +appVersion: "2.25.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.76 +version: 1.6.77-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 09a5f65003f0ca29874ac0adba0d160cc0ae2932 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 11:11:38 -0500 Subject: [PATCH 38/85] Bump boto3 from 1.28.3 to 1.28.4 (#8392) Bumps [boto3](https://github.com/boto/boto3) from 1.28.3 to 1.28.4. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.3...1.28.4) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 363277bb89..6053d8edd8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.3 # Required for Celery Broker AWS (SQS) support +boto3==1.28.4 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From 41adcdfe476feca8b6f61dfeda0bdd57bed37681 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 11:12:02 -0500 Subject: [PATCH 39/85] Bump gunicorn from 20.1.0 to 21.0.1 (#8391) Bumps [gunicorn](https://github.com/benoitc/gunicorn) from 20.1.0 to 21.0.1. - [Release notes](https://github.com/benoitc/gunicorn/releases) - [Commits](https://github.com/benoitc/gunicorn/compare/20.1.0...21.0.1) --- updated-dependencies: - dependency-name: gunicorn dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6053d8edd8..a2bf0d47dd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,7 +26,7 @@ django-watson==1.6.3 django-prometheus==2.3.1 Django==4.1.10 djangorestframework==3.14.0 -gunicorn==20.1.0 +gunicorn==21.0.1 html2text==2020.1.16 humanize==4.7.0 jira==3.5.2 From e4fcca10b0102ae87fcc0466e609a3d8fb2cbe08 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Wed, 19 Jul 2023 14:30:52 -0500 Subject: [PATCH 40/85] Update chromedriver fetching mechanism (#8403) --- Dockerfile.integration-tests-debian | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian index 06ab484a52..cc5ca50607 100644 --- a/Dockerfile.integration-tests-debian +++ b/Dockerfile.integration-tests-debian @@ -11,6 +11,7 @@ RUN \ curl \ unzip \ gpg \ + jq \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists && \ @@ -28,12 +29,17 @@ RUN \ rm -rf /var/lib/apt/lists && \ true -RUN pip install --no-cache-dir selenium==4.9.0 requests chromedriver-autoinstaller +RUN pip install --no-cache-dir selenium==4.9.0 requests # Installing Chromedriver WORKDIR /opt/chrome-driver RUN \ - python -c "import chromedriver_autoinstaller; chromedriver_autoinstaller.install(cwd=True)" && \ + chrome_version=$(apt-cache show google-chrome-stable | grep Version | awk '{print $2}' | cut -d '-' -f 1) && \ + chrome_version_blob=$(curl -k https://googlechromelabs.github.io/chrome-for-testing/known-good-versions-with-downloads.json | jq ".versions[] | select(.version==\"$chrome_version\")") && \ + chromedriver_url=$(echo $chrome_version_blob | jq -r ".downloads.chromedriver[] | select(.platform==\"linux64\") | .url") && \ + wget https://edgedl.me.gvt1.com/edgedl/chrome/chrome-for-testing/115.0.5790.98/linux64/chromedriver-linux64.zip && \ + unzip -j chromedriver-linux64.zip chromedriver-linux64/chromedriver && \ + rm -rf chromedriver-linux64.zip && \ chmod -R 0755 . 
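# A minimal sketch, not part of the patch itself: the RUN step above derives
# $chromedriver_url from the chrome-for-testing known-good-versions metadata
# via jq, while the wget call that follows downloads a hard-coded
# 115.0.5790.98 archive. Assuming the computed URL is the intended source,
# the same step could consume it directly:
#
#   wget -O chromedriver-linux64.zip "$chromedriver_url" && \
#   unzip -j chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
#   rm -f chromedriver-linux64.zip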
WORKDIR /app From c33e93d56f4bf955945c1b645190a9a233f93c10 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Jul 2023 16:12:50 -0500 Subject: [PATCH 41/85] Bump gunicorn from 21.0.1 to 21.2.0 (#8401) Bumps [gunicorn](https://github.com/benoitc/gunicorn) from 21.0.1 to 21.2.0. - [Release notes](https://github.com/benoitc/gunicorn/releases) - [Commits](https://github.com/benoitc/gunicorn/compare/21.0.1...21.2.0) --- updated-dependencies: - dependency-name: gunicorn dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a2bf0d47dd..75b8429f70 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,7 +26,7 @@ django-watson==1.6.3 django-prometheus==2.3.1 Django==4.1.10 djangorestframework==3.14.0 -gunicorn==21.0.1 +gunicorn==21.2.0 html2text==2020.1.16 humanize==4.7.0 jira==3.5.2 From b9e4aaee018095fb24358d666253772a278f39c5 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 19 Jul 2023 16:13:05 -0500 Subject: [PATCH 42/85] Update rabbitmq Docker tag from 3.12.1 to v3.12.2 (docker-compose.yml) (#8396) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index a016f7c349..841505210f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -154,7 +154,7 @@ services: volumes: - defectdojo_postgres:/var/lib/postgresql/data rabbitmq: - image: rabbitmq:3.12.1-alpine@sha256:8ec30b33b1bb517145a3c43cd0d377367db0fa4903650e1a026c541f15bfc9a8 + image: rabbitmq:3.12.2-alpine@sha256:6c0d0405858c736586c171ce1538acdbe78430c8e0405a0fb1b3b05c193e8899 profiles: - mysql-rabbitmq - postgres-rabbitmq From 77cdbb2afe8066ba4579becd8178e6ecfa94ff9a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 19 Jul 2023 16:13:24 -0500 Subject: [PATCH 43/85] Update gcr.io/cloudsql-docker/gce-proxy Docker tag from 1.33.8 to v1.33.9 (helm/defectdojo/values.yaml) (#8395) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- helm/defectdojo/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index 02c48ae917..33539871b0 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -421,7 +421,7 @@ cloudsql: image: # set repo and image tag of gce-proxy repository: gcr.io/cloudsql-docker/gce-proxy - tag: 1.33.8 + tag: 1.33.9 pullPolicy: IfNotPresent # set CloudSQL instance: 'project:zone:instancename' instance: "" From a8bf43ee84d3c0ab335bce6c4bf7b6ca6647889a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Jul 2023 16:17:27 -0500 Subject: [PATCH 44/85] Bump boto3 from 1.28.4 to 1.28.6 (#8404) Bumps [boto3](https://github.com/boto/boto3) from 1.28.4 to 1.28.6. 
- [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.4...1.28.6) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 75b8429f70..74f515b2e8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.4 # Required for Celery Broker AWS (SQS) support +boto3==1.28.6 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From ae6e95e3043936e43f0baeff7cfe0d5060c7ae54 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Jul 2023 16:18:17 -0500 Subject: [PATCH 45/85] Bump pyjwt from 2.7.0 to 2.8.0 (#8402) Bumps [pyjwt](https://github.com/jpadilla/pyjwt) from 2.7.0 to 2.8.0. - [Release notes](https://github.com/jpadilla/pyjwt/releases) - [Changelog](https://github.com/jpadilla/pyjwt/blob/master/CHANGELOG.rst) - [Commits](https://github.com/jpadilla/pyjwt/compare/2.7.0...2.8.0) --- updated-dependencies: - dependency-name: pyjwt dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 74f515b2e8..a806a4fe5a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -67,7 +67,7 @@ django-debug-toolbar-request-history==0.1.4 vcrpy==5.0.0 vcrpy-unittest==0.1.7 django-tagulous==1.3.3 -PyJWT==2.7.0 +PyJWT==2.8.0 cvss==2.6 django-fieldsignals==0.7.0 hyperlink==21.0.0 From 839b7bca1ec76d8b0bc80122fce3fbcfbeb12d2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Jul 2023 16:56:16 -0500 Subject: [PATCH 46/85] Bump boto3 from 1.28.6 to 1.28.7 (#8409) Bumps [boto3](https://github.com/boto/boto3) from 1.28.6 to 1.28.7. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.6...1.28.7) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a806a4fe5a..6fcf3362f0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.6 # Required for Celery Broker AWS (SQS) support +boto3==1.28.7 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From e3f44fafe789ecbb8c64a27209f591ae92ad110a Mon Sep 17 00:00:00 2001 From: Alejandro Tortolero Date: Fri, 21 Jul 2023 04:49:29 -0500 Subject: [PATCH 47/85] Update files with PEP8 standards in folder dojo/tools #002 (#8301) --- dojo/tools/dockle/parser.py | 11 +- dojo/tools/drheader/__init__.py | 2 +- dojo/tools/drheader/parser.py | 2 - dojo/tools/dsop/parser.py | 211 +++++++++++------- dojo/tools/eslint/__init__.py | 2 +- dojo/tools/eslint/parser.py | 40 ++-- dojo/tools/fortify/__init__.py | 2 +- dojo/tools/fortify/parser.py | 79 ++++--- dojo/tools/generic/parser.py | 100 +++++++-- dojo/tools/ggshield/parser.py | 53 +++-- dojo/tools/github_vulnerability/parser.py | 82 +++++-- dojo/tools/gitlab_container_scan/parser.py | 32 ++- dojo/tools/gitlab_dast/parser.py | 24 +- dojo/tools/gitlab_dep_scan/parser.py | 109 +++++---- .../gitlab_secret_detection_report/parser.py | 8 +- 15 files changed, 490 insertions(+), 267 deletions(-) diff --git a/dojo/tools/dockle/parser.py b/dojo/tools/dockle/parser.py index ba6cc2a615..5c07472bed 100644 --- a/dojo/tools/dockle/parser.py +++ b/dojo/tools/dockle/parser.py @@ -27,18 +27,17 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): data = json.load(filename) dupes = {} - for item in data['details']: - code = item['code'] - dockle_severity = item['level'] - title = item['title'] + for item in data["details"]: + code = item["code"] + dockle_severity = item["level"] + title = item["title"] if dockle_severity == "IGNORE": continue if dockle_severity in self.SEVERITY: severity = self.SEVERITY[dockle_severity] else: severity = "Medium" - description = item.get('alerts', []) - description.sort() + description = sorted(item.get("alerts", [])) description = "\n".join(description) dupe_key = hashlib.sha256( (code + title).encode("utf-8") diff --git a/dojo/tools/drheader/__init__.py b/dojo/tools/drheader/__init__.py index 8b699123cc..656c6e59da 100644 --- a/dojo/tools/drheader/__init__.py +++ b/dojo/tools/drheader/__init__.py @@ -1 +1 @@ -__author__ = 'SPoint42' +__author__ = "SPoint42" diff --git a/dojo/tools/drheader/parser.py b/dojo/tools/drheader/parser.py index ab61d23ab8..4fc141ad0e 100644 --- a/dojo/tools/drheader/parser.py +++ b/dojo/tools/drheader/parser.py @@ -4,7 +4,6 @@ class DrHeaderParser(object): - def get_scan_types(self): return ["DrHeader JSON Importer"] @@ -21,7 +20,6 @@ def get_findings(self, filename, test): except ValueError as err: data = {} for item in data: - findingdetail = '' title = "Header : " + item["rule"] message = item["message"] severity = item["severity"].title() diff --git a/dojo/tools/dsop/parser.py b/dojo/tools/dsop/parser.py index 462f03398e..597e69c105 100644 --- a/dojo/tools/dsop/parser.py +++ b/dojo/tools/dsop/parser.py @@ -5,7 +5,6 @@ class DsopParser: - def get_scan_types(self): return ["DSOP Scan"] @@ -18,11 +17,15 @@ def 
get_description_for_scan_types(self, scan_type): def get_findings(self, file, test): book = load_workbook(file) items = list() - self.__parse_disa(test, items, book['OpenSCAP - DISA Compliance']) - self.__parse_oval(test, items, book['OpenSCAP - OVAL Results']) - self.__parse_twistlock(test, items, book['Twistlock Vulnerability Results']) - self.__parse_anchore(test, items, book['Anchore CVE Results']) - self.__parse_anchore_compliance(test, items, book['Anchore Compliance Results']) + self.__parse_disa(test, items, book["OpenSCAP - DISA Compliance"]) + self.__parse_oval(test, items, book["OpenSCAP - OVAL Results"]) + self.__parse_twistlock( + test, items, book["Twistlock Vulnerability Results"] + ) + self.__parse_anchore(test, items, book["Anchore CVE Results"]) + self.__parse_anchore_compliance( + test, items, book["Anchore Compliance Results"] + ) return items def __parse_disa(self, test, items, sheet): @@ -35,32 +38,43 @@ def __parse_disa(self, test, items, sheet): for i in range(len(row)): headers[row[i]] = i else: - if row[headers['result']] not in ('fail', 'notchecked'): + if row[headers["result"]] not in ("fail", "notchecked"): continue - title = row[headers['title']] - unique_id = row[headers['ruleid']] - if row[headers['severity']] == 'unknown': - severity = 'Info' + title = row[headers["title"]] + unique_id = row[headers["ruleid"]] + if row[headers["severity"]] == "unknown": + severity = "Info" else: - severity = row[headers['severity']].title() - references = row[headers['refs']] - description = row[headers['desc']] - impact = row[headers['rationale']] - date = row[headers['scanned_date']] + severity = row[headers["severity"]].title() + references = row[headers["refs"]] + description = row[headers["desc"]] + impact = row[headers["rationale"]] + date = row[headers["scanned_date"]] tags = "disa" - finding = Finding(title=title, date=date, severity=severity, description=description, - impact=impact, references=references, test=test, unique_id_from_tool=unique_id, - static_finding=True, dynamic_finding=False) - - if row[headers['identifiers']]: - finding.unsaved_vulnerability_ids = [row[headers['identifiers']]] + finding = Finding( + title=title, + date=date, + severity=severity, + description=description, + impact=impact, + references=references, + test=test, + unique_id_from_tool=unique_id, + static_finding=True, + dynamic_finding=False, + ) + + if row[headers["identifiers"]]: + finding.unsaved_vulnerability_ids = [ + row[headers["identifiers"]] + ] finding.unsaved_tags = tags items.append(finding) def __parse_oval(self, test, items, sheet): - severity_pattern = re.compile(r'\((.*)\)') + severity_pattern = re.compile(r"\((.*)\)") headers = dict() first = True for row in sheet.iter_rows(min_row=1, values_only=True): @@ -70,30 +84,38 @@ def __parse_oval(self, test, items, sheet): for i in range(len(row)): headers[row[i]] = i else: - if not row[headers['result']] or row[headers['result']] in ('false'): + if not row[headers["result"]] or row[headers["result"]] in ( + "false" + ): continue - title = row[headers['title']] + title = row[headers["title"]] match = severity_pattern.search(title) if match: sev = match.group(1) - if sev == 'Important': - severity = 'High' - elif sev == 'Moderate': - severity = 'Medium' - elif sev == 'None': - severity = 'Info' + if sev == "Important": + severity = "High" + elif sev == "Moderate": + severity = "Medium" + elif sev == "None": + severity = "Info" else: severity = sev else: - severity = 'Info' - unique_id = row[headers['id']] + severity = "Info" 
+ unique_id = row[headers["id"]] tags = "oval" - finding = Finding(title=title, severity=severity, unique_id_from_tool=unique_id, - test=test, static_finding=True, dynamic_finding=False) + finding = Finding( + title=title, + severity=severity, + unique_id_from_tool=unique_id, + test=test, + static_finding=True, + dynamic_finding=False, + ) - if row[headers['ref']]: - finding.unsaved_vulnerability_ids = [row[headers['ref']]] + if row[headers["ref"]]: + finding.unsaved_vulnerability_ids = [row[headers["ref"]]] finding.unsaved_tags = tags items.append(finding) @@ -108,31 +130,41 @@ def __parse_twistlock(self, test, items, sheet): for i in range(len(row)): headers[row[i]] = i else: - if row[headers['severity']] is None: + if row[headers["severity"]] is None: continue - description = row[headers['desc']] - mitigation = row[headers['status']] - url = row[headers['link']] - - component_name = row[headers['packageName']] - component_version = row[headers['packageVersion']] - title = '{}: {} - {}'.format(row[headers['cve']], component_name, component_version) - if row[headers['severity']] == 'important': - severity = 'High' - elif row[headers['severity']] == 'moderate': - severity = 'Medium' + description = row[headers["desc"]] + # mitigation = row[headers["status"]] + url = row[headers["link"]] + + component_name = row[headers["packageName"]] + component_version = row[headers["packageVersion"]] + title = "{}: {} - {}".format( + row[headers["cve"]], component_name, component_version + ) + if row[headers["severity"]] == "important": + severity = "High" + elif row[headers["severity"]] == "moderate": + severity = "Medium" else: - severity = row[headers['severity']].title() - severity_justification = row[headers['vecStr']] + severity = row[headers["severity"]].title() + severity_justification = row[headers["vecStr"]] tags = "twistlock" - finding = Finding(title=title, url=url, severity=severity, description=description, - component_name=component_name, component_version=component_version, - severity_justification=severity_justification, test=test, - static_finding=True, dynamic_finding=False) - - if row[headers['cve']]: - finding.unsaved_vulnerability_ids = [row[headers['cve']]] + finding = Finding( + title=title, + url=url, + severity=severity, + description=description, + component_name=component_name, + component_version=component_version, + severity_justification=severity_justification, + test=test, + static_finding=True, + dynamic_finding=False, + ) + + if row[headers["cve"]]: + finding.unsaved_vulnerability_ids = [row[headers["cve"]]] finding.unsaved_tags = tags items.append(finding) @@ -149,22 +181,28 @@ def __parse_anchore(self, test, items, sheet): else: if row[0] is None: continue - severity = row[headers['severity']] - component = row[headers['package']] - file_path = row[headers['package_path']] - mitigation = row[headers['fix']] - description = "Image affected: {}".format(row[headers['tag']]) - title = '{}: {}'.format(row[headers['cve']], component) + severity = row[headers["severity"]] + component = row[headers["package"]] + file_path = row[headers["package_path"]] + mitigation = row[headers["fix"]] + description = "Image affected: {}".format(row[headers["tag"]]) + title = "{}: {}".format(row[headers["cve"]], component) tags = "anchore" - finding = Finding(title=title, severity=severity, - mitigation=mitigation, component_name=component, - description=description, test=test, - static_finding=True, dynamic_finding=False, - file_path=file_path) - - if row[headers['cve']]: - 
finding.unsaved_vulnerability_ids = [row[headers['cve']]] + finding = Finding( + title=title, + severity=severity, + mitigation=mitigation, + component_name=component, + description=description, + test=test, + static_finding=True, + dynamic_finding=False, + file_path=file_path, + ) + + if row[headers["cve"]]: + finding.unsaved_vulnerability_ids = [row[headers["cve"]]] finding.unsaved_tags = tags items.append(finding) @@ -179,24 +217,35 @@ def __parse_anchore_compliance(self, test, items, sheet): for i in range(len(row)): headers[row[i]] = i else: - if row[headers['policy_id']] != "DoDFileChecks": + if row[headers["policy_id"]] != "DoDFileChecks": continue - if row[headers['gate_action']] == "warn": + if row[headers["gate_action"]] == "warn": severity = "Medium" - elif row[headers['gate_action']] == "stop": + elif row[headers["gate_action"]] == "stop": severity = "Critical" else: severity = "Info" severity = severity mitigation = "To be investigated" - description = "Gate: {} (Trigger: {}): {}".format(row[headers['gate']], row[headers['trigger']], row[headers['check_output']]) - title = '{}: {}'.format(row[headers['policy_id']], row[headers['trigger_id']]) + description = "Gate: {} (Trigger: {}): {}".format( + row[headers["gate"]], + row[headers["trigger"]], + row[headers["check_output"]], + ) + title = "{}: {}".format( + row[headers["policy_id"]], row[headers["trigger_id"]] + ) tags = "anchore_compliance" - finding = Finding(title=title, severity=severity, - mitigation=mitigation, - description=description, test=test, - static_finding=True, dynamic_finding=False) + finding = Finding( + title=title, + severity=severity, + mitigation=mitigation, + description=description, + test=test, + static_finding=True, + dynamic_finding=False, + ) finding.unsaved_tags = tags items.append(finding) diff --git a/dojo/tools/eslint/__init__.py b/dojo/tools/eslint/__init__.py index bd1ada5085..ad5566ba42 100644 --- a/dojo/tools/eslint/__init__.py +++ b/dojo/tools/eslint/__init__.py @@ -1 +1 @@ -__author__ = 'omerlh' +__author__ = "omerlh" diff --git a/dojo/tools/eslint/parser.py b/dojo/tools/eslint/parser.py index 5b586b0cb7..c3e2167b8c 100644 --- a/dojo/tools/eslint/parser.py +++ b/dojo/tools/eslint/parser.py @@ -4,7 +4,6 @@ class ESLintParser(object): - def get_scan_types(self): return ["ESLint Scan"] @@ -25,42 +24,45 @@ def _convert_eslint_severity_to_dojo_severity(self, eslint_severity): def get_findings(self, filename, test): tree = filename.read() try: - data = json.loads(str(tree, 'utf-8')) - except: + data = json.loads(str(tree, "utf-8")) + except Exception: data = json.loads(tree) items = list() for item in data: - findingdetail = '' + findingdetail = "" - if (len(item["messages"]) == 0): + if len(item["messages"]) == 0: continue for message in item["messages"]: - if message["message"] is None: title = str("Finding Not defined") else: title = str(message["message"]) if message["ruleId"] is not None: - title = title + ' Test ID: ' + str(message["ruleId"]) + title = title + " Test ID: " + str(message["ruleId"]) findingdetail += "Filename: " + item["filePath"] + "\n" findingdetail += "Line number: " + str(message["line"]) + "\n" - sev = self._convert_eslint_severity_to_dojo_severity(message["severity"]) - - find = Finding(title=title, - test=test, - description=findingdetail, - severity=sev.title(), - file_path=item["filePath"], - line=message["line"], - url='N/A', - static_finding=True, - mitigation='N/A', - impact='N/A') + sev = self._convert_eslint_severity_to_dojo_severity( + message["severity"] + ) 
+ + find = Finding( + title=title, + test=test, + description=findingdetail, + severity=sev.title(), + file_path=item["filePath"], + line=message["line"], + url="N/A", + static_finding=True, + mitigation="N/A", + impact="N/A", + ) items.append(find) return items diff --git a/dojo/tools/fortify/__init__.py b/dojo/tools/fortify/__init__.py index 93ff97749b..5c0e70cba7 100644 --- a/dojo/tools/fortify/__init__.py +++ b/dojo/tools/fortify/__init__.py @@ -1 +1 @@ -__author__ = 'Rajarshi333' +__author__ = "Rajarshi333" diff --git a/dojo/tools/fortify/parser.py b/dojo/tools/fortify/parser.py index c334bf8fe1..38f3c336a4 100644 --- a/dojo/tools/fortify/parser.py +++ b/dojo/tools/fortify/parser.py @@ -1,4 +1,3 @@ - import logging from defusedxml import ElementTree @@ -9,7 +8,6 @@ class FortifyParser(object): - def get_scan_types(self): return ["Fortify Scan"] @@ -30,9 +28,16 @@ def get_findings(self, filename, test): issues = [] meta_pair = ({}, {}) issue_pair = ([], []) - for ReportSection in root.findall('ReportSection'): - if ReportSection.findtext('Title') in ["Results Outline", "Issue Count by Category"]: - place = 0 if ReportSection.findtext('Title') == "Results Outline" else 1 + for ReportSection in root.findall("ReportSection"): + if ReportSection.findtext("Title") in [ + "Results Outline", + "Issue Count by Category", + ]: + place = ( + 0 + if ReportSection.findtext("Title") == "Results Outline" + else 1 + ) # Get information on the vulnerability like the Abstract, Explanation, # Recommendation, and Tips for group in ReportSection.iter("GroupingSection"): @@ -40,8 +45,10 @@ def get_findings(self, filename, test): maj_attr_summary = group.find("MajorAttributeSummary") if maj_attr_summary: meta_info = maj_attr_summary.findall("MetaInfo") - meta_pair[place][title] = {x.findtext("Name"): x.findtext("Value") - for x in meta_info} + meta_pair[place][title] = { + x.findtext("Name"): x.findtext("Value") + for x in meta_info + } # Collect all issues for issue in ReportSection.iter("Issue"): issue_pair[place].append(issue) @@ -56,15 +63,17 @@ def get_findings(self, filename, test): # All issues obtained, create a map for reference issue_map = {} for issue in issues: - issue_id = issue.attrib['iid'] + issue.attrib["iid"] details = { "Category": issue.find("Category").text, - "Folder": issue.find("Folder").text, "Kingdom": issue.find("Kingdom").text, + "Folder": issue.find("Folder").text, + "Kingdom": issue.find("Kingdom").text, "Abstract": issue.find("Abstract").text, "Friority": issue.find("Friority").text, "FileName": issue.find("Primary").find("FileName").text, "FilePath": issue.find("Primary").find("FilePath").text, - "LineStart": issue.find("Primary").find("LineStart").text} + "LineStart": issue.find("Primary").find("LineStart").text, + } if issue.find("Primary").find("Snippet"): details["Snippet"] = issue.find("Primary").find("Snippet").text @@ -76,27 +85,32 @@ def get_findings(self, filename, test): "FileName": issue.find("Source").find("FileName").text, "FilePath": issue.find("Source").find("FilePath").text, "LineStart": issue.find("Source").find("LineStart").text, - "Snippet": issue.find("Source").find("Snippet").text} + "Snippet": issue.find("Source").find("Snippet").text, + } details["Source"] = source - issue_map.update({issue.attrib['iid']: details}) + issue_map.update({issue.attrib["iid"]: details}) items = [] dupes = set() for issue_key, issue in issue_map.items(): - title = self.format_title(issue["Category"], issue["FileName"], issue["LineStart"]) + title = self.format_title( + 
issue["Category"], issue["FileName"], issue["LineStart"] + ) if title not in dupes: - items.append(Finding( - title=title, - severity=issue["Friority"], - file_path=issue['FilePath'], - line=int(issue['LineStart']), - static_finding=True, - test=test, - description=self.format_description(issue, cat_meta), - mitigation=self.format_mitigation(issue, cat_meta), - unique_id_from_tool=issue_key - )) + items.append( + Finding( + title=title, + severity=issue["Friority"], + file_path=issue["FilePath"], + line=int(issue["LineStart"]), + static_finding=True, + test=test, + description=self.format_description(issue, cat_meta), + mitigation=self.format_mitigation(issue, cat_meta), + unique_id_from_tool=issue_key, + ) + ) dupes.add(title) return items @@ -139,14 +153,21 @@ def format_description(self, issue, meta_info) -> str: """ desc = "##Catagory: {}\n".format(issue["Category"]) desc += "###Abstract:\n{}\n###Snippet:\n**File: {}: {}**\n```\n{}\n```\n".format( - issue["Abstract"], issue["FileName"], issue["LineStart"], issue["Snippet"]) + issue["Abstract"], + issue["FileName"], + issue["LineStart"], + issue["Snippet"], + ) explanation = meta_info[issue["Category"]].get("Explanation") source = issue.get("Source") if source: - desc += "##Source:\nThis snippet provides more context on the execution path that " \ - "leads to this finding. \n" \ - "####Snippet:\n**File: {}: {}**\n```\n{}\n```\n".format( - source["FileName"], source["LineStart"], source["Snippet"]) + desc += ( + "##Source:\nThis snippet provides more context on the execution path that " + "leads to this finding. \n" + "####Snippet:\n**File: {}: {}**\n```\n{}\n```\n".format( + source["FileName"], source["LineStart"], source["Snippet"] + ) + ) if explanation: desc += "##Explanation:\n {}".format(explanation) return desc diff --git a/dojo/tools/generic/parser.py b/dojo/tools/generic/parser.py index d75ef2ff9e..ab0f451b0a 100644 --- a/dojo/tools/generic/parser.py +++ b/dojo/tools/generic/parser.py @@ -69,26 +69,61 @@ def _get_test_json(self, data): del item["vulnerability_ids"] # check for required keys - required = {'title', 'severity', 'description'} + required = {"title", "severity", "description"} missing = sorted(required.difference(item)) if missing: raise ValueError(f"Required fields are missing: {missing}") # check for allowed keys allowed = { - 'date', 'cwe', 'cve', 'cvssv3', 'cvssv3_score', 'mitigation', 'impact', - 'steps_to_reproduce', 'severity_justification', 'references', 'active', 'verified', - 'false_p', 'out_of_scope', 'risk_accepted', 'under_review', 'is_mitigated', - 'thread_id', 'mitigated', 'numerical_severity', 'param', 'payload', 'line', 'file_path', - 'component_name', 'component_version', 'static_finding', 'dynamic_finding', - 'scanner_confidence', 'unique_id_from_tool', 'vuln_id_from_tool', 'sast_source_object', - 'sast_sink_object', 'sast_source_line', 'sast_source_file_path', 'nb_occurences', - 'publish_date', 'service', 'planned_remediation_date', 'planned_remediation_version', - 'effort_for_fixing', 'tags' + "date", + "cwe", + "cve", + "cvssv3", + "cvssv3_score", + "mitigation", + "impact", + "steps_to_reproduce", + "severity_justification", + "references", + "active", + "verified", + "false_p", + "out_of_scope", + "risk_accepted", + "under_review", + "is_mitigated", + "thread_id", + "mitigated", + "numerical_severity", + "param", + "payload", + "line", + "file_path", + "component_name", + "component_version", + "static_finding", + "dynamic_finding", + "scanner_confidence", + "unique_id_from_tool", + 
"vuln_id_from_tool", + "sast_source_object", + "sast_sink_object", + "sast_source_line", + "sast_source_file_path", + "nb_occurences", + "publish_date", + "service", + "planned_remediation_date", + "planned_remediation_version", + "effort_for_fixing", + "tags", }.union(required) not_allowed = sorted(set(item).difference(allowed)) if not_allowed: - raise ValueError(f"Not allowed fields are present: {not_allowed}") + raise ValueError( + f"Not allowed fields are present: {not_allowed}" + ) finding = Finding(**item) @@ -96,13 +131,14 @@ def _get_test_json(self, data): if unsaved_endpoints: finding.unsaved_endpoints = [] for endpoint_item in unsaved_endpoints: - if type(endpoint_item) is str: + if isinstance(endpoint_item, str): if "://" in endpoint_item: # is the host full uri? endpoint = Endpoint.from_uri(endpoint_item) # can raise exception if the host is not valid URL else: endpoint = Endpoint.from_uri("//" + endpoint_item) - # can raise exception if there is no way to parse the host + # can raise exception if there is no way to parse + # the host else: endpoint = Endpoint(**endpoint_item) finding.unsaved_endpoints.append(endpoint) @@ -113,17 +149,23 @@ def _get_test_json(self, data): finding.unsaved_vulnerability_ids = [finding.cve] if unsaved_vulnerability_ids: if finding.unsaved_vulnerability_ids: - finding.unsaved_vulnerability_ids.append(unsaved_vulnerability_ids) + finding.unsaved_vulnerability_ids.append( + unsaved_vulnerability_ids + ) else: - finding.unsaved_vulnerability_ids = unsaved_vulnerability_ids + finding.unsaved_vulnerability_ids = ( + unsaved_vulnerability_ids + ) test_internal.findings.append(finding) return test_internal def _get_findings_csv(self, filename): content = filename.read() - if type(content) is bytes: + if isinstance(content, bytes): content = content.decode("utf-8") - reader = csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"') + reader = csv.DictReader( + io.StringIO(content), delimiter=",", quotechar='"' + ) dupes = dict() for row in reader: @@ -132,7 +174,9 @@ def _get_findings_csv(self, filename): description=row["Description"], date=parse(row["Date"]).date(), severity=row["Severity"], - duplicate=self._convert_bool(row.get("Duplicate", "FALSE")), # bool False by default + duplicate=self._convert_bool( + row.get("Duplicate", "FALSE") + ), # bool False by default nb_occurences=1, ) # manage active @@ -159,9 +203,13 @@ def _get_findings_csv(self, filename): # manage Vulnerability Id if "Vulnerability Id" in row and row["Vulnerability Id"]: if finding.unsaved_vulnerability_ids: - finding.unsaved_vulnerability_ids.append(row["Vulnerability Id"]) + finding.unsaved_vulnerability_ids.append( + row["Vulnerability Id"] + ) else: - finding.unsaved_vulnerability_ids = [row["Vulnerability Id"]] + finding.unsaved_vulnerability_ids = [ + row["Vulnerability Id"] + ] # manage CWE if "CweId" in row: finding.cwe = int(row["CweId"]) @@ -177,7 +225,9 @@ def _get_findings_csv(self, filename): # manage endpoints if "Url" in row: finding.unsaved_endpoints = [ - Endpoint.from_uri(row["Url"]) if "://" in row["Url"] else Endpoint.from_uri("//" + row["Url"]) + Endpoint.from_uri(row["Url"]) + if "://" in row["Url"] + else Endpoint.from_uri("//" + row["Url"]) ] # manage internal de-duplication @@ -194,9 +244,13 @@ def _get_findings_csv(self, filename): find = dupes[key] find.unsaved_endpoints.extend(finding.unsaved_endpoints) if find.unsaved_vulnerability_ids: - find.unsaved_vulnerability_ids.extend(finding.unsaved_vulnerability_ids) + 
find.unsaved_vulnerability_ids.extend( + finding.unsaved_vulnerability_ids + ) else: - find.unsaved_vulnerability_ids = finding.unsaved_vulnerability_ids + find.unsaved_vulnerability_ids = ( + finding.unsaved_vulnerability_ids + ) find.nb_occurences += 1 else: dupes[key] = finding diff --git a/dojo/tools/ggshield/parser.py b/dojo/tools/ggshield/parser.py index 79cb00886a..3d6373c87e 100755 --- a/dojo/tools/ggshield/parser.py +++ b/dojo/tools/ggshield/parser.py @@ -23,51 +23,51 @@ def get_findings(self, filename, test): Converts a Ggshield report to DefectDojo findings """ json_data = json.load(filename) - issues = json_data.get('scans') + issues = json_data.get("scans") dupes = dict() for issue in issues: - if issue.get('total_incidents') > 0: + if issue.get("total_incidents") > 0: findings = {} - commit = issue.get('id') - extra_info = issue.get('extra_info') + commit = issue.get("id") + extra_info = issue.get("extra_info") findings["commit"] = commit - findings["author"] = extra_info.get('author') - findings["email"] = extra_info.get('email') - date = parser.parse(extra_info.get('date')) + findings["author"] = extra_info.get("author") + findings["email"] = extra_info.get("email") + date = parser.parse(extra_info.get("date")) commit_date = str(date).split(" ")[0] findings["commit_date"] = commit_date - for entity in issue.get('entities_with_incidents'): - file_path = entity.get('filename') + for entity in issue.get("entities_with_incidents"): + file_path = entity.get("filename") findings["file_path"] = file_path - for incident in entity.get('incidents'): - policy = incident.get('policy') - secret_key_type = incident.get('type') - total_occurrences = incident.get('total_occurrences') + for incident in entity.get("incidents"): + policy = incident.get("policy") + secret_key_type = incident.get("type") + total_occurrences = incident.get("total_occurrences") findings["policy"] = policy findings["secret_key_type"] = secret_key_type findings["total_occurrences"] = total_occurrences - for item in incident.get('occurrences'): + for item in incident.get("occurrences"): self.get_items(item, findings, dupes, test) return list(dupes.values()) def get_items(self, item, findings, dupes, test): - findings["match"] = item.get('match') - findings["type"] = item.get('type') - line_start = item.get('line_start') - line_end = item.get('line_end') + findings["match"] = item.get("match") + findings["type"] = item.get("type") + line_start = item.get("line_start") + line_end = item.get("line_end") if line_start: line_start = int(line_start) if line_end: line_end = int(line_end) - findings["line_start"] = item.get('line_start') - findings["line_end"] = item.get('line_end') + findings["line_start"] = item.get("line_start") + findings["line_end"] = item.get("line_end") title = f'Hard coded {findings["secret_key_type"]} found in {findings["file_path"]}' severity = "High" if "*" in findings["match"]: findings["match"] = findings["match"].replace("*", "-") - description = '' + description = "" if findings["match"]: description += f'**Secret:** {findings["match"]}\n' if findings["type"]: @@ -99,10 +99,17 @@ def get_items(self, item, findings, dupes, test): line=findings["line_start"], dynamic_finding=False, static_finding=True, - date=findings["commit_date"] + date=findings["commit_date"], ) - key = hashlib.md5((title + findings["match"] + str(findings["line_start"]) + str(findings["line_end"])).encode("utf-8")).hexdigest() + key = hashlib.md5( + ( + title + + findings["match"] + + str(findings["line_start"]) + + 
str(findings["line_end"]) + ).encode("utf-8") + ).hexdigest() if key not in dupes: dupes[key] = finding diff --git a/dojo/tools/github_vulnerability/parser.py b/dojo/tools/github_vulnerability/parser.py index a00ec74fd5..15bf37606c 100644 --- a/dojo/tools/github_vulnerability/parser.py +++ b/dojo/tools/github_vulnerability/parser.py @@ -22,28 +22,42 @@ def get_findings(self, filename, test): vulnerabilityAlerts = self._search_vulnerability_alerts(data["data"]) if not vulnerabilityAlerts: - raise ValueError("Invalid report, no 'vulnerabilityAlerts' node found") + raise ValueError( + "Invalid report, no 'vulnerabilityAlerts' node found" + ) repository_url = None if "repository" in data["data"]: if "nameWithOwner" in data["data"]["repository"]: - repository_url = 'https://github.com/{}'.format(data["data"]["repository"]["nameWithOwner"]) + repository_url = "https://github.com/{}".format( + data["data"]["repository"]["nameWithOwner"] + ) if "url" in data["data"]["repository"]: repository_url = data["data"]["repository"]["url"] dupes = dict() for alert in vulnerabilityAlerts["nodes"]: - description = alert["securityVulnerability"]["advisory"]["description"] + description = alert["securityVulnerability"]["advisory"][ + "description" + ] if "number" in alert and repository_url is not None: - dependabot_url = repository_url + '/security/dependabot/{}'.format(alert["number"]) - description = '[{}]({})\n'.format(dependabot_url, dependabot_url) + description + dependabot_url = ( + repository_url + + "/security/dependabot/{}".format(alert["number"]) + ) + description = ( + "[{}]({})\n".format(dependabot_url, dependabot_url) + + description + ) finding = Finding( title=alert["securityVulnerability"]["advisory"]["summary"], test=test, description=description, - severity=self._convert_security(alert["securityVulnerability"].get("severity", "MODERATE")), + severity=self._convert_security( + alert["securityVulnerability"].get("severity", "MODERATE") + ), static_finding=True, dynamic_finding=False, unique_id_from_tool=alert["id"], @@ -55,41 +69,71 @@ def get_findings(self, filename, test): if "createdAt" in alert: finding.date = dateutil.parser.parse(alert["createdAt"]) - if "state" in alert and ("FIXED" == alert["state"] or "DISMISSED" == alert["state"]): + if "state" in alert and ( + "FIXED" == alert["state"] or "DISMISSED" == alert["state"] + ): finding.active = False finding.is_mitigated = True # if the package is present if "package" in alert["securityVulnerability"]: - finding.component_name = alert["securityVulnerability"]["package"].get("name") + finding.component_name = alert["securityVulnerability"][ + "package" + ].get("name") if "references" in alert["securityVulnerability"]["advisory"]: finding.references = "" - for ref in alert["securityVulnerability"]["advisory"]["references"]: + for ref in alert["securityVulnerability"]["advisory"][ + "references" + ]: finding.references += ref["url"] + "\r\n" if "identifiers" in alert["securityVulnerability"]["advisory"]: unsaved_vulnerability_ids = list() - for identifier in alert["securityVulnerability"]["advisory"]["identifiers"]: + for identifier in alert["securityVulnerability"]["advisory"][ + "identifiers" + ]: if identifier.get("value"): - unsaved_vulnerability_ids.append(identifier.get("value")) + unsaved_vulnerability_ids.append( + identifier.get("value") + ) if unsaved_vulnerability_ids: - finding.unsaved_vulnerability_ids = unsaved_vulnerability_ids + finding.unsaved_vulnerability_ids = ( + unsaved_vulnerability_ids + ) if "cvss" in 
alert["securityVulnerability"]["advisory"]: - if "score" in alert["securityVulnerability"]["advisory"]["cvss"]: - score = alert["securityVulnerability"]["advisory"]["cvss"]["score"] + if ( + "score" + in alert["securityVulnerability"]["advisory"]["cvss"] + ): + score = alert["securityVulnerability"]["advisory"]["cvss"][ + "score" + ] if score is not None: finding.cvssv3_score = score - if "vectorString" in alert["securityVulnerability"]["advisory"]["cvss"]: - cvss_vector_string = alert["securityVulnerability"]["advisory"]["cvss"]["vectorString"] + if ( + "vectorString" + in alert["securityVulnerability"]["advisory"]["cvss"] + ): + cvss_vector_string = alert["securityVulnerability"][ + "advisory" + ]["cvss"]["vectorString"] if cvss_vector_string is not None: - cvss_objects = cvss_parser.parse_cvss_from_text(cvss_vector_string) + cvss_objects = cvss_parser.parse_cvss_from_text( + cvss_vector_string + ) if len(cvss_objects) > 0: finding.cvssv3 = cvss_objects[0].clean_vector() - if "cwes" in alert["securityVulnerability"]["advisory"] and "nodes" in alert["securityVulnerability"]["advisory"]["cwes"]: - cwe_nodes = alert["securityVulnerability"]["advisory"]["cwes"]["nodes"] + if ( + "cwes" in alert["securityVulnerability"]["advisory"] + and "nodes" + in alert["securityVulnerability"]["advisory"]["cwes"] + ): + cwe_nodes = alert["securityVulnerability"]["advisory"]["cwes"][ + "nodes" + ] if cwe_nodes and len(cwe_nodes) > 0: finding.cwe = int(cwe_nodes[0].get("cweId")[4:]) diff --git a/dojo/tools/gitlab_container_scan/parser.py b/dojo/tools/gitlab_container_scan/parser.py index 81e865b87e..0912d2fd2e 100644 --- a/dojo/tools/gitlab_container_scan/parser.py +++ b/dojo/tools/gitlab_container_scan/parser.py @@ -29,10 +29,18 @@ def _get_dependency_name(self, dependency): return "" def _get_identifier_cve(self, identifier): - return identifier["value"] if identifier.get("type", "no-type") == "cve" else None + return ( + identifier["value"] + if identifier.get("type", "no-type") == "cve" + else None + ) def _get_identifier_cwe(self, identifier): - return identifier["value"] if identifier.get("type", "no-type") == "cwe" else None + return ( + identifier["value"] + if identifier.get("type", "no-type") == "cwe" + else None + ) def _get_first_cve(self, identifiers): cwe = "" @@ -54,7 +62,11 @@ def _get_package_string(self, dependency): return f"{dependency_name}-{dependency_version}" return dependency_name # check if name is missing, but at least version is here - return f"unknown-package-{dependency_version}" if dependency_version else None + return ( + f"unknown-package-{dependency_version}" + if dependency_version + else None + ) def get_findings(self, file, test): findings = [] @@ -70,7 +82,8 @@ def get_findings(self, file, test): title = vulnerability.get("message") dependency = vulnerability["location"]["dependency"] identifiers = vulnerability["identifiers"] - # In new versiona, the message field is no longer in report, so build the title from other parts + # In new versiona, the message field is no longer in report, so + # build the title from other parts if not title: issue_string = self._get_first_cve(identifiers) location_string = self._get_package_string(dependency) @@ -100,14 +113,19 @@ def get_findings(self, file, test): if unsaved_vulnerability_ids: finding.unsaved_vulnerability_ids = unsaved_vulnerability_ids - # Check package key before name as both is optional on GitLab schema + # Check package key before name as both is optional on GitLab + # schema dependency_name = 
self._get_dependency_name(dependency) if dependency_name: - finding.component_name = textwrap.shorten(dependency_name, width=190, placeholder="...") + finding.component_name = textwrap.shorten( + dependency_name, width=190, placeholder="..." + ) dependency_version = self._get_dependency_version(dependency) if dependency_version: - finding.component_version = textwrap.shorten(dependency_version, width=90, placeholder="...") + finding.component_version = textwrap.shorten( + dependency_version, width=90, placeholder="..." + ) if "solution" in vulnerability: finding.mitigation = vulnerability["solution"] diff --git a/dojo/tools/gitlab_dast/parser.py b/dojo/tools/gitlab_dast/parser.py index 5644880969..fc02d5901b 100644 --- a/dojo/tools/gitlab_dast/parser.py +++ b/dojo/tools/gitlab_dast/parser.py @@ -34,11 +34,15 @@ def get_items(self, tree, test): item = self.get_item(node, test, scanner) item_key = hashlib.sha256( - "|".join([item.severity, item.title, item.description]).encode() + "|".join( + [item.severity, item.title, item.description] + ).encode() ).hexdigest() if item_key in items: - items[item_key].unsaved_endpoints.extend(item.unsaved_endpoints) + items[item_key].unsaved_endpoints.extend( + item.unsaved_endpoints + ) items[item_key].nb_occurences += 1 else: items[item_key] = item @@ -60,10 +64,14 @@ def get_confidence_numeric(self, confidence): # iterating through properties of each vulnerability def get_item(self, vuln, test, scanner): # scanner_confidence - scanner_confidence = self.get_confidence_numeric(vuln.get('confidence', 'Could not be determined')) + scanner_confidence = self.get_confidence_numeric( + vuln.get("confidence", "Could not be determined") + ) # description - description = f"Scanner: {scanner.get('name', 'Could not be determined')}\n" + description = ( + f"Scanner: {scanner.get('name', 'Could not be determined')}\n" + ) if "message" in vuln: description += f"{vuln['message']}\n" elif "description" in vuln: @@ -80,14 +88,18 @@ def get_item(self, vuln, test, scanner): # date if "discovered_at" in vuln: - finding.date = datetime.strptime(vuln["discovered_at"], "%Y-%m-%dT%H:%M:%S.%f") + finding.date = datetime.strptime( + vuln["discovered_at"], "%Y-%m-%dT%H:%M:%S.%f" + ) # id if "id" in vuln: finding.unique_id_from_tool = vuln["id"] # title - finding.title = vuln["name"] if "name" in vuln else finding.unique_id_from_tool + finding.title = ( + vuln["name"] if "name" in vuln else finding.unique_id_from_tool + ) # cwe for identifier in vuln["identifiers"]: if identifier["type"].lower() == "cwe": diff --git a/dojo/tools/gitlab_dep_scan/parser.py b/dojo/tools/gitlab_dep_scan/parser.py index f293a99ac2..16692e8819 100644 --- a/dojo/tools/gitlab_dep_scan/parser.py +++ b/dojo/tools/gitlab_dep_scan/parser.py @@ -4,7 +4,6 @@ class GitlabDepScanParser(object): - def get_scan_types(self): return ["GitLab Dependency Scanning Report"] @@ -26,10 +25,10 @@ def parse_json(self, json_output): try: data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) - except: + tree = json.loads(str(data, "utf-8")) + except Exception: tree = json.loads(data) - except: + except Exception: raise ValueError("Invalid format") return tree @@ -37,7 +36,7 @@ def parse_json(self, json_output): def get_items(self, tree, test): items = {} scanner = tree.get("scan", {}).get("scanner", {}) - for node in tree['vulnerabilities']: + for node in tree["vulnerabilities"]: item = self.get_item(node, test, scanner) if item: items[item.unique_id_from_tool] = item @@ -45,73 +44,89 @@ def get_items(self, 
tree, test): return list(items.values()) def get_item(self, vuln, test, scan): - if 'id' in vuln: - unique_id_from_tool = vuln['id'] + if "id" in vuln: + unique_id_from_tool = vuln["id"] else: - # If the new unique id is not provided, fall back to deprecated "cve" fingerprint (old version) - unique_id_from_tool = vuln['cve'] - - title = '' - if 'name' in vuln: - title = vuln['name'] - elif 'message' in vuln: - title = vuln['message'] - elif 'description' in vuln: - title = vuln['description'] + # If the new unique id is not provided, fall back to deprecated + # "cve" fingerprint (old version) + unique_id_from_tool = vuln["cve"] + + title = "" + if "name" in vuln: + title = vuln["name"] + elif "message" in vuln: + title = vuln["message"] + elif "description" in vuln: + title = vuln["description"] else: - # All other fields are optional, if none of them has a value, fall back on the unique id + # All other fields are optional, if none of them has a value, fall + # back on the unique id title = unique_id_from_tool - description = f'Scanner: {scan.get("name", "could not be determined")}\n' - if 'message' in vuln: + description = ( + f'Scanner: {scan.get("name", "could not be determined")}\n' + ) + if "message" in vuln: description += f"{vuln['message']}\n" - if 'description' in vuln: + if "description" in vuln: description += f"{vuln['description']}\n" - location = vuln['location'] - file_path = location['file'] if 'file' in location else None + location = vuln["location"] + file_path = location["file"] if "file" in location else None component_name = None component_version = None - if 'dependency' in location: - component_version = location['dependency']['version'] if 'version' in location['dependency'] else None - if 'package' in location['dependency']: - component_name = location['dependency']['package']['name'] if 'name' in location['dependency']['package'] else None - - severity = vuln['severity'] - if severity in ['Undefined', 'Unknown']: + if "dependency" in location: + component_version = ( + location["dependency"]["version"] + if "version" in location["dependency"] + else None + ) + if "package" in location["dependency"]: + component_name = ( + location["dependency"]["package"]["name"] + if "name" in location["dependency"]["package"] + else None + ) + + severity = vuln["severity"] + if severity in ["Undefined", "Unknown"]: # Severity can be "Undefined" or "Unknown" in report - # In that case we set it as Info and specify the initial severity in the title - title = f'[{severity} severity] {title}' - severity = 'Info' + # In that case we set it as Info and specify the initial severity + # in the title + title = f"[{severity} severity] {title}" + severity = "Info" # Dependency Scanning analyzers doesn't provide confidence property - # See https://docs.gitlab.com/ee/user/application_security/dependency_scanning/analyzers.html#analyzers-data + # See + # https://docs.gitlab.com/ee/user/application_security/dependency_scanning/analyzers.html#analyzers-data scanner_confidence = False - mitigation = '' - if 'solution' in vuln: - mitigation = vuln['solution'] + mitigation = "" + if "solution" in vuln: + mitigation = vuln["solution"] cwe = None vulnerability_id = None - references = '' - if 'identifiers' in vuln: - for identifier in vuln['identifiers']: - if identifier['type'].lower() == 'cwe': - cwe = identifier['value'] - elif identifier['type'].lower() == 'cve': - vulnerability_id = identifier['value'] + references = "" + if "identifiers" in vuln: + for identifier in vuln["identifiers"]: + if 
identifier["type"].lower() == "cwe": + cwe = identifier["value"] + elif identifier["type"].lower() == "cve": + vulnerability_id = identifier["value"] else: references += f"Identifier type: {identifier['type']}\n" references += f"Name: {identifier['name']}\n" references += f"Value: {identifier['value']}\n" - if 'url' in identifier: + if "url" in identifier: references += f"URL: {identifier['url']}\n" - references += '\n' + references += "\n" finding = Finding( - title=f"{vulnerability_id}: {title}" if vulnerability_id else title, + title=f"{vulnerability_id}: {title}" + if vulnerability_id + else title, test=test, description=description, severity=severity, diff --git a/dojo/tools/gitlab_secret_detection_report/parser.py b/dojo/tools/gitlab_secret_detection_report/parser.py index 64753b96f0..f6e89adb84 100644 --- a/dojo/tools/gitlab_secret_detection_report/parser.py +++ b/dojo/tools/gitlab_secret_detection_report/parser.py @@ -29,7 +29,9 @@ def get_findings(self, file, test): # Vulnerabilities is stored on vulnerabilities key vulnerabilities = data["vulnerabilities"] - detection_string = "detected; please remove and revoke it if this is a leak." + detection_string = ( + "detected; please remove and revoke it if this is a leak." + ) for vulnerability in vulnerabilities: title = vulnerability.get("message", vulnerability.get("name")) if detection_string not in title: @@ -53,7 +55,9 @@ def get_findings(self, file, test): if "start_line" in location: finding.line = int(location["start_line"]) if "raw_source_code_extract" in vulnerability: - finding.description += "\n" + vulnerability["raw_source_code_extract"] + finding.description += ( + "\n" + vulnerability["raw_source_code_extract"] + ) findings.append(finding) return findings From 7fc5c91830a85029ef18a8b52653a5eb5b3078fb Mon Sep 17 00:00:00 2001 From: Alejandro Tortolero Date: Fri, 21 Jul 2023 04:50:26 -0500 Subject: [PATCH 48/85] Update files with PEP8 standards in folder dojo/tools #001 (#8282) --- dojo/tools/dawnscanner/__init__.py | 2 +- dojo/tools/dawnscanner/parser.py | 11 +- dojo/tools/dependency_check/parser.py | 305 ++++++++++++++++++-------- dojo/tools/detect_secrets/parser.py | 27 ++- dojo/tools/dockerbench/parser.py | 131 +++++------ 5 files changed, 309 insertions(+), 167 deletions(-) diff --git a/dojo/tools/dawnscanner/__init__.py b/dojo/tools/dawnscanner/__init__.py index a9dff13c02..35525adf2f 100644 --- a/dojo/tools/dawnscanner/__init__.py +++ b/dojo/tools/dawnscanner/__init__.py @@ -1 +1 @@ -__author__ = 'jaguasch' +__author__ = "jaguasch" diff --git a/dojo/tools/dawnscanner/parser.py b/dojo/tools/dawnscanner/parser.py index 2471b8caac..e191d2da06 100644 --- a/dojo/tools/dawnscanner/parser.py +++ b/dojo/tools/dawnscanner/parser.py @@ -24,8 +24,11 @@ def get_findings(self, filename, test): items = [] for item in data["vulnerabilities"]: - - findingdetail = item["message"] if item["message"][0:2] != "b," else item["message"][0:-1] + findingdetail = ( + item["message"] + if item["message"][0:2] != "b," + else item["message"][0:-1] + ) finding = Finding( title=item["name"], @@ -40,7 +43,9 @@ def get_findings(self, filename, test): ) if self.CVE_REGEX.match(item["name"]): - finding.unsaved_vulnerability_ids = [self.CVE_REGEX.findall(item["name"])[0]] + finding.unsaved_vulnerability_ids = [ + self.CVE_REGEX.findall(item["name"])[0] + ] items.append(finding) diff --git a/dojo/tools/dependency_check/parser.py b/dojo/tools/dependency_check/parser.py index 3f2b6adb7a..93e6e5f3f1 100644 --- a/dojo/tools/dependency_check/parser.py +++ 
b/dojo/tools/dependency_check/parser.py @@ -15,47 +15,72 @@ class DependencyCheckParser(object): SEVERITY_MAPPING = { - 'info': 'Info', - 'low': 'Low', - 'moderate': 'Medium', - 'medium': 'Medium', - 'high': 'High', - 'critical': 'Critical' + "info": "Info", + "low": "Low", + "moderate": "Medium", + "medium": "Medium", + "high": "High", + "critical": "Critical", } def add_finding(self, finding, dupes): - key_str = '|'.join([ - str(finding.title), - str(finding.cwe), - str(finding.file_path).lower() - ]) - key = hashlib.sha256(key_str.encode('utf-8')).hexdigest() + key_str = "|".join( + [ + str(finding.title), + str(finding.cwe), + str(finding.file_path).lower(), + ] + ) + key = hashlib.sha256(key_str.encode("utf-8")).hexdigest() if key not in dupes: dupes[key] = finding - def get_filename_and_path_from_dependency(self, dependency, related_dependency, namespace): + def get_filename_and_path_from_dependency( + self, dependency, related_dependency, namespace + ): if not related_dependency: - return dependency.findtext(f'{namespace}fileName'), dependency.findtext(f'{namespace}filePath') - if related_dependency.findtext(f'{namespace}fileName'): - return related_dependency.findtext(f'{namespace}fileName'), related_dependency.findtext(f'{namespace}filePath') + return dependency.findtext( + f"{namespace}fileName" + ), dependency.findtext(f"{namespace}filePath") + if related_dependency.findtext(f"{namespace}fileName"): + return related_dependency.findtext( + f"{namespace}fileName" + ), related_dependency.findtext(f"{namespace}filePath") else: - # without filename, it would be just a duplicate finding so we have to skip it. filename is only present for relateddependencies since v6.0.0 - # logger.debug('related_dependency: %s', ElementTree.tostring(related_dependency, encoding='utf8', method='xml')) + # without filename, it would be just a duplicate finding so we have to skip it. 
filename + # is only present for relateddependencies since v6.0.0 + # logger.debug('related_dependency: %s', + # ElementTree.tostring(related_dependency, encoding='utf8', method='xml')) return None, None - def get_component_name_and_version_from_dependency(self, dependency, related_dependency, namespace): - identifiers_node = dependency.find(namespace + 'identifiers') + def get_component_name_and_version_from_dependency( + self, dependency, related_dependency, namespace + ): + identifiers_node = dependency.find(namespace + "identifiers") if identifiers_node: # analyzing identifier from the more generic to - package_node = identifiers_node.find('.//' + namespace + 'package') + package_node = identifiers_node.find(".//" + namespace + "package") if package_node: - id = package_node.findtext(f'{namespace}id') + id = package_node.findtext(f"{namespace}id") purl = PackageURL.from_string(id) purl_parts = purl.to_dict() - component_name = purl_parts['namespace'] + ':' if purl_parts['namespace'] and len(purl_parts['namespace']) > 0 else '' - component_name += purl_parts['name'] if purl_parts['name'] and len(purl_parts['name']) > 0 else '' + component_name = ( + purl_parts["namespace"] + ":" + if purl_parts["namespace"] + and len(purl_parts["namespace"]) > 0 + else "" + ) + component_name += ( + purl_parts["name"] + if purl_parts["name"] and len(purl_parts["name"]) > 0 + else "" + ) component_name = component_name if component_name else None - component_version = purl_parts['version'] if purl_parts['version'] and len(purl_parts['version']) > 0 else '' + component_version = ( + purl_parts["version"] + if purl_parts["version"] and len(purl_parts["version"]) > 0 + else "" + ) return component_name, component_version # vulnerabilityIds_node = identifiers_node.find('.//' + namespace + 'vulnerabilityIds') @@ -68,27 +93,46 @@ def get_component_name_and_version_from_dependency(self, dependency, related_dep # component_version = cpe.get_version()[0] if len(cpe.get_version()) > 0 else None # return component_name, component_version - cpe_node = identifiers_node.find('.//' + namespace + 'identifier[@type="cpe"]') + cpe_node = identifiers_node.find( + ".//" + namespace + 'identifier[@type="cpe"]' + ) if cpe_node: - id = cpe_node.findtext(f'{namespace}name') + id = cpe_node.findtext(f"{namespace}name") cpe = CPE(id) - component_name = cpe.get_vendor()[0] + ':' if len(cpe.get_vendor()) > 0 else '' - component_name += cpe.get_product()[0] if len(cpe.get_product()) > 0 else '' + component_name = ( + cpe.get_vendor()[0] + ":" + if len(cpe.get_vendor()) > 0 + else "" + ) + component_name += ( + cpe.get_product()[0] if len(cpe.get_product()) > 0 else "" + ) component_name = component_name if component_name else None - component_version = cpe.get_version()[0] if len(cpe.get_version()) > 0 else None + component_version = ( + cpe.get_version()[0] + if len(cpe.get_version()) > 0 + else None + ) return component_name, component_version - maven_node = identifiers_node.find('.//' + namespace + 'identifier[@type="maven"]') + maven_node = identifiers_node.find( + ".//" + namespace + 'identifier[@type="maven"]' + ) if maven_node: - maven_parts = maven_node.findtext(f'{namespace}name').split(':') + maven_parts = maven_node.findtext(f"{namespace}name").split( + ":" + ) # logger.debug('maven_parts:' + str(maven_parts)) if len(maven_parts) == 3: - component_name = maven_parts[0] + ':' + maven_parts[1] + component_name = maven_parts[0] + ":" + maven_parts[1] component_version = maven_parts[2] return component_name, component_version 
- # TODO what happens when there multiple evidencecollectednodes with product or version as type? - evidence_collected_node = dependency.find(namespace + 'evidenceCollected') + # TODO what happens when there multiple evidencecollectednodes with + # product or version as type? + evidence_collected_node = dependency.find( + namespace + "evidenceCollected" + ) if evidence_collected_node: # # @@ -104,19 +148,32 @@ def get_component_name_and_version_from_dependency(self, dependency, related_dep # will find the first product and version node. if there are multiple it may not pick the best # since 6.0.0 howoever it seems like there's always a packageurl above so not sure if we need the effort to # implement more logic here - product_node = evidence_collected_node.find('.//' + namespace + 'evidence[@type="product"]') + product_node = evidence_collected_node.find( + ".//" + namespace + 'evidence[@type="product"]' + ) if product_node: - component_name = product_node.findtext(f'{namespace}value') - version_node = evidence_collected_node.find('.//' + namespace + 'evidence[@type="version"]') + component_name = product_node.findtext(f"{namespace}value") + version_node = evidence_collected_node.find( + ".//" + namespace + 'evidence[@type="version"]' + ) if version_node: - component_version = version_node.findtext(f'{namespace}value') + component_version = version_node.findtext( + f"{namespace}value" + ) return component_name, component_version return None, None - def get_finding_from_vulnerability(self, dependency, related_dependency, vulnerability, test, namespace): - dependency_filename, dependency_filepath = self.get_filename_and_path_from_dependency(dependency, related_dependency, namespace) + def get_finding_from_vulnerability( + self, dependency, related_dependency, vulnerability, test, namespace + ): + ( + dependency_filename, + dependency_filepath, + ) = self.get_filename_and_path_from_dependency( + dependency, related_dependency, namespace + ) # logger.debug('dependency_filename: %s', dependency_filename) if dependency_filename is None: @@ -125,23 +182,26 @@ def get_finding_from_vulnerability(self, dependency, related_dependency, vulnera tags = [] mitigated = None is_Mitigated = False - name = vulnerability.findtext(f'{namespace}name') - if vulnerability.find(f'{namespace}cwes'): - cwe_field = vulnerability.find(f'{namespace}cwes').findtext(f'{namespace}cwe') + name = vulnerability.findtext(f"{namespace}name") + if vulnerability.find(f"{namespace}cwes"): + cwe_field = vulnerability.find(f"{namespace}cwes").findtext( + f"{namespace}cwe" + ) else: - cwe_field = vulnerability.findtext(f'{namespace}cwe') + cwe_field = vulnerability.findtext(f"{namespace}cwe") - description = vulnerability.findtext(f'{namespace}description') + description = vulnerability.findtext(f"{namespace}description") - source = vulnerability.get('source') + source = vulnerability.get("source") if source: - description += '\n**Source:** ' + str(source) + description += "\n**Source:** " + str(source) - # I need the notes field since this is how the suppression is documented. - notes = vulnerability.findtext(f'.//{namespace}notes') + # I need the notes field since this is how the suppression is + # documented. + notes = vulnerability.findtext(f".//{namespace}notes") vulnerability_id = name[:28] - if vulnerability_id and not vulnerability_id.startswith('CVE'): + if vulnerability_id and not vulnerability_id.startswith("CVE"): # for vulnerability sources which have a CVE, it is the start of the 'name'. 
# for other sources, we have to set it to None vulnerability_id = None @@ -153,37 +213,58 @@ def get_finding_from_vulnerability(self, dependency, related_dependency, vulnera if m: cwe = int(m.group(2)) - component_name, component_version = self.get_component_name_and_version_from_dependency(dependency, related_dependency, namespace) + ( + component_name, + component_version, + ) = self.get_component_name_and_version_from_dependency( + dependency, related_dependency, namespace + ) stripped_name = name # startswith CVE-XXX-YYY - stripped_name = re.sub(r'^CVE-\d{4}-\d{4,7}', '', stripped_name).strip() + stripped_name = re.sub( + r"^CVE-\d{4}-\d{4,7}", "", stripped_name + ).strip() # startswith CWE-XXX: - stripped_name = re.sub(r'^CWE-\d+\:', '', stripped_name).strip() + stripped_name = re.sub(r"^CWE-\d+\:", "", stripped_name).strip() # startswith CWE-XXX - stripped_name = re.sub(r'^CWE-\d+', '', stripped_name).strip() + stripped_name = re.sub(r"^CWE-\d+", "", stripped_name).strip() if component_name is None: - logger.warning("component_name was None for File: {}, using dependency file name instead.".format(dependency_filename)) + logger.warning( + "component_name was None for File: {}, using dependency file name instead.".format( + dependency_filename + ) + ) component_name = dependency_filename # some changes in v6.0.0 around CVSS version information # https://github.com/jeremylong/DependencyCheck/pull/2781 - cvssv2_node = vulnerability.find(namespace + 'cvssV2') - cvssv3_node = vulnerability.find(namespace + 'cvssV3') - severity = vulnerability.findtext(f'{namespace}severity') + cvssv2_node = vulnerability.find(namespace + "cvssV2") + cvssv3_node = vulnerability.find(namespace + "cvssV3") + severity = vulnerability.findtext(f"{namespace}severity") if not severity: if cvssv3_node is not None: - severity = cvssv3_node.findtext(f'{namespace}baseSeverity').lower().capitalize() + severity = ( + cvssv3_node.findtext(f"{namespace}baseSeverity") + .lower() + .capitalize() + ) elif cvssv2_node is not None: - severity = cvssv2_node.findtext(f'{namespace}severity').lower().capitalize() + severity = ( + cvssv2_node.findtext(f"{namespace}severity") + .lower() + .capitalize() + ) # handle if the severity have something not in the mapping # default to 'Medium' and produce warnings in logs if severity: if severity.strip().lower() not in self.SEVERITY_MAPPING: - logger.warning(f"Warning: Unknow severity value detected '{severity}'. Bypass to 'Medium' value") + logger.warning( + f"Warning: Unknow severity value detected '{severity}'. 
Bypass to 'Medium' value" + ) severity = "Medium" else: severity = self.SEVERITY_MAPPING[severity.strip().lower()] @@ -191,21 +272,26 @@ def get_finding_from_vulnerability(self, dependency, related_dependency, vulnera severity = "Medium" reference_detail = None - references_node = vulnerability.find(namespace + 'references') + references_node = vulnerability.find(namespace + "references") if references_node is not None: - reference_detail = '' - for reference_node in references_node.findall(namespace + 'reference'): + reference_detail = "" + for reference_node in references_node.findall( + namespace + "reference" + ): ref_source = reference_node.findtext(f"{namespace}source") ref_url = reference_node.findtext(f"{namespace}url") ref_name = reference_node.findtext(f"{namespace}name") if ref_url == ref_name: - reference_detail += f'**Source:** {ref_source}\n' \ - f'**URL:** {ref_url}\n\n' + reference_detail += ( + f"**Source:** {ref_source}\n" f"**URL:** {ref_url}\n\n" + ) else: - reference_detail += f'**Source:** {ref_source}\n' \ - f'**URL:** {ref_url}\n' \ - f'**Name:** {ref_name}\n\n' + reference_detail += ( + f"**Source:** {ref_source}\n" + f"**URL:** {ref_url}\n" + f"**Name:** {ref_name}\n\n" + ) if related_dependency is not None: tags.append("related") @@ -214,20 +300,29 @@ def get_finding_from_vulnerability(self, dependency, related_dependency, vulnera if notes is None: notes = "Document on why we are suppressing this vulnerability is missing!" tags.append("no_suppression_document") - mitigation = '**This vulnerability is mitigated and/or suppressed:** {}\n'.format(notes) - mitigation = mitigation + 'Update {}:{} to at least the version recommended in the description'.format(component_name, component_version) + mitigation = "**This vulnerability is mitigated and/or suppressed:** {}\n".format( + notes + ) + mitigation = ( + mitigation + + "Update {}:{} to at least the version recommended in the description".format( + component_name, component_version + ) + ) mitigated = datetime.utcnow() is_Mitigated = True active = False tags.append("suppressed") else: - mitigation = 'Update {}:{} to at least the version recommended in the description'.format(component_name, component_version) - description += '\n**Filepath:** ' + str(dependency_filepath) + mitigation = "Update {}:{} to at least the version recommended in the description".format( + component_name, component_version + ) + description += "\n**Filepath:** " + str(dependency_filepath) active = True finding = Finding( - title=f'{component_name}:{component_version} | {name}', + title=f"{component_name}:{component_version} | {name}", file_path=dependency_filename, test=test, cwe=cwe, @@ -261,7 +356,7 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): dupes = dict() - namespace = '' + namespace = "" content = filename.read() scan = ElementTree.fromstring(content) @@ -269,39 +364,73 @@ def get_findings(self, filename, test): matches = re.match(regex, scan.tag) try: namespace = matches.group(0) - except: + except BaseException: namespace = "" - dependencies = scan.find(namespace + 'dependencies') + dependencies = scan.find(namespace + "dependencies") scan_date = None if scan.find(f"{namespace}projectInfo"): projectInfo_node = scan.find(f"{namespace}projectInfo") if projectInfo_node.findtext(f"{namespace}reportDate"): - scan_date = dateutil.parser.parse(projectInfo_node.findtext(f"{namespace}reportDate")) + scan_date = dateutil.parser.parse( + projectInfo_node.findtext(f"{namespace}reportDate") + ) if 
dependencies: - for dependency in dependencies.findall(namespace + 'dependency'): - vulnerabilities = dependency.find(namespace + 'vulnerabilities') + for dependency in dependencies.findall(namespace + "dependency"): + vulnerabilities = dependency.find( + namespace + "vulnerabilities" + ) if vulnerabilities is not None: - for vulnerability in vulnerabilities.findall(namespace + 'vulnerability'): + for vulnerability in vulnerabilities.findall( + namespace + "vulnerability" + ): if vulnerability: - finding = self.get_finding_from_vulnerability(dependency, None, vulnerability, test, namespace) + finding = self.get_finding_from_vulnerability( + dependency, + None, + vulnerability, + test, + namespace, + ) if scan_date: finding.date = scan_date self.add_finding(finding, dupes) - relatedDependencies = dependency.find(namespace + 'relatedDependencies') + relatedDependencies = dependency.find( + namespace + "relatedDependencies" + ) if relatedDependencies: - for relatedDependency in relatedDependencies.findall(namespace + 'relatedDependency'): - finding = self.get_finding_from_vulnerability(dependency, relatedDependency, vulnerability, test, namespace) + for ( + relatedDependency + ) in relatedDependencies.findall( + namespace + "relatedDependency" + ): + finding = ( + self.get_finding_from_vulnerability( + dependency, + relatedDependency, + vulnerability, + test, + namespace, + ) + ) if finding: # could be None if scan_date: finding.date = scan_date self.add_finding(finding, dupes) - for suppressedVulnerability in vulnerabilities.findall(namespace + 'suppressedVulnerability'): + for suppressedVulnerability in vulnerabilities.findall( + namespace + "suppressedVulnerability" + ): if suppressedVulnerability: - finding = self.get_finding_from_vulnerability(dependency, None, suppressedVulnerability, test, namespace) + finding = self.get_finding_from_vulnerability( + dependency, + None, + suppressedVulnerability, + test, + namespace, + ) if scan_date: finding.date = scan_date self.add_finding(finding, dupes) diff --git a/dojo/tools/detect_secrets/parser.py b/dojo/tools/detect_secrets/parser.py index 0a71bb3ed9..0da274ba9f 100644 --- a/dojo/tools/detect_secrets/parser.py +++ b/dojo/tools/detect_secrets/parser.py @@ -21,22 +21,22 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): data = json.load(filename) dupes = {} - if data.get('generated_at'): - find_date = dateutil.parser.parse(data.get('generated_at')) - for detect_file in data.get('results'): - for item in data.get('results').get(detect_file): - type = item.get('type') - file = item.get('filename') - hashed_secret = item.get('hashed_secret') - is_verified = item.get('is_verified') - line = item.get('line_number') + if data.get("generated_at"): + find_date = dateutil.parser.parse(data.get("generated_at")) + for detect_file in data.get("results"): + for item in data.get("results").get(detect_file): + type = item.get("type") + file = item.get("filename") + hashed_secret = item.get("hashed_secret") + is_verified = item.get("is_verified") + line = item.get("line_number") description = "Detected potential secret with the following related data:\n" description += "**Filename:** " + file + "\n" description += "**Line:** " + str(line) + "\n" description += "**Type:** " + type + "\n" dupe_key = hashlib.sha256( - (type + file + str(line) + hashed_secret).encode('utf-8') + (type + file + str(line) + hashed_secret).encode("utf-8") ).hexdigest() if dupe_key in dupes: @@ -51,11 +51,14 @@ def get_findings(self, 
filename, test): date=find_date, severity="High", verified=is_verified, - active='is_secret' in item and item['is_secret'] is True or 'is_secret' not in item, + active="is_secret" in item + and item["is_secret"] is True + or "is_secret" not in item, file_path=file, line=line, nb_occurences=1, - false_p='is_secret' in item and item['is_secret'] is False, + false_p="is_secret" in item + and item["is_secret"] is False, ) dupes[dupe_key] = finding return list(dupes.values()) diff --git a/dojo/tools/dockerbench/parser.py b/dojo/tools/dockerbench/parser.py index d3cdbfacaa..870c3bc31b 100644 --- a/dojo/tools/dockerbench/parser.py +++ b/dojo/tools/dockerbench/parser.py @@ -5,7 +5,6 @@ class DockerBenchParser(object): - def get_scan_types(self): return ["docker-bench-security Scan"] @@ -24,17 +23,19 @@ def get_findings(self, json_output, test): def get_tests(tree, test): items_from_tests = [] - description = '' - if 'id' in tree: - description += tree['id'] + " " - if 'text' in tree: - description += tree['text'] - test_start = tree.get('start') - test_end = tree.get('end') - description += '\n' - - for node in tree['tests']: - items_from_results = get_results(node, test, test_start, test_end, description) + description = "" + if "id" in tree: + description += tree["id"] + " " + if "text" in tree: + description += tree["text"] + test_start = tree.get("start") + test_end = tree.get("end") + description += "\n" + + for node in tree["tests"]: + items_from_results = get_results( + node, test, test_start, test_end, description + ) items_from_tests += items_from_results return list(items_from_tests) @@ -43,13 +44,13 @@ def get_tests(tree, test): def get_results(tree, test, test_start, test_end, description): items_from_results = [] - if 'section' in tree: - description += tree['section'] + ' ' - if 'desc' in tree: - description += tree['desc'] - description += '\n' + if "section" in tree: + description += tree["section"] + " " + if "desc" in tree: + description += tree["desc"] + description += "\n" - for node in tree['results']: + for node in tree["results"]: item = get_item(node, test, test_start, test_end, description) if item: items_from_results.append(item) @@ -58,64 +59,68 @@ def get_results(tree, test, test_start, test_end, description): def get_item(vuln, test, test_start, test_end, description): - - status = vuln.get('result') - reason = vuln.get('desc') + status = vuln.get("result") + reason = vuln.get("desc") if status is None: return None - # docker-bench-security doesn't define severities. So we use the status to define the severity - if status.upper() == 'FAIL': - severity = 'Critical' - elif status.upper() == 'WARN' and '(Manual)' not in reason: - severity = 'High' - elif status.upper() == 'INFO' and '(Manual)' not in reason: - severity = 'Low' - elif status.upper() == 'NOTE' and '(Manual)' not in reason: - severity = 'Info' + # docker-bench-security doesn't define severities. 
So we use the status to + # define the severity + if status.upper() == "FAIL": + severity = "Critical" + elif status.upper() == "WARN" and "(Manual)" not in reason: + severity = "High" + elif status.upper() == "INFO" and "(Manual)" not in reason: + severity = "Low" + elif status.upper() == "NOTE" and "(Manual)" not in reason: + severity = "Info" else: return None # return here, e.g if status is PASS and don't add new finding - unique_id_from_tool = vuln.get('id') + unique_id_from_tool = vuln.get("id") - test_description = vuln.get('desc', 'No description') + test_description = vuln.get("desc", "No description") if unique_id_from_tool: - title = f'{unique_id_from_tool} - {test_description}' + title = f"{unique_id_from_tool} - {test_description}" else: - title = f'No test number - {test_description}' + title = f"No test number - {test_description}" if unique_id_from_tool: description += unique_id_from_tool if reason: - description += '\n' - description += 'desc: {}\n'.format(reason) - if vuln.get('details'): - description += '\n' - description += vuln['details'] - if vuln.get('audit'): - description += '\n' - description += 'Audit: {}\n'.format(vuln['audit']) - if vuln.get('expected_result'): - description += '\n' - description += 'Expected result: {}\n'.format(vuln['expected_result']) - if vuln.get('actual_value'): - description += '\n' - description += 'Actual value: {}\n'.format(vuln['actual_value']) - - mitigation = vuln.get('remediation') - if vuln.get('remediation-impact'): - mitigation += '\n' - mitigation += 'mitigation impact: {}\n'.format(vuln['remediation-impact']) - - finding = Finding(title=title, - date=datetime.fromtimestamp(int(test_end)), - test=test, - description=description, - severity=severity, - mitigation=mitigation, - unique_id_from_tool=unique_id_from_tool, - static_finding=True, - dynamic_finding=False) + description += "\n" + description += "desc: {}\n".format(reason) + if vuln.get("details"): + description += "\n" + description += vuln["details"] + if vuln.get("audit"): + description += "\n" + description += "Audit: {}\n".format(vuln["audit"]) + if vuln.get("expected_result"): + description += "\n" + description += "Expected result: {}\n".format(vuln["expected_result"]) + if vuln.get("actual_value"): + description += "\n" + description += "Actual value: {}\n".format(vuln["actual_value"]) + + mitigation = vuln.get("remediation") + if vuln.get("remediation-impact"): + mitigation += "\n" + mitigation += "mitigation impact: {}\n".format( + vuln["remediation-impact"] + ) + + finding = Finding( + title=title, + date=datetime.fromtimestamp(int(test_end)), + test=test, + description=description, + severity=severity, + mitigation=mitigation, + unique_id_from_tool=unique_id_from_tool, + static_finding=True, + dynamic_finding=False, + ) return finding From ba3a79b270d19fc2bfa6da39234941035a198260 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Quirin=20Hardy=20Zie=C3=9Fler?= Date: Fri, 21 Jul 2023 22:15:07 +0200 Subject: [PATCH 49/85] fix: Sonarqube re-upload #8379 (#8383) * :tada: fix #8379 * flake8 * :wastebasket: * optimized exception output * optimized exception output --- dojo/tools/api_sonarqube/api_client.py | 17 +++++++++++++---- dojo/tools/api_sonarqube/importer.py | 2 +- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/dojo/tools/api_sonarqube/api_client.py b/dojo/tools/api_sonarqube/api_client.py index baef2e6d87..67f08eee22 100644 --- a/dojo/tools/api_sonarqube/api_client.py +++ b/dojo/tools/api_sonarqube/api_client.py @@ -261,11 +261,13 @@ def 
get_issue(self, issue_key): if issue["key"] == issue_key: return issue raise Exception( - f"""Expected Issue "{issue_key}", but it returned " - "{[x.get('key') for x in response.json().get('issues')]}.""" + f"Expected Issue \"{issue_key}\", but it returned" + f"{[x.get('key') for x in response.json().get('issues')]}. " + "Full response: " + f"{response.json()}" ) - def get_rule(self, rule_id): + def get_rule(self, rule_id, organization=None): """ Get detailed information about a rule :param rule_id: @@ -273,9 +275,16 @@ def get_rule(self, rule_id): """ rule = self.rules_cache.get(rule_id) if not rule: + request_filter = { + "key": rule_id + } + if organization: + request_filter["organization"] = organization + elif self.org_id: + request_filter["organization"] = self.org_id response = self.session.get( url=f"{self.sonar_api_url}/rules/show", - params={"key": rule_id}, + params=request_filter, headers=self.default_headers, ) if not response.ok: diff --git a/dojo/tools/api_sonarqube/importer.py b/dojo/tools/api_sonarqube/importer.py index ec10c7999c..31a5c62e77 100644 --- a/dojo/tools/api_sonarqube/importer.py +++ b/dojo/tools/api_sonarqube/importer.py @@ -144,7 +144,7 @@ def import_issues(self, test): component_key = issue["component"] line = issue.get("line") rule_id = issue["rule"] - rule = client.get_rule(rule_id) + rule = client.get_rule(rule_id, organization=organization) severity = self.convert_sonar_severity(issue["severity"]) try: sonarqube_permalink = f"[Issue permalink]({sonarUrl}project/issues?issues={issue['key']}&open={issue['key']}&resolved={issue['status']}&id={issue['project']}) \n" From a818fe7d1ae61cf0cc19caa40f594b9d2c8b20b5 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 21 Jul 2023 17:16:21 -0500 Subject: [PATCH 50/85] Update flake8.yml --- .github/workflows/flake8.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/flake8.yml b/.github/workflows/flake8.yml index 83ff06b4b6..aa5d57da6e 100644 --- a/.github/workflows/flake8.yml +++ b/.github/workflows/flake8.yml @@ -31,6 +31,6 @@ jobs: uses: actions/checkout@v3 # - uses: tayfun/flake8-your-pr@master - - uses: valentijnscholten/flake8-your-pr@master + - uses: DefectDojo/flake8-your-pr@master env: GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" From 9ea4ff16263c485fc09edec33ed481a6e549e1b2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Jul 2023 20:45:27 -0500 Subject: [PATCH 51/85] Bump boto3 from 1.28.7 to 1.28.8 (#8411) Bumps [boto3](https://github.com/boto/boto3) from 1.28.7 to 1.28.8. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.7...1.28.8) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6fcf3362f0..2c07a2134d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.7 # Required for Celery Broker AWS (SQS) support +boto3==1.28.8 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From b8ea3437752a339207ae47672b9b5e7dcebcd9e6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 21 Jul 2023 20:46:17 -0500 Subject: [PATCH 52/85] Update dependency postcss from 8.4.26 to v8.4.27 (docs/package.json) (#8413) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docs/package-lock.json | 14 +++++++------- docs/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index bd3e42f6c1..c6fe4dae97 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -6,7 +6,7 @@ "": { "devDependencies": { "autoprefixer": "10.4.14", - "postcss": "8.4.26", + "postcss": "8.4.27", "postcss-cli": "10.1.0" } }, @@ -596,9 +596,9 @@ } }, "node_modules/postcss": { - "version": "8.4.26", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.26.tgz", - "integrity": "sha512-jrXHFF8iTloAenySjM/ob3gSj7pCu0Ji49hnjqzsgSRa50hkWCKD0HQ+gMNJkW38jBI68MpAAg7ZWwHwX8NMMw==", + "version": "8.4.27", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.27.tgz", + "integrity": "sha512-gY/ACJtJPSmUFPDCHtX78+01fHa64FaU4zaaWfuh1MhGJISufJAH4cun6k/8fwsHYeK4UQmENQK+tRLCFJE8JQ==", "dev": true, "funding": [ { @@ -1366,9 +1366,9 @@ "dev": true }, "postcss": { - "version": "8.4.26", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.26.tgz", - "integrity": "sha512-jrXHFF8iTloAenySjM/ob3gSj7pCu0Ji49hnjqzsgSRa50hkWCKD0HQ+gMNJkW38jBI68MpAAg7ZWwHwX8NMMw==", + "version": "8.4.27", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.27.tgz", + "integrity": "sha512-gY/ACJtJPSmUFPDCHtX78+01fHa64FaU4zaaWfuh1MhGJISufJAH4cun6k/8fwsHYeK4UQmENQK+tRLCFJE8JQ==", "dev": true, "requires": { "nanoid": "^3.3.6", diff --git a/docs/package.json b/docs/package.json index aeb1be46b6..bdb66a8613 100644 --- a/docs/package.json +++ b/docs/package.json @@ -1,6 +1,6 @@ { "devDependencies": { - "postcss": "8.4.26", + "postcss": "8.4.27", "autoprefixer": "10.4.14", "postcss-cli": "10.1.0" } From 05ff9ff01a181ab220e23e828c81df30b0325df5 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 24 Jul 2023 15:22:59 +0000 Subject: [PATCH 53/85] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 96d757ec98..4e8aea8365 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.24.3", + "version": "2.25.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 668b4a5cbf..4c1f6f5856 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will 
use this app. from .celery import app as celery_app # noqa -__version__ = '2.24.3' +__version__ = '2.25.0-dev' __url__ = 'https://github.com/DefectDojo/django-DefectDojo' __docs__ = 'https://documentation.defectdojo.com' diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index fa782dbafb..bb8dd70b3f 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.24.3" +appVersion: "2.25.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.77 +version: 1.6.78-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From b582c48f770045727b3e3e56fc8540d523ed71b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Jul 2023 08:37:03 -0500 Subject: [PATCH 54/85] Bump drf-spectacular from 0.26.3 to 0.26.4 (#8416) Bumps [drf-spectacular](https://github.com/tfranzel/drf-spectacular) from 0.26.3 to 0.26.4. - [Release notes](https://github.com/tfranzel/drf-spectacular/releases) - [Changelog](https://github.com/tfranzel/drf-spectacular/blob/master/CHANGELOG.rst) - [Commits](https://github.com/tfranzel/drf-spectacular/compare/0.26.3...0.26.4) --- updated-dependencies: - dependency-name: drf-spectacular dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2c07a2134d..ef1c950761 100644 --- a/requirements.txt +++ b/requirements.txt @@ -73,7 +73,7 @@ django-fieldsignals==0.7.0 hyperlink==21.0.0 django-test-migrations==1.3.0 djangosaml2==1.7.0 -drf-spectacular==0.26.3 +drf-spectacular==0.26.4 django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 From ea27b1a4076c5b5703667a16849cd8dfe7b3d626 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Tue, 25 Jul 2023 11:32:20 -0500 Subject: [PATCH 55/85] Add JSON Ingestion to Veracode Parser (#8414) * Add JSON Ingestion to Veracode Parser * Update dojo/tools/veracode/json_parser.py Co-authored-by: Damien Carol --------- Co-authored-by: Damien Carol --- .../en/integrations/parsers/file/veracode.md | 45 +- dojo/tools/veracode/json_parser.py | 365 +++++++++++ dojo/tools/veracode/parser.py | 351 +--------- dojo/tools/veracode/xml_parser.py | 346 ++++++++++ .../veracode/dynamic_embedded_format.json | 233 +++++++ .../dynamic_findings_list_format.json | 203 ++++++ .../scans/veracode/sca_embedded_format.json | 618 ++++++++++++++++++ .../veracode/sca_findings_list_format.json | 588 +++++++++++++++++ .../veracode/static_embedded_format.json | 194 ++++++ .../veracode/static_findings_list_format.json | 164 +++++ unittests/tools/test_veracode_parser.py | 197 +++++- 11 files changed, 2962 insertions(+), 342 deletions(-) create mode 100644 dojo/tools/veracode/json_parser.py create mode 100644 dojo/tools/veracode/xml_parser.py create mode 100644 unittests/scans/veracode/dynamic_embedded_format.json create mode 100644 unittests/scans/veracode/dynamic_findings_list_format.json create mode 100644 unittests/scans/veracode/sca_embedded_format.json create mode 100644 unittests/scans/veracode/sca_findings_list_format.json create mode 100644 unittests/scans/veracode/static_embedded_format.json create mode 100644 
unittests/scans/veracode/static_findings_list_format.json diff --git a/docs/content/en/integrations/parsers/file/veracode.md b/docs/content/en/integrations/parsers/file/veracode.md index 4fa9e99d2d..a155c1a9d4 100644 --- a/docs/content/en/integrations/parsers/file/veracode.md +++ b/docs/content/en/integrations/parsers/file/veracode.md @@ -2,4 +2,47 @@ title: "Veracode" toc_hide: true --- -Detailed XML Report + +Veracode reports can be ingested in either XML or JSON Format + +- Detailed XML Report +- JSON REST Findings from `/appsec/v2/applications/{application_guid}/findings/` + - Acceptable scan types include `STATIC`, `DYNAMIC`, and `SCA` + - Findings with a status of `CLOSED` will not be imported into DefectDojo + - Acceptable formats are as follows: + - Findings list + - Requires slight modification of the response returned from the API + - Exmample of a request being: `url | jq "{findings}"` + - Desired Format: + ``` + { + "findings": [ + { + ... + }, + ... + ] + } + ``` + - Embedded + - This response can be save directly to a file and uploaded + - Not as ideal for crafting a refined report consisting of multiple requests + - Desired Format: + ``` + { + "_embedded": { + "findings": [ + { + ... + }, + ... + ] + }, + "_links": { + ... + }, + "page": { + ... + } + } + ``` diff --git a/dojo/tools/veracode/json_parser.py b/dojo/tools/veracode/json_parser.py new file mode 100644 index 0000000000..fcbc46ad99 --- /dev/null +++ b/dojo/tools/veracode/json_parser.py @@ -0,0 +1,365 @@ +import json +import re +from cvss import CVSS3 + +from dojo.models import Finding +from dojo.models import Endpoint + + +class VeracodeJSONParser(object): + """This parser is written for Veracode REST Findings. + + API endpoints to use: https://docs.veracode.com/r/c_findings_v2_examples + + Example: curl | jq "{findings}" + + This should convert the format into something like this: + { + "findings": [ + { + ... + }, + ... + ] + } + """ + + severity_mapping = { + 1: "Info", + 2: "Low", + 3: "Medium", + 4: "High", + 5: "Critical", + } + + exploitability_mapping = { + -2: "Very Unlikely", + -1: "Unlikely", + 0: "Neutral", + 1: "Likely", + 2: "Very Likely", + } + + # This mapping was found here: https://docs.veracode.com/r/c_integrated_license_agent + license_mapping = { + 1: ("Unrecognized", "Unrecognized indicates that no license was found for the component. However, this does not indicate that there is no risk associated with the license."), + 2: ("Low", "Low-risk licenses are typically permissive licenses that require you to preserve the copyright and license notices, but allow distribution under different terms without disclosing source code."), + 3: ("Medium", "Medium-risk licenses are typically weak copyleft licenses that require you to preserve the copyright and license notices, and require distributors to make the source code of the component and any modifications under the same terms."), + 4: ("High", "High-risk licenses are typically strong copyleft licenses that require you to preserve the copyright and license notices, and require distributors to make the source code of the component and any modifications under the same terms."), + 5: ("Non OSS", "Non-OSS indicates that this file could be subject to commercial license terms. 
If so, you should refer to your applicable license agreement with such vendor for additional information."), + } + + def get_findings(self, json_output, test): + findings = [] + if json_output: + json_data = json.load(json_output) + findings += self.get_items(json_data, test) + return findings + + def get_items(self, tree, test): + parsed_findings = [] + # Attempt to get the findings where they are expected to be + # If they are not there, make an educated guess that the uploaded report + # is in the format of the direct response from the API + items = tree.get("findings", []) or tree.get("_embedded", {}).get("findings", []) + for vuln in items: + # Check the status of the finding to determine if an object should even be created + # If the finding is closed, skip it + if vuln.get("finding_status", {}).get("status", "") == "CLOSED": + continue + # Determine the scan type to dictate how that finding details will be handled + scan_type = vuln.get("scan_type") + # Get the finding details object + finding_details = vuln.get("finding_details") + # Get the info to determine if this finding violates a policy + policy_violated = vuln.get("violates_policy") + # Set up the finding with as many contextual details as possible + finding = self.create_finding_from_details(finding_details, scan_type, policy_violated) + # If the finding returned is empty, return nothing as the scan type must + # not be supported yet + if not finding: + continue + finding = self.parse_description(finding, vuln.get("description"), scan_type) + finding.nb_occurences = vuln.get("count", 1) + finding.test = test + + parsed_findings.append(finding) + + return parsed_findings + + def create_finding_from_details(self, finding_details, scan_type, policy_violated) -> Finding: + # Fetch the common attributes that should be in every scan type + severity = self.severity_mapping.get(finding_details.get("severity", 1)) + # Set up the finding with just severity for now + finding = Finding( + title=f"{scan_type} Finding", + severity=severity, + description="### Meta Information\n", + ) + # Set some unsaved fields + finding.unsaved_tags = [] + finding.unsaved_endpoints = [] + finding.unsaved_vulnerability_ids = [] + # Determine if this finding violates a policy + if policy_violated: + finding.unsaved_tags.append("policy-violation") + # Store the title in a var in case it may be needed later + cwe_title = None + # Try to get the common fields that may not be present + if cwe_dict := finding_details.get("cwe"): + cwe_title = cwe_dict.get("name") + finding.cwe = cwe_dict.get("id") + # Attempt to get the CVSS score + if uncleaned_cvss := finding_details.get("cvss"): + if isinstance(uncleaned_cvss, str): + if uncleaned_cvss.startswith("CVSS:3.1/") or uncleaned_cvss.startswith("CVSS:3.0/"): + finding.cvssv3 = CVSS3(str(uncleaned_cvss)).clean_vector(output_prefix=True) + elif not uncleaned_cvss.startswith("CVSS"): + finding.cvssv3 = CVSS3(f"CVSS:3.1/{str(uncleaned_cvss)}").clean_vector(output_prefix=True) + elif isinstance(uncleaned_cvss, (float, int)): + finding.cvssv3_score = float(uncleaned_cvss) + # Fill in extra info based on the scan type + if scan_type == "STATIC": + return self.add_static_details(finding, finding_details, backup_title=cwe_title) + elif scan_type == "DYNAMIC": + return self.add_dynamic_details(finding, finding_details, backup_title=cwe_title) + elif scan_type == "SCA": + return self.add_sca_details(finding, finding_details, backup_title=cwe_title) + + return None + + def add_static_details(self, finding, finding_details, 
backup_title=None) -> Finding: + finding.dynamic_finding = False + finding.static_finding = True + # Get the finding category to get the high level info about the vuln + if category := finding_details.get("finding_category"): + category_title = category.get("name") + else: + category_title = None + # Set the title of the finding to the name of the finding category. + # If not present, fall back on CWE title. If that is not present, do nothing + if category_title: + finding.title = category_title + elif backup_title: + finding.title = backup_title + # Fill in the file path and line number + if file_path := finding_details.get("file_path"): + finding.sast_source_file_path = file_path + finding.sast_sink_file_path = file_path + finding.file_path = file_path + if file_line_number := finding_details.get("file_line_number"): + finding.sast_source_line = file_line_number + finding.sast_sink_line = file_line_number + finding.line = file_line_number + if function_object := finding_details.get("procedure"): + if isinstance(function_object, str): + finding.sast_source_object = function_object + finding.sast_sink_object = function_object + # Set the exploitability if present + if exploitability_score := finding_details.get("exploitability"): + finding.description += f"**Exploitability Predication**: {self.exploitability_mapping.get(exploitability_score)}\n" + # Add the predicted attack vector if available + if attack_vector := finding_details.get("attack_vector"): + finding.description += f"**Attack Vector**: {attack_vector}\n" + # Add the module this vuln is located into the description + if module := finding_details.get("module"): + finding.description += f"**Module**: {module}\n" + + return finding + + def add_dynamic_details(self, finding, finding_details, backup_title=None) -> Finding: + finding.dynamic_finding = True + finding.static_finding = False + # Get the finding category to get the high level info about the vuln + if category := finding_details.get("finding_category"): + category_title = category.get("name") + else: + category_title = None + # Set the title of the finding to the name of the finding category. + # If not present, fall back on CWE title. 
If that is not present, do nothing + if category_title: + finding.title = category_title + elif backup_title: + finding.title = backup_title + # Add the url to the finding + if url := finding_details.get("url"): + # Create the Endpoint object from the url + finding.unsaved_endpoints.append( + Endpoint.from_uri(url) + ) + else: + # build it from the other attributes + host = finding_details.get("hostname") + port = finding_details.get("port") + path = finding_details.get("path") + # Create the Endpoint object from all of the pieces + finding.unsaved_endpoints.append( + Endpoint( + host=host, + port=port, + path=path, + ) + ) + # Add the plugin if available + if plugin := finding_details.get("plugin"): + finding.description += f"**Plugin**: {plugin}\n" + # Add the predicted attack vector if available + if attack_vector := finding_details.get("attack_vector"): + finding.description += f"**Attack Vector**: {attack_vector}\n" + # Add the vulnerable parameter into the description + if vulnerable_parameter := finding_details.get("vulnerable_parameter"): + finding.description += f"**Vulnerable Parameter**: {vulnerable_parameter}\n" + # Add a note that this finding was discovered by the VSA + if discovered_by_vsa := finding_details.get("discovered_by_vsa"): + if bool(discovered_by_vsa): + finding.description += "**Note**: This finding was discovered by Virtual Scan Appliance\n" + + return finding + + def add_sca_details(self, finding, finding_details, backup_title=None) -> Finding: + finding.dynamic_finding = False + finding.static_finding = False + # Set the initial standard as the CWE title + finding.title = backup_title + # Set some placeholders for title vars if needed + vuln_id = None + # Check for a CVE object + if cve_dict := finding_details.get("cve"): + vuln_id = cve_dict.get("name") + finding.unsaved_vulnerability_ids.append(vuln_id) + # See if the CVSS has already been set. 
If not, use the one here + if not finding.cvssv3: + if cvss_vector := cve_dict.get("cvss3", {}).get("vector"): + finding.cvssv3 = CVSS3(f"CVSS:3.1/{str(cvss_vector)}").clean_vector(output_prefix=True) + # Put the product ID in the metadata + if product_id := finding_details.get("product_id"): + finding.description += f"**Product ID**: {product_id}\n" + # Put the component ID in the metadata + if component_id := finding_details.get("component_id"): + finding.description += f"**Component ID**: {component_id}\n" + # Put the language in the metadata + if language := finding_details.get("language"): + finding.description += f"**Language**: {language}\n" + # List the paths where this component is found + if component_paths := finding_details.get("component_path", []): + # Build the component path string + component_paths_markdown = "#### Component Locations\n" + for path in component_paths: + component_paths_markdown += f"- {path.get('path')}\n" + # Do not add any extra text if there are no paths here + if component_paths_markdown != "#### Component Locations\n": + finding.description += component_paths_markdown + # List the licenses at the bottom of the metadata + if licenses := finding_details.get("licenses", []): + # Build the license string + license_markdown = "#### Licenses\n" + for license in licenses: + license_name = license.get("license_id") + license_details = self.license_mapping.get(int(license.get("risk_rating", 5))) + license_markdown += f"- {license_name}: {license_details[0]}\n - {license_details[1]}\n" + # Do not add any extra text if there are no licenses here + if license_markdown != "#### Licenses\n": + finding.description += license_markdown + # Add the component name and version + if component_name := finding_details.get("component_filename"): + if component_version := finding_details.get("version"): + finding.component_version = component_version + # Replace the version in the component name + finding.component_name = component_name.replace(finding.component_version, "") + # Check for any wonky formats post version replacement that had extensions + finding.component_name = finding.component_name.replace("-.", ".").replace("_.", ".") + # Check for the event that the component name did not have an extension, but name has a dangling hyphen/underscore + if finding.component_name.endswith("-") or finding.component_name.endswith("_"): + finding.component_name = finding.component_name[:-1]
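To make the component-name cleanup above concrete, here is a small standalone sketch (illustration only, not part of the patch); the helper name is invented, and the sample values are taken from the spring-boot-autoconfigure and semver entries in the SCA fixture further down:

    # Mirrors the cleanup steps in add_sca_details() above; for illustration only.
    def clean_component_name(component_filename: str, version: str) -> str:
        name = component_filename.replace(version, "")     # "spring-boot-autoconfigure-.jar"
        name = name.replace("-.", ".").replace("_.", ".")  # "spring-boot-autoconfigure.jar"
        # Trim a dangling hyphen/underscore left when the filename had no extension
        if name.endswith("-") or name.endswith("_"):
            name = name[:-1]
        return name

    print(clean_component_name("spring-boot-autoconfigure-2.5.14.jar", "2.5.14"))  # spring-boot-autoconfigure.jar
    print(clean_component_name("semver", "5.7.1"))  # semver (unchanged when the version is not in the filename)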
+ # Check if the CWE title was used. A cwe may not be present when a veracode SRCCLR is present + if not finding.title: + finding.title = f"{finding.component_name} - {vuln_id}" + + return finding + + def parse_description(self, finding, description_body, scan_type) -> Finding: + if scan_type == "STATIC": + # The description of the veracode finding is defined in three parts separated + # by <span> tags: + # - Description: A detailed explanation of the vulnerability and why it is bad + # - Mitigation: What to do about the vulnerability + # - References: Any external links to further knowledge related to the vulnerability + + # Split the description body into sections based on a "<span>" delimiter + sections = description_body.split("<span>") + # Trim out the closing span tags and any trailing spaces in each section + sections = [section.replace("</span>", "").strip() for section in sections if len(section) > 0] + # Make sure there is something to grab from the expected places + if len(sections) > 0: + finding.description += f"### Details\n{sections[0]}" + + # Determine there is a mitigation section in the first index + if len(sections) > 1 and "References:" not in sections[1]: + finding.mitigation = sections[1] + # Determine if the references section is actually in the first index + elif len(sections) > 1 and "References:" in sections[1]: + finding.references = self.parse_references(sections[1]) + + # Determine if the references are in the second index + if len(sections) > 2 and "References:" in sections[2]: + finding.references = self.parse_references(sections[2]) + elif scan_type == "DYNAMIC": + # The description of the veracode finding is defined in three parts separated + # by <span> tags: + # - Description: A detailed explanation of the vulnerability and why it is bad + # - Mitigation: What to do about the vulnerability + # - References: (No "References:" string) Any external links to further knowledge related to the vulnerability + + # Split the description body into sections based on a "<span>" delimiter + sections = description_body.split("<span>") + # Trim out the closing span tags and any trailing spaces in each section + sections = [section.replace("</span>", "").strip() for section in sections if len(section) > 0] + # Make sure there is something to grab from the expected places + if len(sections) > 0: + finding.description += f"### Details\n{sections[0]}" + + # Determine there is a mitigation section in the first index + if len(sections) > 1 and "<a href=" not in sections[1]: + finding.mitigation = sections[1] + # Determine if the references section is actually in the first index + elif len(sections) > 1 and "<a href=" in sections[1]: + finding.references = self.parse_references(sections[1]) + + # Determine if the references are in the second index + if len(sections) > 2 and "<a href=" in sections[2]: + finding.references = self.parse_references(sections[2]) + + return finding + + def parse_references(self, text) -> str: + # Remove the "References: " tag from the text + text = text.replace("References: ", "") + # Split on the href tags + sections = text.split("<a ") + sections = [section.strip() for section in sections if len(section) > 0] + # Iterate over the references to find the link and label for each entry + regex_search = 'href=\\"(.*)\\">(.*)</a>' + references = [] + for reference in sections: + if matches := re.search(regex_search, reference): + references.append(matches.groups()) + # Build a markdown string for the references text + reference_string = "" + for reference in references: + link = None + label = None + # Try to get the link + if len(reference) > 0: + link = reference[0] + if len(reference) > 1: + label = reference[1] + # Build a full link if both the label and link are present + if link and label: + reference_string += f"- [{label}]({link})\n" + elif link and not label: + reference_string += f"- {link}\n" + + return reference_string diff --git a/dojo/tools/veracode/parser.py b/dojo/tools/veracode/parser.py index a6ee3a38a2..008234f21b 100644 --- a/dojo/tools/veracode/parser.py +++ b/dojo/tools/veracode/parser.py @@ -1,30 +1,8 @@ -import re -import uuid -from datetime import 
datetime - -from defusedxml import ElementTree - -from dojo.models import Finding -from dojo.models import Endpoint - -XML_NAMESPACE = {"x": "https://www.veracode.com/schema/reports/export/1.0"} +from dojo.tools.veracode.json_parser import VeracodeJSONParser +from dojo.tools.veracode.xml_parser import VeracodeXMLParser class VeracodeParser(object): - """This parser is written for Veracode Detailed XML reports, version 1.5. - - Version is annotated in the report, `detailedreport/@report_format_version`. - see https://help.veracode.com/r/t_download_XML_report - """ - - vc_severity_mapping = { - 1: "Info", - 2: "Low", - 3: "Medium", - 4: "High", - 5: "Critical", - } - def get_scan_types(self): return ["Veracode Scan"] @@ -32,323 +10,16 @@ def get_label_for_scan_types(self, scan_type): return "Veracode Scan" def get_description_for_scan_types(self, scan_type): - return "Detailed XML Report" - - def get_findings(self, filename, test): - root = ElementTree.parse(filename).getroot() - - app_id = root.attrib["app_id"] - report_date = datetime.strptime( - root.attrib["last_update_time"], "%Y-%m-%d %H:%M:%S %Z" - ) - - dupes = dict() - - # Get SAST findings - # This assumes `` only exists within the `` - # nodes. - for category_node in root.findall( - "x:severity/x:category", namespaces=XML_NAMESPACE - ): - # Mitigation text. - mitigation_text = "" - mitigation_text += ( - category_node.find( - "x:recommendations/x:para", namespaces=XML_NAMESPACE - ).get("text") - + "\n\n" - ) - # Bullet list of recommendations: - mitigation_text += "".join( - list( - map( - lambda x: " * " + x.get("text") + "\n", - category_node.findall( - "x:recommendations/x:para/x:bulletitem", - namespaces=XML_NAMESPACE, - ), - ) - ) - ) - - for flaw_node in category_node.findall( - "x:cwe/x:staticflaws/x:flaw", namespaces=XML_NAMESPACE - ): - dupe_key = flaw_node.attrib["issueid"] - - # Only process if we didn't do that before. - if dupe_key not in dupes: - # Add to list. 
- dupes[dupe_key] = self.__xml_static_flaw_to_finding( - app_id, flaw_node, mitigation_text, test - ) - - for flaw_node in category_node.findall( - "x:cwe/x:dynamicflaws/x:flaw", namespaces=XML_NAMESPACE - ): - dupe_key = flaw_node.attrib["issueid"] - - if dupe_key not in dupes: - dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding( - app_id, flaw_node, mitigation_text, test - ) - - # Get SCA findings - for component in root.findall( - "x:software_composition_analysis/x:vulnerable_components" - "/x:component", - namespaces=XML_NAMESPACE, - ): - _library = component.attrib["library"] - if "library_id" in component.attrib and component.attrib[ - "library_id" - ].startswith("maven:"): - # Set the library name from the maven component if it's - # available to align with CycloneDX + Veracode SCA - split_library_id = component.attrib["library_id"].split(":") - if len(split_library_id) > 2: - _library = split_library_id[2] - _vendor = component.attrib["vendor"] - _version = component.attrib["version"] - - for vulnerability in component.findall( - "x:vulnerabilities/x:vulnerability", namespaces=XML_NAMESPACE - ): - # We don't have a Id for SCA findings so just generate a random - # one - dupes[str(uuid.uuid4())] = self.__xml_sca_flaw_to_finding( - test, - report_date, - _vendor, - _library, - _version, - vulnerability, - ) - - return list(dupes.values()) - - @classmethod - def __xml_flaw_to_unique_id(cls, app_id, xml_node): - issue_id = xml_node.attrib["issueid"] - return "app-" + app_id + "_issue-" + issue_id - - @classmethod - def __xml_flaw_to_severity(cls, xml_node): - return cls.vc_severity_mapping.get( - int(xml_node.attrib["severity"]), "Info" - ) - - @classmethod - def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): - # Defaults - finding = Finding() - finding.test = test - finding.mitigation = mitigation_text - finding.static_finding = True - finding.dynamic_finding = False - finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id( - app_id, xml_node - ) - - # Report values - finding.severity = cls.__xml_flaw_to_severity(xml_node) - finding.cwe = int(xml_node.attrib["cweid"]) - finding.title = xml_node.attrib["categoryname"] - finding.impact = "CIA Impact: " + xml_node.attrib["cia_impact"].upper() - - # Note that DD's legacy dedupe hashing uses the description field, - # so for compatibility, description field should contain very static - # info. - _description = xml_node.attrib["description"].replace(". ", ".\n") - finding.description = _description - - _references = "None" - if "References:" in _description: - _references = _description[ - _description.index("References:") + 13: - ].replace(") ", ")\n") - finding.references = ( - _references - + "\n\nVulnerable Module: " - + xml_node.attrib["module"] - + "\nType: " - + xml_node.attrib["type"] - + "\nVeracode issue ID: " - + xml_node.attrib["issueid"] + return ( + "Reports can be imported as JSON or XML report formats." 
) - _date_found = test.target_start - if "date_first_occurrence" in xml_node.attrib: - _date_found = datetime.strptime( - xml_node.attrib["date_first_occurrence"], - "%Y-%m-%d %H:%M:%S %Z", - ) - finding.date = _date_found - - _is_mitigated = False - _mitigated_date = None - if ( - "mitigation_status" in xml_node.attrib - and xml_node.attrib["mitigation_status"].lower() == "accepted" - ): - if ( - "remediation_status" in xml_node.attrib - and xml_node.attrib["remediation_status"].lower() == "fixed" - ): - _is_mitigated = True - else: - # This happens if any mitigation (including 'Potential false positive') - # was accepted in VC. - for mitigation in xml_node.findall( - "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE - ): - _is_mitigated = True - _mitigated_date = datetime.strptime( - mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z" - ) - finding.is_mitigated = _is_mitigated - finding.mitigated = _mitigated_date - finding.active = not _is_mitigated - - # Check if it's a FP in veracode. - # Only check in case finding was mitigated, since DD doesn't allow - # both `verified` and `false_p` to be true, while `verified` is implied on the import - # level, not on the finding-level. - _false_positive = False - if _is_mitigated: - _remediation_status = xml_node.attrib["remediation_status"].lower() - if ( - "false positive" in _remediation_status - or "falsepositive" in _remediation_status - ): - _false_positive = True - finding.false_p = _false_positive - - return finding - - @classmethod - def __xml_static_flaw_to_finding( - cls, app_id, xml_node, mitigation_text, test - ): - finding = cls.__xml_flaw_to_finding( - app_id, xml_node, mitigation_text, test - ) - finding.static_finding = True - finding.dynamic_finding = False - - _line_number = xml_node.attrib["line"] - _functionrelativelocation = xml_node.attrib["functionrelativelocation"] - if ( - _line_number is not None - and _line_number.isdigit() - and _functionrelativelocation is not None - and _functionrelativelocation.isdigit() - ): - finding.line = int(_line_number) + int(_functionrelativelocation) - finding.sast_source_line = finding.line - - _source_file = xml_node.attrib.get("sourcefile") - _sourcefilepath = xml_node.attrib.get("sourcefilepath") - finding.file_path = _sourcefilepath + _source_file - finding.sast_source_file_path = _sourcefilepath + _source_file - - _sast_source_obj = xml_node.attrib.get("functionprototype") - finding.sast_source_object = ( - _sast_source_obj if _sast_source_obj else None - ) - - finding.unsaved_tags = ["sast"] - - return finding - - @classmethod - def __xml_dynamic_flaw_to_finding( - cls, app_id, xml_node, mitigation_text, test - ): - finding = cls.__xml_flaw_to_finding( - app_id, xml_node, mitigation_text, test - ) - finding.static_finding = False - finding.dynamic_finding = True - - url_host = xml_node.attrib.get("url") - finding.unsaved_endpoints = [Endpoint.from_uri(url_host)] - - finding.unsaved_tags = ["dast"] - - return finding - - @staticmethod - def _get_cwe(val): - # Match only the first CWE! 
- cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE) - if cweSearch: - return int(cweSearch.group(1)) + def get_findings(self, filename, test): + if filename.name.lower().endswith(".xml"): + return VeracodeXMLParser().get_findings(filename, test) + elif filename.name.lower().endswith(".json"): + return VeracodeJSONParser().get_findings(filename, test) else: - return None - - @classmethod - def __xml_sca_flaw_to_finding( - cls, test, report_date, vendor, library, version, xml_node - ): - # Defaults - finding = Finding() - finding.test = test - finding.static_finding = True - finding.dynamic_finding = False - - # Report values - cvss_score = float(xml_node.attrib["cvss_score"]) - finding.cvssv3_score = cvss_score - finding.severity = cls.__xml_flaw_to_severity(xml_node) - finding.unsaved_vulnerability_ids = [xml_node.attrib["cve_id"]] - finding.cwe = cls._get_cwe(xml_node.attrib["cwe_id"]) - finding.title = "Vulnerable component: {0}:{1}".format( - library, version - ) - finding.component_name = library - finding.component_version = version - - # Use report-date, otherwise DD doesn't - # overwrite old matching SCA findings. - finding.date = report_date - - _description = "This library has known vulnerabilities.\n" - _description += ( - "**CVE:** {0} ({1})\n" - "CVS Score: {2} ({3})\n" - "Summary: \n>{4}" - "\n\n-----\n\n".format( - xml_node.attrib["cve_id"], - xml_node.attrib.get("first_found_date"), - xml_node.attrib["cvss_score"], - cls.vc_severity_mapping.get( - int(xml_node.attrib["severity"]), "Info" - ), - xml_node.attrib["cve_summary"], + raise ValueError( + "Filename extension not recognized. Use .xml or .json" ) - ) - finding.description = _description - - finding.unsaved_tags = ["sca"] - - _is_mitigated = False - _mitigated_date = None - if ( - "mitigation" in xml_node.attrib - and xml_node.attrib["mitigation"].lower() == "true" - ): - # This happens if any mitigation (including 'Potential false positive') - # was accepted in VC. - for mitigation in xml_node.findall( - "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE - ): - _is_mitigated = True - _mitigated_date = datetime.strptime( - mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z" - ) - finding.is_mitigated = _is_mitigated - finding.mitigated = _mitigated_date - finding.active = not _is_mitigated - - return finding diff --git a/dojo/tools/veracode/xml_parser.py b/dojo/tools/veracode/xml_parser.py new file mode 100644 index 0000000000..c8f71b79a4 --- /dev/null +++ b/dojo/tools/veracode/xml_parser.py @@ -0,0 +1,346 @@ +import re +import uuid +from datetime import datetime + +from defusedxml import ElementTree + +from dojo.models import Finding +from dojo.models import Endpoint + +XML_NAMESPACE = {"x": "https://www.veracode.com/schema/reports/export/1.0"} + + +class VeracodeXMLParser(object): + """This parser is written for Veracode Detailed XML reports, version 1.5. + + Version is annotated in the report, `detailedreport/@report_format_version`. + see https://help.veracode.com/r/t_download_XML_report + """ + + vc_severity_mapping = { + 1: "Info", + 2: "Low", + 3: "Medium", + 4: "High", + 5: "Critical", + } + + def get_findings(self, filename, test): + root = ElementTree.parse(filename).getroot() + + app_id = root.attrib["app_id"] + report_date = datetime.strptime( + root.attrib["last_update_time"], "%Y-%m-%d %H:%M:%S %Z" + ) + + dupes = dict() + + # Get SAST findings + # This assumes `` only exists within the `` + # nodes. 
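As a quick aside on the extension-based dispatch introduced in dojo/tools/veracode/parser.py above, here is a minimal self-contained sketch of the same routing idea (the pick_format helper and the stand-in report objects are invented for illustration; the real code only relies on the uploaded report object exposing a .name attribute, as seen in the dispatch above):

    # Illustration only -- same suffix-based routing as the new VeracodeParser.get_findings().
    from types import SimpleNamespace

    def pick_format(report) -> str:
        if report.name.lower().endswith(".xml"):
            return "xml"
        elif report.name.lower().endswith(".json"):
            return "json"
        raise ValueError("Filename extension not recognized. Use .xml or .json")

    print(pick_format(SimpleNamespace(name="dynamic_findings_list_format.json")))  # json
    print(pick_format(SimpleNamespace(name="detailed_report.xml")))                # xml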
+ for category_node in root.findall( + "x:severity/x:category", namespaces=XML_NAMESPACE + ): + # Mitigation text. + mitigation_text = "" + mitigation_text += ( + category_node.find( + "x:recommendations/x:para", namespaces=XML_NAMESPACE + ).get("text") + + "\n\n" + ) + # Bullet list of recommendations: + mitigation_text += "".join( + list( + map( + lambda x: " * " + x.get("text") + "\n", + category_node.findall( + "x:recommendations/x:para/x:bulletitem", + namespaces=XML_NAMESPACE, + ), + ) + ) + ) + + for flaw_node in category_node.findall( + "x:cwe/x:staticflaws/x:flaw", namespaces=XML_NAMESPACE + ): + dupe_key = flaw_node.attrib["issueid"] + + # Only process if we didn't do that before. + if dupe_key not in dupes: + # Add to list. + dupes[dupe_key] = self.__xml_static_flaw_to_finding( + app_id, flaw_node, mitigation_text, test + ) + + for flaw_node in category_node.findall( + "x:cwe/x:dynamicflaws/x:flaw", namespaces=XML_NAMESPACE + ): + dupe_key = flaw_node.attrib["issueid"] + + if dupe_key not in dupes: + dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding( + app_id, flaw_node, mitigation_text, test + ) + + # Get SCA findings + for component in root.findall( + "x:software_composition_analysis/x:vulnerable_components" + "/x:component", + namespaces=XML_NAMESPACE, + ): + _library = component.attrib["library"] + if "library_id" in component.attrib and component.attrib[ + "library_id" + ].startswith("maven:"): + # Set the library name from the maven component if it's + # available to align with CycloneDX + Veracode SCA + split_library_id = component.attrib["library_id"].split(":") + if len(split_library_id) > 2: + _library = split_library_id[2] + _vendor = component.attrib["vendor"] + _version = component.attrib["version"] + + for vulnerability in component.findall( + "x:vulnerabilities/x:vulnerability", namespaces=XML_NAMESPACE + ): + # We don't have a Id for SCA findings so just generate a random + # one + dupes[str(uuid.uuid4())] = self.__xml_sca_flaw_to_finding( + test, + report_date, + _vendor, + _library, + _version, + vulnerability, + ) + + return list(dupes.values()) + + @classmethod + def __xml_flaw_to_unique_id(cls, app_id, xml_node): + issue_id = xml_node.attrib["issueid"] + return "app-" + app_id + "_issue-" + issue_id + + @classmethod + def __xml_flaw_to_severity(cls, xml_node): + return cls.vc_severity_mapping.get( + int(xml_node.attrib["severity"]), "Info" + ) + + @classmethod + def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): + # Defaults + finding = Finding() + finding.test = test + finding.mitigation = mitigation_text + finding.static_finding = True + finding.dynamic_finding = False + finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id( + app_id, xml_node + ) + + # Report values + finding.severity = cls.__xml_flaw_to_severity(xml_node) + finding.cwe = int(xml_node.attrib["cweid"]) + finding.title = xml_node.attrib["categoryname"] + finding.impact = "CIA Impact: " + xml_node.attrib["cia_impact"].upper() + + # Note that DD's legacy dedupe hashing uses the description field, + # so for compatibility, description field should contain very static + # info. + _description = xml_node.attrib["description"].replace(". 
", ".\n") + finding.description = _description + + _references = "None" + if "References:" in _description: + _references = _description[ + _description.index("References:") + 13: + ].replace(") ", ")\n") + finding.references = ( + _references + + "\n\nVulnerable Module: " + + xml_node.attrib["module"] + + "\nType: " + + xml_node.attrib["type"] + + "\nVeracode issue ID: " + + xml_node.attrib["issueid"] + ) + + _date_found = test.target_start + if "date_first_occurrence" in xml_node.attrib: + _date_found = datetime.strptime( + xml_node.attrib["date_first_occurrence"], + "%Y-%m-%d %H:%M:%S %Z", + ) + finding.date = _date_found + + _is_mitigated = False + _mitigated_date = None + if ( + "mitigation_status" in xml_node.attrib + and xml_node.attrib["mitigation_status"].lower() == "accepted" + ): + if ( + "remediation_status" in xml_node.attrib + and xml_node.attrib["remediation_status"].lower() == "fixed" + ): + _is_mitigated = True + else: + # This happens if any mitigation (including 'Potential false positive') + # was accepted in VC. + for mitigation in xml_node.findall( + "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE + ): + _is_mitigated = True + _mitigated_date = datetime.strptime( + mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z" + ) + finding.is_mitigated = _is_mitigated + finding.mitigated = _mitigated_date + finding.active = not _is_mitigated + + # Check if it's a FP in veracode. + # Only check in case finding was mitigated, since DD doesn't allow + # both `verified` and `false_p` to be true, while `verified` is implied on the import + # level, not on the finding-level. + _false_positive = False + if _is_mitigated: + _remediation_status = xml_node.attrib["remediation_status"].lower() + if ( + "false positive" in _remediation_status + or "falsepositive" in _remediation_status + ): + _false_positive = True + finding.false_p = _false_positive + + return finding + + @classmethod + def __xml_static_flaw_to_finding( + cls, app_id, xml_node, mitigation_text, test + ): + finding = cls.__xml_flaw_to_finding( + app_id, xml_node, mitigation_text, test + ) + finding.static_finding = True + finding.dynamic_finding = False + + _line_number = xml_node.attrib["line"] + _functionrelativelocation = xml_node.attrib["functionrelativelocation"] + if ( + _line_number is not None + and _line_number.isdigit() + and _functionrelativelocation is not None + and _functionrelativelocation.isdigit() + ): + finding.line = int(_line_number) + int(_functionrelativelocation) + finding.sast_source_line = finding.line + + _source_file = xml_node.attrib.get("sourcefile") + _sourcefilepath = xml_node.attrib.get("sourcefilepath") + finding.file_path = _sourcefilepath + _source_file + finding.sast_source_file_path = _sourcefilepath + _source_file + + _sast_source_obj = xml_node.attrib.get("functionprototype") + if isinstance(_sast_source_obj, str): + finding.sast_source_object = ( + _sast_source_obj if _sast_source_obj else None + ) + + finding.unsaved_tags = ["sast"] + + return finding + + @classmethod + def __xml_dynamic_flaw_to_finding( + cls, app_id, xml_node, mitigation_text, test + ): + finding = cls.__xml_flaw_to_finding( + app_id, xml_node, mitigation_text, test + ) + finding.static_finding = False + finding.dynamic_finding = True + + url_host = xml_node.attrib.get("url") + finding.unsaved_endpoints = [Endpoint.from_uri(url_host)] + + finding.unsaved_tags = ["dast"] + + return finding + + @staticmethod + def _get_cwe(val): + # Match only the first CWE! 
+ cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE) + if cweSearch: + return int(cweSearch.group(1)) + else: + return None + + @classmethod + def __xml_sca_flaw_to_finding( + cls, test, report_date, vendor, library, version, xml_node + ): + # Defaults + finding = Finding() + finding.test = test + finding.static_finding = True + finding.dynamic_finding = False + + # Report values + cvss_score = float(xml_node.attrib["cvss_score"]) + finding.cvssv3_score = cvss_score + finding.severity = cls.__xml_flaw_to_severity(xml_node) + finding.unsaved_vulnerability_ids = [xml_node.attrib["cve_id"]] + finding.cwe = cls._get_cwe(xml_node.attrib["cwe_id"]) + finding.title = "Vulnerable component: {0}:{1}".format( + library, version + ) + finding.component_name = library + finding.component_version = version + + # Use report-date, otherwise DD doesn't + # overwrite old matching SCA findings. + finding.date = report_date + + _description = "This library has known vulnerabilities.\n" + _description += ( + "**CVE:** {0} ({1})\n" + "CVS Score: {2} ({3})\n" + "Summary: \n>{4}" + "\n\n-----\n\n".format( + xml_node.attrib["cve_id"], + xml_node.attrib.get("first_found_date"), + xml_node.attrib["cvss_score"], + cls.vc_severity_mapping.get( + int(xml_node.attrib["severity"]), "Info" + ), + xml_node.attrib["cve_summary"], + ) + ) + finding.description = _description + + finding.unsaved_tags = ["sca"] + + _is_mitigated = False + _mitigated_date = None + if ( + "mitigation" in xml_node.attrib + and xml_node.attrib["mitigation"].lower() == "true" + ): + # This happens if any mitigation (including 'Potential false positive') + # was accepted in VC. + for mitigation in xml_node.findall( + "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE + ): + _is_mitigated = True + _mitigated_date = datetime.strptime( + mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z" + ) + finding.is_mitigated = _is_mitigated + finding.mitigated = _mitigated_date + finding.active = not _is_mitigated + + return finding diff --git a/unittests/scans/veracode/dynamic_embedded_format.json b/unittests/scans/veracode/dynamic_embedded_format.json new file mode 100644 index 0000000000..eb6329ee87 --- /dev/null +++ b/unittests/scans/veracode/dynamic_embedded_format.json @@ -0,0 +1,233 @@ +{ + "_embedded": { + "findings": [ + { + "issue_id": 1568514641, + "scan_type": "DYNAMIC", + "description": "Injections happen when untrusted data is inserted into an interpreted syntax and subsequently evaluated on the server side. This syntax may be a SQL query, a parsed JSON or XML document, an executed script or other syntax that may be in use within the application. Although the target syntax has not been identified, the application behavior demonstrates that the input HTTP parameter may be inserted without proper escaping. It was observed by sending valid and invalid payloads that should throw or should not throw errors. By inserting a proper and improper comments such as ``, `*/_/*`, `/*_*/` into the `api` parameter, the scanner was able to spot a difference in the responses, which is a good indicator of a potential vulnerability. Confidence: medium. Response codes: `404`, `404`, `404`. Similarities: `` vs `*/_/*`: 0.0; `*/_/*` vs `/*_*/`: 0.0; `` vs `/*_*/`: 1.0. It is recommended to identify how the current parameter is used in the application source code, and make sure it is escaped before inserting into any syntax or query. You can add valid values to an allowlist and invalid values to a blocklist. 
CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 4, + "cwe": { + "id": 74, + "name": "Improper Neutralization of Special Elements in Output Used by a Downstream Component ('Injection')", + "href": "https://api.veracode.com/appsec/v1/cwes/74" + }, + "path": "/api/*_*//new_user_sign_up?param=wild-things", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Code Injection", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 28, + "name": "Code Injection", + "href": "https://api.veracode.com/appsec/v1/categories/28" + }, + "vulnerable_parameter": "api", + "attack_vector": "Improper Neutralization of Special Elements in Output Used by a Downstream Component ('Injection')", + "url": "https://application.insecure-company-alliance.com:443/api/*_*//new_user_sign_up?param=wild-things" + }, + "build_id": 986413 + }, + { + "issue_id": 1568514646, + "scan_type": "DYNAMIC", + "description": "The target web page does not specify Content-Security-Policy. While lack of Content-Security-Policy on the website does not represents a security risk by itself, a strict Content-Security-Policy may serve as an additional protection layer from Cross-Site-Scripting attacks. Content-Security-Policy helps to ensure that all resources (such as scripts, images or video files) on the website are loaded from trusted sources. Content-Security-Policy may also help to protect from UI redressing attacks, such as \"Clickjacking\". Consider implementing strict Content-Security-Policy by setting the corresponding HTTP header for each page response from the website. It is recommended to move all scripts to trusted locations and specify \"script-src\" or \"default-src\" directives. If inline javascript is required on the website, inline scripts may be accompanied with a cryptographic nonce (number used once) or a hash, to ensure only trusted scripts are executed. CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2020-11-13T04:24:21.030Z", + "status": "OPEN", + "resolution": "MITIGATED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 829, + "name": "Inclusion of Functionality from Untrusted Control Sphere", + "href": "https://api.veracode.com/appsec/v1/cwes/829" + }, + "path": "/?param=wild-things", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Insecure Dependencies", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 32, + "name": "Insecure Dependencies", + "href": "https://api.veracode.com/appsec/v1/categories/32" + }, + "attack_vector": "Inclusion of Functionality from Untrusted Control Sphere", + "url": "https://application.insecure-company-alliance.com:443/?param=wild-things" + }, + "build_id": 986413 + }, + { + "issue_id": 1568514647, + "scan_type": "DYNAMIC", + "description": "The site application.insecure-company-alliance.com:443 does not properly utilize the X-Content-Type-Options header. 
This header can be set to protect against MIME type confusion attacks in Internet Explorer versions 9 and higher, Chrome and Safari. Content sniffing is a method browsers use to attempt to determine the real content type of a response by looking at the content itself, instead of the response header's content-type value. By returning X-Content-Type-Options: nosniff, certain elements will only load external resources if their content-type matches what is expected. It should be noted this check was only executed against the first few pages of this site. Set the X-Content-Type-Options header to a value of \"nosniff\" (without quotes). It is recommended this header be set side wide by configuring your web or application server to return this value for all resources. More information on configuring this header can be found at https://www.veracode.com/blog/2014/03/guidelines-for-setting-security-headers#xcto CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": false, + "finding_status": { + "first_found_date": "2020-01-21T20:59:10.531Z", + "status": "CLOSED", + "resolution": "MITIGATED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 0, + "cwe": { + "id": 16, + "name": "Configuration", + "href": "https://api.veracode.com/appsec/v1/cwes/16" + }, + "path": "/?param=something-CRAZY", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Server Configuration", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 27, + "name": "Server Configuration", + "href": "https://api.veracode.com/appsec/v1/categories/27" + }, + "vulnerable_parameter": "x-content-type-options header", + "attack_vector": "Configuration", + "url": "https://application.insecure-company-alliance.com:443/?param=something-CRAZY" + }, + "build_id": 986413 + }, + { + "issue_id": 156851234646, + "scan_type": "DYNAMIC", + "description": "Weak TLSv1.2 cipher TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA is supported on application.insecure-company-alliance.com:-1. Only support strong versions of ciphers. Veracode recommends the guidance published by the National Institute of Standards and Technology (NIST), excerpted below from NIST 800-52: Cipher suites that use the Triple Data Encryption Algorithm (TDEA, also written as 3DES) are no longer allowed due to the limited amounts of data that can be processed under a single key. The server should be configured to only use cipher suites for which it has a valid certificate containing a signature providing at least 112 bits of security. Prefer ephemeral keys over static keys (i.e., prefer DHE over DH, and prefer ECDHE over ECDH). Ephemeral keys provide perfect forward secrecy. Prefer GCM or CCM modes over CBC mode. The use of an authenticated encryption mode prevents several attacks. Note that these are not available in versions prior to TLS 1.2. 
CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2020-06-23T06:52:22.283Z", + "status": "CLOSED", + "resolution": "MITIGATED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 757, + "name": "Selection of Less-Secure Algorithm During Negotiation ('Algorithm Downgrade')", + "href": "https://api.veracode.com/appsec/v1/cwes/757" + }, + "path": "/", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Server Configuration", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 27, + "name": "Server Configuration", + "href": "https://api.veracode.com/appsec/v1/categories/27" + }, + "vulnerable_parameter": "TLSv1.2 cipher TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "attack_vector": "Selection of Less-Secure Algorithm During Negotiation ('Algorithm Downgrade')", + "url": "https://application.insecure-company-alliance.com:443/" + }, + "build_id": 986413 + }, + { + "issue_id": 15623414646, + "scan_type": "DYNAMIC", + "description": "Weak TLSv1.2 cipher TLS_RSA_WITH_AES_256_CBC_SHA is supported on application.insecure-company-alliance.com:-1. Only support strong versions of ciphers. Veracode recommends the guidance published by the National Institute of Standards and Technology (NIST), excerpted below from NIST 800-52: Cipher suites that use the Triple Data Encryption Algorithm (TDEA, also written as 3DES) are no longer allowed due to the limited amounts of data that can be processed under a single key. The server should be configured to only use cipher suites for which it has a valid certificate containing a signature providing at least 112 bits of security. Prefer ephemeral keys over static keys (i.e., prefer DHE over DH, and prefer ECDHE over ECDH). Ephemeral keys provide perfect forward secrecy. Prefer GCM or CCM modes over CBC mode. The use of an authenticated encryption mode prevents several attacks. Note that these are not available in versions prior to TLS 1.2. 
CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2020-06-23T06:52:22.283Z", + "status": "OPEN", + "resolution": "MITIGATED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 757, + "name": "Selection of Less-Secure Algorithm During Negotiation ('Algorithm Downgrade')", + "href": "https://api.veracode.com/appsec/v1/cwes/757" + }, + "path": "/", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Server Configuration", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 27, + "name": "Server Configuration", + "href": "https://api.veracode.com/appsec/v1/categories/27" + }, + "vulnerable_parameter": "TLSv1.2 cipher TLS_RSA_WITH_AES_256_CBC_SHA", + "attack_vector": "Selection of Less-Secure Algorithm During Negotiation ('Algorithm Downgrade')", + "url": "https://application.insecure-company-alliance.com:443/" + }, + "build_id": 986413 + } + ] + }, + "_links": { + "first": { + "href": "link" + }, + "self": { + "href": "link", + "templated": true + }, + "next": { + "href": "link" + }, + "last": { + "href": "link" + }, + "application": { + "href": "link" + }, + "non-sca": { + "href": "link", + "templated": true + } + }, + "page": { + "size": 5, + "total_elements": 10, + "total_pages": 2, + "number": 0 + } +} \ No newline at end of file diff --git a/unittests/scans/veracode/dynamic_findings_list_format.json b/unittests/scans/veracode/dynamic_findings_list_format.json new file mode 100644 index 0000000000..3f35433de9 --- /dev/null +++ b/unittests/scans/veracode/dynamic_findings_list_format.json @@ -0,0 +1,203 @@ +{ + "findings": [ + { + "issue_id": 1568514641, + "scan_type": "DYNAMIC", + "description": "Injections happen when untrusted data is inserted into an interpreted syntax and subsequently evaluated on the server side. This syntax may be a SQL query, a parsed JSON or XML document, an executed script or other syntax that may be in use within the application. Although the target syntax has not been identified, the application behavior demonstrates that the input HTTP parameter may be inserted without proper escaping. It was observed by sending valid and invalid payloads that should throw or should not throw errors. By inserting a proper and improper comments such as ``, `*/_/*`, `/*_*/` into the `api` parameter, the scanner was able to spot a difference in the responses, which is a good indicator of a potential vulnerability. Confidence: medium. Response codes: `404`, `404`, `404`. Similarities: `` vs `*/_/*`: 0.0; `*/_/*` vs `/*_*/`: 0.0; `` vs `/*_*/`: 1.0. It is recommended to identify how the current parameter is used in the application source code, and make sure it is escaped before inserting into any syntax or query. You can add valid values to an allowlist and invalid values to a blocklist. 
CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 4, + "cwe": { + "id": 74, + "name": "Improper Neutralization of Special Elements in Output Used by a Downstream Component ('Injection')", + "href": "https://api.veracode.com/appsec/v1/cwes/74" + }, + "path": "/api/*_*//new_user_sign_up?param=wild-things", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Code Injection", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 28, + "name": "Code Injection", + "href": "https://api.veracode.com/appsec/v1/categories/28" + }, + "vulnerable_parameter": "api", + "attack_vector": "Improper Neutralization of Special Elements in Output Used by a Downstream Component ('Injection')", + "url": "https://application.insecure-company-alliance.com:443/api/*_*//new_user_sign_up?param=wild-things" + }, + "build_id": 986413 + }, + { + "issue_id": 1568514646, + "scan_type": "DYNAMIC", + "description": "The target web page does not specify Content-Security-Policy. While lack of Content-Security-Policy on the website does not represents a security risk by itself, a strict Content-Security-Policy may serve as an additional protection layer from Cross-Site-Scripting attacks. Content-Security-Policy helps to ensure that all resources (such as scripts, images or video files) on the website are loaded from trusted sources. Content-Security-Policy may also help to protect from UI redressing attacks, such as \"Clickjacking\". Consider implementing strict Content-Security-Policy by setting the corresponding HTTP header for each page response from the website. It is recommended to move all scripts to trusted locations and specify \"script-src\" or \"default-src\" directives. If inline javascript is required on the website, inline scripts may be accompanied with a cryptographic nonce (number used once) or a hash, to ensure only trusted scripts are executed. CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2020-11-13T04:24:21.030Z", + "status": "OPEN", + "resolution": "MITIGATED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 829, + "name": "Inclusion of Functionality from Untrusted Control Sphere", + "href": "https://api.veracode.com/appsec/v1/cwes/829" + }, + "path": "/?param=wild-things", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Insecure Dependencies", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 32, + "name": "Insecure Dependencies", + "href": "https://api.veracode.com/appsec/v1/categories/32" + }, + "attack_vector": "Inclusion of Functionality from Untrusted Control Sphere", + "url": "https://application.insecure-company-alliance.com:443/?param=wild-things" + }, + "build_id": 986413 + }, + { + "issue_id": 1568514647, + "scan_type": "DYNAMIC", + "description": "The site application.insecure-company-alliance.com:443 does not properly utilize the X-Content-Type-Options header. 
This header can be set to protect against MIME type confusion attacks in Internet Explorer versions 9 and higher, Chrome and Safari. Content sniffing is a method browsers use to attempt to determine the real content type of a response by looking at the content itself, instead of the response header's content-type value. By returning X-Content-Type-Options: nosniff, certain elements will only load external resources if their content-type matches what is expected. It should be noted this check was only executed against the first few pages of this site. Set the X-Content-Type-Options header to a value of \"nosniff\" (without quotes). It is recommended this header be set side wide by configuring your web or application server to return this value for all resources. More information on configuring this header can be found at https://www.veracode.com/blog/2014/03/guidelines-for-setting-security-headers#xcto CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": false, + "finding_status": { + "first_found_date": "2020-01-21T20:59:10.531Z", + "status": "CLOSED", + "resolution": "MITIGATED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 0, + "cwe": { + "id": 16, + "name": "Configuration", + "href": "https://api.veracode.com/appsec/v1/cwes/16" + }, + "path": "/?param=something-CRAZY", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Server Configuration", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 27, + "name": "Server Configuration", + "href": "https://api.veracode.com/appsec/v1/categories/27" + }, + "vulnerable_parameter": "x-content-type-options header", + "attack_vector": "Configuration", + "url": "https://application.insecure-company-alliance.com:443/?param=something-CRAZY" + }, + "build_id": 986413 + }, + { + "issue_id": 156851234646, + "scan_type": "DYNAMIC", + "description": "Weak TLSv1.2 cipher TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA is supported on application.insecure-company-alliance.com:-1. Only support strong versions of ciphers. Veracode recommends the guidance published by the National Institute of Standards and Technology (NIST), excerpted below from NIST 800-52: Cipher suites that use the Triple Data Encryption Algorithm (TDEA, also written as 3DES) are no longer allowed due to the limited amounts of data that can be processed under a single key. The server should be configured to only use cipher suites for which it has a valid certificate containing a signature providing at least 112 bits of security. Prefer ephemeral keys over static keys (i.e., prefer DHE over DH, and prefer ECDHE over ECDH). Ephemeral keys provide perfect forward secrecy. Prefer GCM or CCM modes over CBC mode. The use of an authenticated encryption mode prevents several attacks. Note that these are not available in versions prior to TLS 1.2. 
CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2020-06-23T06:52:22.283Z", + "status": "CLOSED", + "resolution": "MITIGATED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 757, + "name": "Selection of Less-Secure Algorithm During Negotiation ('Algorithm Downgrade')", + "href": "https://api.veracode.com/appsec/v1/cwes/757" + }, + "path": "/", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Server Configuration", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 27, + "name": "Server Configuration", + "href": "https://api.veracode.com/appsec/v1/categories/27" + }, + "vulnerable_parameter": "TLSv1.2 cipher TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "attack_vector": "Selection of Less-Secure Algorithm During Negotiation ('Algorithm Downgrade')", + "url": "https://application.insecure-company-alliance.com:443/" + }, + "build_id": 986413 + }, + { + "issue_id": 15623414646, + "scan_type": "DYNAMIC", + "description": "Weak TLSv1.2 cipher TLS_RSA_WITH_AES_256_CBC_SHA is supported on application.insecure-company-alliance.com:-1. Only support strong versions of ciphers. Veracode recommends the guidance published by the National Institute of Standards and Technology (NIST), excerpted below from NIST 800-52: Cipher suites that use the Triple Data Encryption Algorithm (TDEA, also written as 3DES) are no longer allowed due to the limited amounts of data that can be processed under a single key. The server should be configured to only use cipher suites for which it has a valid certificate containing a signature providing at least 112 bits of security. Prefer ephemeral keys over static keys (i.e., prefer DHE over DH, and prefer ECDHE over ECDH). Ephemeral keys provide perfect forward secrecy. Prefer GCM or CCM modes over CBC mode. The use of an authenticated encryption mode prevents several attacks. Note that these are not available in versions prior to TLS 1.2. 
CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2020-06-23T06:52:22.283Z", + "status": "OPEN", + "resolution": "MITIGATED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 757, + "name": "Selection of Less-Secure Algorithm During Negotiation ('Algorithm Downgrade')", + "href": "https://api.veracode.com/appsec/v1/cwes/757" + }, + "path": "/", + "hostname": "application.insecure-company-alliance.com", + "plugin": "Server Configuration", + "port": "443", + "discovered_by_vsa": 0, + "finding_category": { + "id": 27, + "name": "Server Configuration", + "href": "https://api.veracode.com/appsec/v1/categories/27" + }, + "vulnerable_parameter": "TLSv1.2 cipher TLS_RSA_WITH_AES_256_CBC_SHA", + "attack_vector": "Selection of Less-Secure Algorithm During Negotiation ('Algorithm Downgrade')", + "url": "https://application.insecure-company-alliance.com:443/" + }, + "build_id": 986413 + } + ] +} \ No newline at end of file diff --git a/unittests/scans/veracode/sca_embedded_format.json b/unittests/scans/veracode/sca_embedded_format.json new file mode 100644 index 0000000000..ff290379f6 --- /dev/null +++ b/unittests/scans/veracode/sca_embedded_format.json @@ -0,0 +1,618 @@ +{ + "_embedded": { + "findings": [ + { + "scan_type": "SCA", + "description": "spring-boot-autoconfigure is vulnerable to Denial Of Service (DoS). The vulnerability is applicable when the application has Spring MVC auto-configuration enabled and uses the Spring Boot welcome page, which can be either static or templated, and the application is deployed behind a proxy which caches the 404 responses. An attacker can cause the application to crash by submitting a request to the welcome page which the server is unable to properly respond to.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-05-31T16:11:27.482Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 4, + "cwe": { + "id": 400, + "name": "Uncontrolled Resource Consumption", + "href": "https://api.veracode.com/appsec/v1/cwes/400" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "apache-2.0", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-20883", + "cvss": 7.1, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-20883", + "severity": "High", + "vector": "AV:N/AC:M/Au:N/C:N/I:N/A:C", + "cvss3": { + "score": 7.5, + "severity": "High", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H" + } + }, + "product_id": "abc123-bca321", + "component_filename": "spring-boot-autoconfigure-2.5.14.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/spring-boot-autoconfigure-2.5.14.jar" + }, + { + "path": "path/to/beta/spring-boot-autoconfigure-2.5.14.jar" + }, + { + "path": "path/to/charlie/spring-boot-autoconfigure-2.5.14.jar" + }, + { + "path": "path/to/delta/spring-boot-autoconfigure-2.5.14.jar" + } + ], + "version": "2.5.14" + } + }, + { + "scan_type": "SCA", + "description": "Okio is vulnerable to Denial of Service (DoS). 
The vulnerability arises due to improper handling of the `xlen` parameter in the `consumeHeader` function of `GzipSource.kt`. When the parsing value exceeds `0x7fff` in a maliciously crafted gzip buffer, an attacker can crash the application.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-14T16:50:18.629Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "apache-2.0", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-3635", + "cvss": 5.4, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-3635", + "severity": "Medium", + "vector": "AV:N/AC:H/Au:N/C:N/I:N/A:C", + "cvss3": { + "score": 5.9, + "severity": "Medium", + "vector": "AV:N/AC:H/PR:N/UI:N/S:U/C:N/I:N/A:H" + } + }, + "product_id": "abc123-bca321", + "component_filename": "okio-2.8.0.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/okio-2.8.0.jar" + } + ], + "version": "2.8.0" + } + }, + { + "scan_type": "SCA", + "description": "com.google.guava, guava is vulnerable to Information Disclosure. The vulnerability exits due to incorrect default file permissions in `FileBackedOutputStream`, which allow an attacker to access the temporary directory.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-06-15T19:19:21.853Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 552, + "name": "Files or Directories Accessible to External Parties", + "href": "https://api.veracode.com/appsec/v1/cwes/552" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "apache-2.0", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-2976", + "cvss": 5.2, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-2976", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:P/A:N", + "cvss3": { + "score": 7.1, + "severity": "High", + "vector": "AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "guava-30.0-jre.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/guava-30.0-jre.jar" + }, + { + "path": "path/to/beta/guava-30.0-jre.jar" + }, + { + "path": "path/to/charlie/guava-30.0-jre.jar" + }, + { + "path": "path/to/delta/guava-30.0-jre.jar" + } + ], + "version": "30.0-jre" + } + }, + { + "scan_type": "SCA", + "description": "semver is vulnerable to Regular Expression Denial Of Service (ReDoS) attacks. 
A malicious user is able to cause parsing slowdowns when untrusted user data is provided as a range via the function `parseRange` due to the usage of regex expression with inefficient time complexity.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-06-29T21:07:28.570Z", + "status": "CLOSED", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 1333, + "name": "Inefficient Regular Expression Complexity", + "href": "https://api.veracode.com/appsec/v1/cwes/1333" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "isc", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2022-25883", + "cvss": 5.0, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2022-25883", + "severity": "Medium", + "vector": "AV:N/AC:L/Au:N/C:N/I:N/A:P", + "cvss3": { + "score": 7.5, + "severity": "High", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H" + } + }, + "product_id": "abc123-bca321", + "component_filename": "semver", + "language": "JAVASCRIPT", + "component_path": [ + { + "path": "path/to/alpha/node_modules:semver" + } + ], + "version": "5.7.1" + } + }, + { + "scan_type": "SCA", + "description": "inflight is vulnerable to a Memory Leak. The vulnerability is caused by improper memory management due to a lack of resource freeing, which can result in Denial of Service conditions.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T14:17:13.144Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "isc", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "SRCCLR-SID-41137", + "cvss": 5.0, + "href": "https://sca.analysiscenter.veracode.com/vulnerability-database/search#query=SRCCLR-SID-41137", + "severity": "Medium", + "vector": "AV:N/AC:L/Au:N/C:N/I:N/A:P", + "cvss3": { + "score": 7.5, + "severity": "High", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H" + } + }, + "product_id": "abc123-bca321", + "component_filename": "inflight", + "language": "JAVASCRIPT", + "component_path": [ + { + "path": "path/to/alpha/node_modules:inflight" + } + ], + "version": "1.0.6" + } + }, + { + "scan_type": "SCA", + "description": "org.bouncycastle is vulnerable to LDAP injection. 
The vulnerability exists because the `engineGetCRLs` function in `X509LDAPCertStoreSpi.java` does not properly validate the `X.509` certificates, allowing an attacker to insert malicious certificate's subject name with special characters such as `CN=Subject*)(objectclass=.` into an LDAP search filter without any escape mechanism.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T16:10:34.656Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 295, + "name": "Improper Certificate Validation", + "href": "https://api.veracode.com/appsec/v1/cwes/295" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "mit", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-33201", + "cvss": 4.6, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-33201", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:N/A:N", + "cvss3": { + "score": 5.3, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "bcprov-ext-jdk15on-1.69.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/bcprov-ext-jdk15on-1.69.jar" + }, + { + "path": "path/to/beta/bcprov-ext-jdk15on-1.69.jar" + } + ], + "version": "1.69" + } + }, + { + "scan_type": "SCA", + "description": "org.bouncycastle is vulnerable to LDAP injection. The vulnerability exists because the `engineGetCRLs` function in `X509LDAPCertStoreSpi.java` does not properly validate the `X.509` certificates, allowing an attacker to insert malicious certificate's subject name with special characters such as `CN=Subject*)(objectclass=.` into an LDAP search filter without any escape mechanism.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T16:10:32.992Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 295, + "name": "Improper Certificate Validation", + "href": "https://api.veracode.com/appsec/v1/cwes/295" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "mit", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-33201", + "cvss": 4.6, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-33201", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:N/A:N", + "cvss3": { + "score": 5.3, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "bcprov-jdk15on-1.69.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/bcprov-jdk15on-1.69.jar" + }, + { + "path": "path/to/beta/bcprov-jdk15on-1.69.jar" + }, + { + "path": "path/to/charlie/bcprov-jdk15on-1.69.jar" + }, + { + "path": "path/to/delta/bcprov-jdk15on-1.69.jar" + } + ], + "version": "1.69" + } + }, + { + "scan_type": "SCA", + "description": "org.bouncycastle is vulnerable to LDAP injection. 
The vulnerability exists because the `engineGetCRLs` function in `X509LDAPCertStoreSpi.java` does not properly validate the `X.509` certificates, allowing an attacker to insert malicious certificate's subject name with special characters such as `CN=Subject*)(objectclass=.` into an LDAP search filter without any escape mechanism.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T16:10:32.943Z", + "status": "CLOSED", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 295, + "name": "Improper Certificate Validation", + "href": "https://api.veracode.com/appsec/v1/cwes/295" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "mit", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-33201", + "cvss": 4.6, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-33201", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:N/A:N", + "cvss3": { + "score": 5.3, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "bcprov-jdk15to18-1.69.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/bcprov-jdk15to18-1.69.jar" + } + ], + "version": "1.69" + } + }, + { + "scan_type": "SCA", + "description": "org.bouncycastle is vulnerable to LDAP injection. The vulnerability exists because the `engineGetCRLs` function in `X509LDAPCertStoreSpi.java` does not properly validate the `X.509` certificates, allowing an attacker to insert malicious certificate's subject name with special characters such as `CN=Subject*)(objectclass=.` into an LDAP search filter without any escape mechanism.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T16:10:32.649Z", + "status": "CLOSED", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 295, + "name": "Improper Certificate Validation", + "href": "https://api.veracode.com/appsec/v1/cwes/295" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "mit", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-33201", + "cvss": 4.6, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-33201", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:N/A:N", + "cvss3": { + "score": 5.3, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "bcprov-ext-jdk15to18-1.69.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/bcprov-ext-jdk15to18-1.69.jar" + } + ], + "version": "1.69" + } + }, + { + "scan_type": "SCA", + "description": "esapi is vulnerable to cross-site scripting. 
The vulnerability exists due to the lack of sanitization use in the `onsiteURL` regular expression of `antisamy-esapi.xml`, allowing an attacker to inject and execute malicious javascript", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": false, + "finding_status": { + "first_found_date": "2023-02-17T02:54:55.674Z", + "status": "OPEN", + "resolution": "POTENTIAL_FALSE_POSITIVE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 79, + "name": "Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')", + "href": "https://api.veracode.com/appsec/v1/cwes/79" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "bsd-3-clause", + "risk_rating": "2" + }, + { + "license_id": "cc-by-sa-3.0", + "risk_rating": "4" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2022-24891", + "cvss": 4.3, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2022-24891", + "severity": "Medium", + "vector": "AV:N/AC:M/Au:N/C:N/I:P/A:N", + "cvss3": { + "score": 6.1, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "esapi-2.2.0.0.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha//esapi-2.2.0.0.jar" + } + ], + "version": "2.2.0.0" + } + } + ] + }, + "_links": { + "first": { + "href": "link" + }, + "self": { + "href": "link", + "templated": true + }, + "next": { + "href": "link" + }, + "last": { + "href": "link" + }, + "application": { + "href": "link" + }, + "non-sca": { + "href": "link", + "templated": true + } + }, + "page": { + "size": 5, + "total_elements": 10, + "total_pages": 2, + "number": 0 + } +} \ No newline at end of file diff --git a/unittests/scans/veracode/sca_findings_list_format.json b/unittests/scans/veracode/sca_findings_list_format.json new file mode 100644 index 0000000000..af5cce4a1a --- /dev/null +++ b/unittests/scans/veracode/sca_findings_list_format.json @@ -0,0 +1,588 @@ +{ + "findings": [ + { + "scan_type": "SCA", + "description": "spring-boot-autoconfigure is vulnerable to Denial Of Service (DoS). The vulnerability is applicable when the application has Spring MVC auto-configuration enabled and uses the Spring Boot welcome page, which can be either static or templated, and the application is deployed behind a proxy which caches the 404 responses. 
An attacker can cause the application to crash by submitting a request to the welcome page which the server is unable to properly respond to.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-05-31T16:11:27.482Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 4, + "cwe": { + "id": 400, + "name": "Uncontrolled Resource Consumption", + "href": "https://api.veracode.com/appsec/v1/cwes/400" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "apache-2.0", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-20883", + "cvss": 7.1, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-20883", + "severity": "High", + "vector": "AV:N/AC:M/Au:N/C:N/I:N/A:C", + "cvss3": { + "score": 7.5, + "severity": "High", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H" + } + }, + "product_id": "abc123-bca321", + "component_filename": "spring-boot-autoconfigure-2.5.14.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/spring-boot-autoconfigure-2.5.14.jar" + }, + { + "path": "path/to/beta/spring-boot-autoconfigure-2.5.14.jar" + }, + { + "path": "path/to/charlie/spring-boot-autoconfigure-2.5.14.jar" + }, + { + "path": "path/to/delta/spring-boot-autoconfigure-2.5.14.jar" + } + ], + "version": "2.5.14" + } + }, + { + "scan_type": "SCA", + "description": "Okio is vulnerable to Denial of Service (DoS). The vulnerability arises due to improper handling of the `xlen` parameter in the `consumeHeader` function of `GzipSource.kt`. When the parsing value exceeds `0x7fff` in a maliciously crafted gzip buffer, an attacker can crash the application.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-14T16:50:18.629Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "apache-2.0", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-3635", + "cvss": 5.4, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-3635", + "severity": "Medium", + "vector": "AV:N/AC:H/Au:N/C:N/I:N/A:C", + "cvss3": { + "score": 5.9, + "severity": "Medium", + "vector": "AV:N/AC:H/PR:N/UI:N/S:U/C:N/I:N/A:H" + } + }, + "product_id": "abc123-bca321", + "component_filename": "okio-2.8.0.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/okio-2.8.0.jar" + } + ], + "version": "2.8.0" + } + }, + { + "scan_type": "SCA", + "description": "com.google.guava, guava is vulnerable to Information Disclosure. 
The vulnerability exits due to incorrect default file permissions in `FileBackedOutputStream`, which allow an attacker to access the temporary directory.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-06-15T19:19:21.853Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 552, + "name": "Files or Directories Accessible to External Parties", + "href": "https://api.veracode.com/appsec/v1/cwes/552" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "apache-2.0", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-2976", + "cvss": 5.2, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-2976", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:P/A:N", + "cvss3": { + "score": 7.1, + "severity": "High", + "vector": "AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "guava-30.0-jre.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/guava-30.0-jre.jar" + }, + { + "path": "path/to/beta/guava-30.0-jre.jar" + }, + { + "path": "path/to/charlie/guava-30.0-jre.jar" + }, + { + "path": "path/to/delta/guava-30.0-jre.jar" + } + ], + "version": "30.0-jre" + } + }, + { + "scan_type": "SCA", + "description": "semver is vulnerable to Regular Expression Denial Of Service (ReDoS) attacks. A malicious user is able to cause parsing slowdowns when untrusted user data is provided as a range via the function `parseRange` due to the usage of regex expression with inefficient time complexity.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-06-29T21:07:28.570Z", + "status": "CLOSED", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 1333, + "name": "Inefficient Regular Expression Complexity", + "href": "https://api.veracode.com/appsec/v1/cwes/1333" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "isc", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2022-25883", + "cvss": 5.0, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2022-25883", + "severity": "Medium", + "vector": "AV:N/AC:L/Au:N/C:N/I:N/A:P", + "cvss3": { + "score": 7.5, + "severity": "High", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H" + } + }, + "product_id": "abc123-bca321", + "component_filename": "semver", + "language": "JAVASCRIPT", + "component_path": [ + { + "path": "path/to/alpha/node_modules:semver" + } + ], + "version": "5.7.1" + } + }, + { + "scan_type": "SCA", + "description": "inflight is vulnerable to a Memory Leak. 
The vulnerability is caused by improper memory management due to a lack of resource freeing, which can result in Denial of Service conditions.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T14:17:13.144Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "isc", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "SRCCLR-SID-41137", + "cvss": 5.0, + "href": "https://sca.analysiscenter.veracode.com/vulnerability-database/search#query=SRCCLR-SID-41137", + "severity": "Medium", + "vector": "AV:N/AC:L/Au:N/C:N/I:N/A:P", + "cvss3": { + "score": 7.5, + "severity": "High", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H" + } + }, + "product_id": "abc123-bca321", + "component_filename": "inflight", + "language": "JAVASCRIPT", + "component_path": [ + { + "path": "path/to/alpha/node_modules:inflight" + } + ], + "version": "1.0.6" + } + }, + { + "scan_type": "SCA", + "description": "org.bouncycastle is vulnerable to LDAP injection. The vulnerability exists because the `engineGetCRLs` function in `X509LDAPCertStoreSpi.java` does not properly validate the `X.509` certificates, allowing an attacker to insert malicious certificate's subject name with special characters such as `CN=Subject*)(objectclass=.` into an LDAP search filter without any escape mechanism.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T16:10:34.656Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 295, + "name": "Improper Certificate Validation", + "href": "https://api.veracode.com/appsec/v1/cwes/295" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "mit", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-33201", + "cvss": 4.6, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-33201", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:N/A:N", + "cvss3": { + "score": 5.3, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "bcprov-ext-jdk15on-1.69.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/bcprov-ext-jdk15on-1.69.jar" + }, + { + "path": "path/to/beta/bcprov-ext-jdk15on-1.69.jar" + } + ], + "version": "1.69" + } + }, + { + "scan_type": "SCA", + "description": "org.bouncycastle is vulnerable to LDAP injection. 
The vulnerability exists because the `engineGetCRLs` function in `X509LDAPCertStoreSpi.java` does not properly validate the `X.509` certificates, allowing an attacker to insert malicious certificate's subject name with special characters such as `CN=Subject*)(objectclass=.` into an LDAP search filter without any escape mechanism.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T16:10:32.992Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 295, + "name": "Improper Certificate Validation", + "href": "https://api.veracode.com/appsec/v1/cwes/295" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "mit", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-33201", + "cvss": 4.6, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-33201", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:N/A:N", + "cvss3": { + "score": 5.3, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "bcprov-jdk15on-1.69.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/bcprov-jdk15on-1.69.jar" + }, + { + "path": "path/to/beta/bcprov-jdk15on-1.69.jar" + }, + { + "path": "path/to/charlie/bcprov-jdk15on-1.69.jar" + }, + { + "path": "path/to/delta/bcprov-jdk15on-1.69.jar" + } + ], + "version": "1.69" + } + }, + { + "scan_type": "SCA", + "description": "org.bouncycastle is vulnerable to LDAP injection. The vulnerability exists because the `engineGetCRLs` function in `X509LDAPCertStoreSpi.java` does not properly validate the `X.509` certificates, allowing an attacker to insert malicious certificate's subject name with special characters such as `CN=Subject*)(objectclass=.` into an LDAP search filter without any escape mechanism.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T16:10:32.943Z", + "status": "CLOSED", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 295, + "name": "Improper Certificate Validation", + "href": "https://api.veracode.com/appsec/v1/cwes/295" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "mit", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-33201", + "cvss": 4.6, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-33201", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:N/A:N", + "cvss3": { + "score": 5.3, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "bcprov-jdk15to18-1.69.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/bcprov-jdk15to18-1.69.jar" + } + ], + "version": "1.69" + } + }, + { + "scan_type": "SCA", + "description": "org.bouncycastle is vulnerable to LDAP injection. 
The vulnerability exists because the `engineGetCRLs` function in `X509LDAPCertStoreSpi.java` does not properly validate the `X.509` certificates, allowing an attacker to insert malicious certificate's subject name with special characters such as `CN=Subject*)(objectclass=.` into an LDAP search filter without any escape mechanism.", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2023-07-06T16:10:32.649Z", + "status": "CLOSED", + "resolution": "UNRESOLVED", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 295, + "name": "Improper Certificate Validation", + "href": "https://api.veracode.com/appsec/v1/cwes/295" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "mit", + "risk_rating": "2" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2023-33201", + "cvss": 4.6, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2023-33201", + "severity": "Medium", + "vector": "AV:L/AC:L/Au:S/C:C/I:N/A:N", + "cvss3": { + "score": 5.3, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "bcprov-ext-jdk15to18-1.69.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha/bcprov-ext-jdk15to18-1.69.jar" + } + ], + "version": "1.69" + } + }, + { + "scan_type": "SCA", + "description": "esapi is vulnerable to cross-site scripting. The vulnerability exists due to the lack of sanitization use in the `onsiteURL` regular expression of `antisamy-esapi.xml`, allowing an attacker to inject and execute malicious javascript", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": false, + "finding_status": { + "first_found_date": "2023-02-17T02:54:55.674Z", + "status": "OPEN", + "resolution": "POTENTIAL_FALSE_POSITIVE", + "new": false, + "resolution_status": "APPROVED", + "last_seen_date": "2023-07-17T00:29:40.651Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 79, + "name": "Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')", + "href": "https://api.veracode.com/appsec/v1/cwes/79" + }, + "component_id": "efg456-gfe654", + "licenses": [ + { + "license_id": "bsd-3-clause", + "risk_rating": "2" + }, + { + "license_id": "cc-by-sa-3.0", + "risk_rating": "4" + } + ], + "metadata": { + "sca_scan_mode": "UPLOAD", + "sca_dep_mode": "UNKNOWN" + }, + "cve": { + "name": "CVE-2022-24891", + "cvss": 4.3, + "href": "http://nvd.nist.gov/vuln/detail/CVE-2022-24891", + "severity": "Medium", + "vector": "AV:N/AC:M/Au:N/C:N/I:P/A:N", + "cvss3": { + "score": 6.1, + "severity": "Medium", + "vector": "AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N" + } + }, + "product_id": "abc123-bca321", + "component_filename": "esapi-2.2.0.0.jar", + "language": "JAVA", + "component_path": [ + { + "path": "path/to/alpha//esapi-2.2.0.0.jar" + } + ], + "version": "2.2.0.0" + } + } + ] +} \ No newline at end of file diff --git a/unittests/scans/veracode/static_embedded_format.json b/unittests/scans/veracode/static_embedded_format.json new file mode 100644 index 0000000000..f375ff84d3 --- /dev/null +++ b/unittests/scans/veracode/static_embedded_format.json @@ -0,0 +1,194 @@ +{ + "_embedded": { + "findings": [ + { + "issue_id": 16516872, + "scan_type": "STATIC", + 
"description": "This call to page.html() contains a cross-site scripting (XSS) flaw. The application populates the HTTP response with untrusted input, allowing an attacker to embed malicious content, such as Javascript code, which will be executed in the context of the victim's browser. XSS vulnerabilities are commonly exploited to steal or manipulate cookies, modify presentation of content, and compromise confidential information, with new attack vectors being discovered on a regular basis. Use contextual escaping on all untrusted data before using it to construct any portion of an HTTP response. The escaping method should be chosen based on the specific use case of the untrusted data, otherwise it may not protect fully against the attack. For example, if the data is being written to the body of an HTML page, use HTML entity escaping; if the data is being written to an attribute, use attribute escaping; etc. Both the OWASP Java Encoder library and the Microsoft AntiXSS library provide contextual escaping methods. For more details on contextual escaping, see https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.md. In addition, as a best practice, always validate untrusted input to ensure that it conforms to the expected format, using centralized data validation routines when possible. References: CWE OWASP Supported Cleansers", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 80, + "name": "Improper Neutralization of Script-Related HTML Tags in a Web Page (Basic XSS)", + "href": "https://api.veracode.com/appsec/v1/cwes/80" + }, + "file_path": "/WEB-INF/views/contact.jsp", + "file_name": "update.jsp", + "module": "CoolProduct.jsa", + "relative_location": -1, + "finding_category": { + "id": 20, + "name": "Cross-Site Scripting (XSS)", + "href": "https://api.veracode.com/appsec/v1/categories/20" + }, + "procedure": "lambda_3", + "exploitability": 1, + "attack_vector": "page.html", + "file_line_number": 50 + }, + "build_id": 1268436506 + }, + { + "issue_id": 16516873, + "scan_type": "STATIC", + "description": "This call to page.html() contains a cross-site scripting (XSS) flaw. The application populates the HTTP response with untrusted input, allowing an attacker to embed malicious content, such as Javascript code, which will be executed in the context of the victim's browser. XSS vulnerabilities are commonly exploited to steal or manipulate cookies, modify presentation of content, and compromise confidential information, with new attack vectors being discovered on a regular basis. Use contextual escaping on all untrusted data before using it to construct any portion of an HTTP response. The escaping method should be chosen based on the specific use case of the untrusted data, otherwise it may not protect fully against the attack. For example, if the data is being written to the body of an HTML page, use HTML entity escaping; if the data is being written to an attribute, use attribute escaping; etc. Both the OWASP Java Encoder library and the Microsoft AntiXSS library provide contextual escaping methods. 
For more details on contextual escaping, see https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.md. In addition, as a best practice, always validate untrusted input to ensure that it conforms to the expected format, using centralized data validation routines when possible. References: CWE OWASP Supported Cleansers", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "CLOSED", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 80, + "name": "Improper Neutralization of Script-Related HTML Tags in a Web Page (Basic XSS)", + "href": "https://api.veracode.com/appsec/v1/cwes/80" + }, + "file_path": "/WEB-INF/views/contact.jsp", + "file_name": "update.jsp", + "module": "CoolProduct.jsa", + "relative_location": -1, + "finding_category": { + "id": 20, + "name": "Cross-Site Scripting (XSS)", + "href": "https://api.veracode.com/appsec/v1/categories/20" + }, + "procedure": "lambda_4", + "exploitability": 1, + "attack_vector": "page.html", + "file_line_number": 59 + }, + "build_id": 1268436506 + }, + { + "issue_id": 16516853, + "scan_type": "STATIC", + "description": "The application contains hard-coded information that may contain credentials to an external service. The use of hard-coded credentials significantly increases the possibility that the account being protected will be compromised. set Store credentials out-of-band from the application code. Follow best practices for protecting credentials stored in locations such as configuration or properties files. References: CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 798, + "name": "Use of Hard-coded Credentials", + "href": "https://api.veracode.com/appsec/v1/cwes/798" + }, + "file_path": "/WEB-INF/layouts/default.jsp", + "file_name": "default.jsp", + "module": "CoolProduct.jsa", + "relative_location": -1, + "finding_category": { + "id": 10, + "name": "Credentials Management", + "href": "https://api.veracode.com/appsec/v1/categories/10" + }, + "procedure": "!main", + "exploitability": 1, + "attack_vector": "set", + "file_line_number": 135 + }, + "build_id": 1268436506 + }, + { + "issue_id": 16516871, + "scan_type": "STATIC", + "description": "SSRF attacks works when an attacker can pass a URL to access other upstream resources. This happens when resource urls aren't validated for expected destination. Attackers can craft a vulnerable URL to access variety of sensitive information like AWS metadata, database services and other web enabled services. As a mitigation, create a list of allowed hosts and protocols and validate every request against it. As far as possible, don't allow URLs coming directly from user to be passed on these APIs.References : CWE 918
    SSRF Attacks
    ", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 918, + "name": "Server-Side Request Forgery (SSRF)", + "href": "https://api.veracode.com/appsec/v1/cwes/918" + }, + "file_path": "com/insecure-company-alliance/CoolProduct/class.java", + "file_name": "class.java", + "module": "CoolProduct.jar", + "relative_location": 1, + "finding_category": { + "id": 8, + "name": "Information Leakage", + "href": "https://api.veracode.com/appsec/v1/categories/8" + }, + "procedure": "com.insecure-company-alliance.CoolProduct.Class.execute", + "exploitability": 1, + "attack_vector": "org.apache.http.client.HttpClient.execute", + "file_line_number": 147 + }, + "build_id": 1268436506 + } + ] + }, + "_links": { + "first": { + "href": "link" + }, + "self": { + "href": "link", + "templated": true + }, + "next": { + "href": "link" + }, + "last": { + "href": "link" + }, + "application": { + "href": "link" + }, + "non-sca": { + "href": "link", + "templated": true + } + }, + "page": { + "size": 5, + "total_elements": 10, + "total_pages": 2, + "number": 0 + } +} \ No newline at end of file diff --git a/unittests/scans/veracode/static_findings_list_format.json b/unittests/scans/veracode/static_findings_list_format.json new file mode 100644 index 0000000000..a790a13ce6 --- /dev/null +++ b/unittests/scans/veracode/static_findings_list_format.json @@ -0,0 +1,164 @@ +{ + "findings": [ + { + "issue_id": 16516872, + "scan_type": "STATIC", + "description": "This call to page.html() contains a cross-site scripting (XSS) flaw. The application populates the HTTP response with untrusted input, allowing an attacker to embed malicious content, such as Javascript code, which will be executed in the context of the victim's browser. XSS vulnerabilities are commonly exploited to steal or manipulate cookies, modify presentation of content, and compromise confidential information, with new attack vectors being discovered on a regular basis. Use contextual escaping on all untrusted data before using it to construct any portion of an HTTP response. The escaping method should be chosen based on the specific use case of the untrusted data, otherwise it may not protect fully against the attack. For example, if the data is being written to the body of an HTML page, use HTML entity escaping; if the data is being written to an attribute, use attribute escaping; etc. Both the OWASP Java Encoder library and the Microsoft AntiXSS library provide contextual escaping methods. For more details on contextual escaping, see https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.md. In addition, as a best practice, always validate untrusted input to ensure that it conforms to the expected format, using centralized data validation routines when possible. 
References: CWE OWASP Supported Cleansers", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 80, + "name": "Improper Neutralization of Script-Related HTML Tags in a Web Page (Basic XSS)", + "href": "https://api.veracode.com/appsec/v1/cwes/80" + }, + "file_path": "/WEB-INF/views/contact.jsp", + "file_name": "update.jsp", + "module": "CoolProduct.jsa", + "relative_location": -1, + "finding_category": { + "id": 20, + "name": "Cross-Site Scripting (XSS)", + "href": "https://api.veracode.com/appsec/v1/categories/20" + }, + "procedure": "lambda_3", + "exploitability": 1, + "attack_vector": "page.html", + "file_line_number": 50 + }, + "build_id": 1268436506 + }, + { + "issue_id": 16516873, + "scan_type": "STATIC", + "description": "This call to page.html() contains a cross-site scripting (XSS) flaw. The application populates the HTTP response with untrusted input, allowing an attacker to embed malicious content, such as Javascript code, which will be executed in the context of the victim's browser. XSS vulnerabilities are commonly exploited to steal or manipulate cookies, modify presentation of content, and compromise confidential information, with new attack vectors being discovered on a regular basis. Use contextual escaping on all untrusted data before using it to construct any portion of an HTTP response. The escaping method should be chosen based on the specific use case of the untrusted data, otherwise it may not protect fully against the attack. For example, if the data is being written to the body of an HTML page, use HTML entity escaping; if the data is being written to an attribute, use attribute escaping; etc. Both the OWASP Java Encoder library and the Microsoft AntiXSS library provide contextual escaping methods. For more details on contextual escaping, see https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.md. In addition, as a best practice, always validate untrusted input to ensure that it conforms to the expected format, using centralized data validation routines when possible. 
References: CWE OWASP Supported Cleansers", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "CLOSED", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 80, + "name": "Improper Neutralization of Script-Related HTML Tags in a Web Page (Basic XSS)", + "href": "https://api.veracode.com/appsec/v1/cwes/80" + }, + "file_path": "/WEB-INF/views/contact.jsp", + "file_name": "update.jsp", + "module": "CoolProduct.jsa", + "relative_location": -1, + "finding_category": { + "id": 20, + "name": "Cross-Site Scripting (XSS)", + "href": "https://api.veracode.com/appsec/v1/categories/20" + }, + "procedure": "lambda_4", + "exploitability": 1, + "attack_vector": "page.html", + "file_line_number": 59 + }, + "build_id": 1268436506 + }, + { + "issue_id": 16516853, + "scan_type": "STATIC", + "description": "The application contains hard-coded information that may contain credentials to an external service. The use of hard-coded credentials significantly increases the possibility that the account being protected will be compromised. set Store credentials out-of-band from the application code. Follow best practices for protecting credentials stored in locations such as configuration or properties files. References: CWE", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 798, + "name": "Use of Hard-coded Credentials", + "href": "https://api.veracode.com/appsec/v1/cwes/798" + }, + "file_path": "/WEB-INF/layouts/default.jsp", + "file_name": "default.jsp", + "module": "CoolProduct.jsa", + "relative_location": -1, + "finding_category": { + "id": 10, + "name": "Credentials Management", + "href": "https://api.veracode.com/appsec/v1/categories/10" + }, + "procedure": "!main", + "exploitability": 1, + "attack_vector": "set", + "file_line_number": 135 + }, + "build_id": 1268436506 + }, + { + "issue_id": 16516871, + "scan_type": "STATIC", + "description": "SSRF attacks works when an attacker can pass a URL to access other upstream resources. This happens when resource urls aren't validated for expected destination. Attackers can craft a vulnerable URL to access variety of sensitive information like AWS metadata, database services and other web enabled services. As a mitigation, create a list of allowed hosts and protocols and validate every request against it. As far as possible, don't allow URLs coming directly from user to be passed on these APIs.References : CWE 918
    SSRF Attacks
    ", + "count": 1, + "context_type": "APPLICATION", + "context_guid": "zyx789-xyz987", + "violates_policy": true, + "finding_status": { + "first_found_date": "2021-08-27T07:33:40.989Z", + "status": "OPEN", + "resolution": "UNRESOLVED", + "mitigation_review_status": "NONE", + "new": false, + "resolution_status": "NONE", + "last_seen_date": "2021-09-10T04:06:31.614Z" + }, + "finding_details": { + "severity": 3, + "cwe": { + "id": 918, + "name": "Server-Side Request Forgery (SSRF)", + "href": "https://api.veracode.com/appsec/v1/cwes/918" + }, + "file_path": "com/insecure-company-alliance/CoolProduct/class.java", + "file_name": "class.java", + "module": "CoolProduct.jar", + "relative_location": 1, + "finding_category": { + "id": 8, + "name": "Information Leakage", + "href": "https://api.veracode.com/appsec/v1/categories/8" + }, + "procedure": "com.insecure-company-alliance.CoolProduct.Class.execute", + "exploitability": 1, + "attack_vector": "org.apache.http.client.HttpClient.execute", + "file_line_number": 147 + }, + "build_id": 1268436506 + } + ] +} \ No newline at end of file diff --git a/unittests/tools/test_veracode_parser.py b/unittests/tools/test_veracode_parser.py index 4469e11eca..7566077fd2 100644 --- a/unittests/tools/test_veracode_parser.py +++ b/unittests/tools/test_veracode_parser.py @@ -2,7 +2,7 @@ from ..dojo_test_case import DojoTestCase from dojo.tools.veracode.parser import VeracodeParser -from dojo.models import Test, Product_Type, Product, Engagement +from dojo.models import Test, Product_Type, Product, Engagement, Endpoint class TestVeracodeScannerParser(DojoTestCase): @@ -189,3 +189,198 @@ def test_maven_component_name(self): self.assertEqual("commons-jxpath", finding.component_name) self.assertEqual("1.3", finding.component_version) self.assertEqual(9.8, finding.cvssv3_score) + + def json_static_findings_test(self, file_name): + testfile = open(file_name) + parser = VeracodeParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(3, len(findings)) + finding = findings[0] + self.assertEqual(finding.title, "Cross-Site Scripting (XSS)") + self.assertEqual(finding.severity, "Medium") + self.assertEqual(finding.cwe, 80) + self.assertEqual(finding.description, ( + '### Meta Information\n' + '**Exploitability Predication**: Likely\n' + '**Attack Vector**: page.html\n' + '**Module**: CoolProduct.jsa\n' + '### Details\n' + 'This call to page.html() contains a cross-site scripting ' + '(XSS) flaw. The application populates the HTTP response with ' + 'untrusted input, allowing an attacker to embed malicious ' + 'content, such as Javascript code, which will be executed in ' + "the context of the victim's browser. XSS vulnerabilities are " + 'commonly exploited to steal or manipulate cookies, modify ' + 'presentation of content, and compromise confidential ' + 'information, with new attack vectors being discovered on a ' + 'regular basis.' + )) + self.assertEqual(finding.mitigation, ( + 'Use contextual escaping on all untrusted data before using it ' + 'to construct any portion of an HTTP response. The escaping ' + 'method should be chosen based on the specific use case of the ' + 'untrusted data, otherwise it may not protect fully against the ' + 'attack. For example, if the data is being written to the body ' + 'of an HTML page, use HTML entity escaping; if the data is ' + 'being written to an attribute, use attribute escaping; etc. ' + 'Both the OWASP Java Encoder library and the Microsoft AntiXSS ' + 'library provide contextual escaping methods. 
For more details ' + 'on contextual escaping, see ' + 'https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.md. ' + 'In addition, as a best practice, always validate untrusted ' + 'input to ensure that it conforms to the expected format, using ' + 'centralized data validation routines when possible.' + )) + self.assertEqual(finding.references, ( + '- [CWE](https://cwe.mitre.org/data/definitions/79.html)\n' + '- [OWASP](https://owasp.org/www-community/attacks/xss/)\n' + '- [Supported Cleansers](https://docs.veracode.com/r/review_cleansers)\n' + )) + self.assertEqual(finding.line, 50) + self.assertEqual(finding.sast_source_line, 50) + self.assertEqual(finding.sast_sink_line, 50) + self.assertEqual(finding.file_path, "/WEB-INF/views/contact.jsp") + self.assertEqual(finding.sast_source_file_path, "/WEB-INF/views/contact.jsp") + self.assertEqual(finding.sast_sink_file_path, "/WEB-INF/views/contact.jsp") + self.assertEqual(finding.sast_source_object, "lambda_3") + self.assertEqual(finding.sast_sink_object, "lambda_3") + self.assertEqual(finding.unsaved_tags, ["policy-violation"]) + + def test_json_static_findings_list_format(self): + self.json_static_findings_test("unittests/scans/veracode/static_findings_list_format.json") + + def test_json_static_embedded_format(self): + self.json_static_findings_test("unittests/scans/veracode/static_embedded_format.json") + + def json_dynamic_findings_test(self, file_name): + testfile = open(file_name) + parser = VeracodeParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(3, len(findings)) + finding = findings[0] + self.assertEqual(finding.title, "Code Injection") + self.assertEqual(finding.severity, "High") + self.assertEqual(finding.cwe, 74) + self.assertEqual(finding.description, ( + '### Meta Information\n' + '**Plugin**: Code Injection\n' + '**Attack Vector**: Improper Neutralization of Special ' + 'Elements in Output Used by a Downstream Component ' + "('Injection')\n" + '**Vulnerable Parameter**: api\n' + '### Details\n' + 'Injections happen when untrusted data is inserted into an ' + 'interpreted syntax and subsequently evaluated on the server ' + 'side. This syntax may be a SQL query, a parsed JSON or XML ' + 'document, an executed script or other syntax that may be in ' + 'use within the application. Although the target syntax has ' + 'not been identified, the application behavior demonstrates ' + 'that the input HTTP parameter may be inserted without proper ' + 'escaping. It was observed by sending valid and invalid ' + 'payloads that should throw or should not throw errors. By ' + 'inserting a proper and improper comments such as ``, `*/_/*`, ' + '`/*_*/` into the `api` parameter, the scanner was able to ' + 'spot a difference in the responses, which is a good indicator ' + 'of a potential vulnerability. Confidence: medium. Response ' + 'codes: `404`, `404`, `404`. Similarities: `` vs `*/_/*`: 0.0; ' + '`*/_/*` vs `/*_*/`: 0.0; `` vs `/*_*/`: 1.0.' + )) + self.assertEqual(finding.mitigation, ( + 'It is recommended to identify how the current parameter is ' + 'used in the application source code, and make sure it is ' + 'escaped before inserting into any syntax or query. You can add ' + 'valid values to an allowlist and invalid values to a ' + 'blocklist.' 
+ )) + self.assertEqual(finding.references, ( + '- [CWE](http://cwe.mitre.org/cgi-bin/jumpmenu.cgi?id=74)\n' + )) + self.assertEqual(finding.unsaved_tags, ["policy-violation"]) + self.assertEqual(finding.unsaved_endpoints[0], Endpoint( + protocol="https", + host="application.insecure-company-alliance.com", + port=443, + path="api/*_*//new_user_sign_up", + query="param=wild-things" + )) + + def test_json_dynamic_findings_list_format(self): + self.json_dynamic_findings_test("unittests/scans/veracode/dynamic_findings_list_format.json") + + def test_json_dynamic_embedded_format(self): + self.json_dynamic_findings_test("unittests/scans/veracode/dynamic_embedded_format.json") + + def json_sca_findings_test(self, file_name): + testfile = open(file_name) + parser = VeracodeParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(7, len(findings)) + finding = findings[0] + self.assertEqual(finding.title, "Uncontrolled Resource Consumption") + self.assertEqual(finding.severity, "High") + self.assertEqual(finding.cwe, 400) + self.assertEqual(finding.description, ( + '### Meta Information\n' + '**Product ID**: abc123-bca321\n' + '**Component ID**: efg456-gfe654\n' + '**Language**: JAVA\n' + '#### Component Locations\n' + '- path/to/alpha/spring-boot-autoconfigure-2.5.14.jar\n' + '- path/to/beta/spring-boot-autoconfigure-2.5.14.jar\n' + '- path/to/charlie/spring-boot-autoconfigure-2.5.14.jar\n' + '- path/to/delta/spring-boot-autoconfigure-2.5.14.jar\n' + '#### Licenses\n' + '- apache-2.0: Low\n' + ' - Low-risk licenses are typically permissive licenses ' + 'that require you to preserve the copyright and license ' + 'notices, but allow distribution under different terms without ' + 'disclosing source code.\n' + '### Details\n' + 'spring-boot-autoconfigure is vulnerable to Denial Of Service ' + '(DoS). The vulnerability is applicable when the application ' + 'has Spring MVC auto-configuration enabled and uses the Spring ' + 'Boot welcome page, which can be either static or templated, ' + 'and the application is deployed behind a proxy which caches ' + 'the 404 responses. An attacker can cause the application to ' + 'crash by submitting a request to the welcome page which the ' + 'server is unable to properly respond to.' + )) + self.assertEqual(finding.cvssv3, "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H") + self.assertEqual(finding.component_name, "spring-boot-autoconfigure.jar") + self.assertEqual(finding.component_version, "2.5.14") + self.assertEqual(finding.unsaved_tags, ["policy-violation"]) + self.assertEqual(finding.unsaved_vulnerability_ids, ["CVE-2023-20883"]) + finding = findings[3] + self.assertEqual(finding.title, "inflight - SRCCLR-SID-41137") + self.assertEqual(finding.severity, "Medium") + self.assertEqual(finding.cwe, 0) + self.assertEqual(finding.description, ( + '### Meta Information\n' + '**Product ID**: abc123-bca321\n' + '**Component ID**: efg456-gfe654\n' + '**Language**: JAVASCRIPT\n' + '#### Component Locations\n' + '- path/to/alpha/node_modules:inflight\n' + '#### Licenses\n' + '- isc: Low\n' + ' - Low-risk licenses are typically permissive licenses ' + 'that require you to preserve the copyright and license ' + 'notices, but allow distribution under different terms without ' + 'disclosing source code.\n' + '### Details\n' + 'inflight is vulnerable to a Memory Leak. The vulnerability is ' + 'caused by improper memory management due to a lack of ' + 'resource freeing, which can result in Denial of Service ' + 'conditions.' 
+ )) + self.assertEqual(finding.cvssv3, "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H") + self.assertEqual(finding.component_name, "inflight") + self.assertEqual(finding.component_version, "1.0.6") + self.assertEqual(finding.unsaved_tags, ["policy-violation"]) + self.assertEqual(finding.unsaved_vulnerability_ids, ["SRCCLR-SID-41137"]) + + def test_json_sca_findings_list_format(self): + self.json_sca_findings_test("unittests/scans/veracode/sca_findings_list_format.json") + + def test_json_sca_embedded_format(self): + self.json_sca_findings_test("unittests/scans/veracode/sca_embedded_format.json") From c3c72a1a073c433728dd08845e6a3b76f1b575c7 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Tue, 25 Jul 2023 11:33:28 -0500 Subject: [PATCH 56/85] Update veracode.md --- docs/content/en/integrations/parsers/file/veracode.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/en/integrations/parsers/file/veracode.md b/docs/content/en/integrations/parsers/file/veracode.md index a155c1a9d4..54978e2305 100644 --- a/docs/content/en/integrations/parsers/file/veracode.md +++ b/docs/content/en/integrations/parsers/file/veracode.md @@ -25,7 +25,7 @@ Veracode reports can be ingested in either XML or JSON Format } ``` - Embedded - - This response can be save directly to a file and uploaded + - This response can be saved directly to a file and uploaded - Not as ideal for crafting a refined report consisting of multiple requests - Desired Format: ``` From eea612b5d25a2069a002e42e4225e894ff959c72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Jul 2023 12:08:43 -0500 Subject: [PATCH 57/85] Bump boto3 from 1.28.8 to 1.28.10 (#8424) Bumps [boto3](https://github.com/boto/boto3) from 1.28.8 to 1.28.10. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.8...1.28.10) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ef1c950761..002a20d8d1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.0.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.8 # Required for Celery Broker AWS (SQS) support +boto3==1.28.10 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From 935786197440a9807c31d842fe168ab4743b1049 Mon Sep 17 00:00:00 2001 From: Aleksandr Chebotov <47745270+al-cheb@users.noreply.github.com> Date: Tue, 25 Jul 2023 20:15:14 +0300 Subject: [PATCH 58/85] Add extraVolumes for initializer job (#8364) Co-authored-by: Alexander Chebotov --- .../defectdojo/templates/initializer-job.yaml | 24 ++++++++++++-- helm/defectdojo/values.yaml | 33 +++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/helm/defectdojo/templates/initializer-job.yaml b/helm/defectdojo/templates/initializer-job.yaml index f96ce99c40..5d04165220 100644 --- a/helm/defectdojo/templates/initializer-job.yaml +++ b/helm/defectdojo/templates/initializer-job.yaml @@ -1,4 +1,4 @@ -{{- if .Values.initializer.run }} +{{- if .Values.initializer.run }} {{- $fullName := include "defectdojo.fullname" . -}} apiVersion: batch/v1 kind: Job @@ -32,6 +32,19 @@ spec: imagePullSecrets: - name: {{ .Values.imagePullSecrets }} {{- end }} + volumes: + {{- range .Values.initializer.extraVolumes }} + - name: userconfig-{{ .name }} + {{ .type }}: + {{- if (eq .type "configMap") }} + name: {{ .name }} + {{- else if (eq .type "secret") }} + secretName: {{ .name }} + {{- else if (eq .type "hostPath") }} + type: {{ .pathType | default "Directory" }} + path: {{ .hostPath }} + {{- end }} + {{- end }} containers: {{- if .Values.cloudsql.enabled }} - name: cloudsql-proxy @@ -60,7 +73,14 @@ spec: {{- if .Values.securityContext.enabled }} securityContext: {{- toYaml .Values.securityContext.djangoSecurityContext | nindent 10 }} - {{- end }} + {{- end }} + volumeMounts: + {{- range .Values.initializer.extraVolumes }} + - name: userconfig-{{ .name }} + readOnly: true + mountPath: {{ .path }} + subPath: {{ .subPath }} + {{- end }} command: - /entrypoint-initializer.sh envFrom: diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index 33539871b0..79a0db214c 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -313,6 +313,39 @@ initializer: cpu: 2000m memory: 512Mi + # A list of extra volumes to mount. This + # is useful for bringing in extra data that can be referenced by other configurations + # at a well known path, such as local_settings. The + # value of this should be a list of objects. + # + # Example: + # + # ```yaml + # extraVolumes: + # - type: configMap + # name: local_settings + # path: /app/dojo/settings/local_settings.py + # subPath: local_settings.py + # - type: hostPath + # name: host_directory + # path: /tmp + # hostPath: /tmp + # ``` + # + # Each object supports the following keys: + # + # - `type` - Type of the volume, must be one of "configMap", "secret", "hostPath". Case sensitive. + # Even is supported we are highly recommending to avoid hostPath for security reasons (usually blocked by PSP) + # - `name` - Name of the configMap or secret to be mounted. 
This also controls + # the path that it is mounted to. The volume will be mounted to `/consul/userconfig/`. + # - `path` - defines where file should be exposed + # - `subPath` - extracts only particular file from secret or configMap + # - `pathType` - only for hostPath, can be one of the "DirectoryOrCreate", "Directory" (default), "FileOrCreate", + # "File", "Socket", "CharDevice", "BlockDevice" + # - `hostPath` - only for hostPath, file or directory from local host + # @type: array + extraVolumes: [] + mysql: enabled: false auth: From b2a1f7d21fabc5166926f8563926ab133af0a75d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 25 Jul 2023 12:38:22 -0500 Subject: [PATCH 59/85] chore(deps): update mysql:5.7.42 docker digest from 5.7.42 to v (docker-compose.yml) (#8421) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 841505210f..694b2deb14 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -130,7 +130,7 @@ services: source: ./docker/extra_settings target: /app/docker/extra_settings mysql: - image: mysql:5.7.42@sha256:bd873931ef20f30a5a9bf71498ce4e02c88cf48b2e8b782c337076d814deebde + image: mysql:5.7.42@sha256:2eabad08824e3120dbec9096c276e3956e1922636c06fbb383ae9ea9c499bf43 profiles: - mysql-rabbitmq - mysql-redis From 0783d44d7c3d69051f8362cd0d809eb47906415f Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Tue, 25 Jul 2023 12:39:24 -0500 Subject: [PATCH 60/85] Add Reporter field to Finding PATCH/PUT (#8426) --- dojo/api_v2/serializers.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 4a78631b8b..4801e8038e 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -1652,6 +1652,9 @@ class FindingSerializer(TaggitSerializer, serializers.ModelSerializer): vulnerability_ids = VulnerabilityIdSerializer( source="vulnerability_id_set", many=True, required=False ) + reporter = serializers.PrimaryKeyRelatedField( + required=False, queryset=User.objects.all() + ) class Meta: model = Finding @@ -1714,6 +1717,9 @@ def update(self, instance, validated_data): instance = super(TaggitSerializer, self).update( instance, validated_data ) + # Save the reporter on the finding + if reporter_id := validated_data.get("reporter"): + instance.reporter = reporter_id # If we need to push to JIRA, an extra save call is needed. # Also if we need to update the mitigation date of the finding. From ba067b6d1e2155db2ee61ef587416e3c2b88786b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Jul 2023 13:07:48 -0500 Subject: [PATCH 61/85] Bump django-ratelimit from 4.0.0 to 4.1.0 (#8425) Bumps [django-ratelimit](https://github.com/jsocol/django-ratelimit) from 4.0.0 to 4.1.0. - [Release notes](https://github.com/jsocol/django-ratelimit/releases) - [Changelog](https://github.com/jsocol/django-ratelimit/blob/main/CHANGELOG) - [Commits](https://github.com/jsocol/django-ratelimit/compare/v4.0.0...v4.1.0) --- updated-dependencies: - dependency-name: django-ratelimit dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 002a20d8d1..98586a60bf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -74,7 +74,7 @@ hyperlink==21.0.0 django-test-migrations==1.3.0 djangosaml2==1.7.0 drf-spectacular==0.26.4 -django-ratelimit==4.0.0 +django-ratelimit==4.1.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support From 6d1d341b73c253cf9ad68714205ff1255f46e101 Mon Sep 17 00:00:00 2001 From: kiblik Date: Tue, 25 Jul 2023 23:07:05 +0000 Subject: [PATCH 62/85] Fix invalid OpenAPI schema + Add integration test (#8253) * Interation test: Test OpenAPI schema * Fix DateRangeFilter empty option * Apply suggestions from code review Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * Replace also others `''` to `None` * Fix build --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- .github/workflows/integration-tests.yml | 1 + Dockerfile.integration-tests-debian | 10 ++++++++- docker/entrypoint-integration-tests.sh | 28 ++++++++++++++++++++----- dojo/filters.py | 28 ++++++++++++------------- 4 files changed, 47 insertions(+), 20 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index e1eaaabcd1..b3d0eb1fdb 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -34,6 +34,7 @@ jobs: "tests/check_various_pages.py", "tests/notifications_test.py", "tests/tool_config.py", + "openapi-validatator", ] profile: ["mysql-rabbitmq", "postgres-redis"] os: [alpine, debian] diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian index 69dd5a754f..dc5465476a 100644 --- a/Dockerfile.integration-tests-debian +++ b/Dockerfile.integration-tests-debian @@ -11,7 +11,8 @@ RUN \ curl \ unzip \ gpg \ - jq \ + maven \ + jq \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists && \ @@ -43,6 +44,13 @@ RUN \ chmod -R 0755 . WORKDIR /app +# install openapi-generator-cli +RUN \ + curl https://raw.githubusercontent.com/OpenAPITools/openapi-generator/master/bin/utils/openapi-generator-cli.sh > /usr/local/bin/openapi-generator-cli && \ + chmod +x /usr/local/bin/openapi-generator-cli && \ + echo "Latest OpenAPI Generator version (currently pinned to 6.6.0):" && \ + openapi-generator-cli version + COPY docker/wait-for-it.sh \ docker/entrypoint-integration-tests.sh \ / diff --git a/docker/entrypoint-integration-tests.sh b/docker/entrypoint-integration-tests.sh index a14f9fb635..168e2c6f2d 100755 --- a/docker/entrypoint-integration-tests.sh +++ b/docker/entrypoint-integration-tests.sh @@ -41,12 +41,22 @@ function success() { echo "IT FILENAME: $DD_INTEGRATION_TEST_FILENAME" if [[ ! 
-z "$DD_INTEGRATION_TEST_FILENAME" ]]; then - test=$DD_INTEGRATION_TEST_FILENAME - echo "Running: $test" - if python3 $DD_INTEGRATION_TEST_FILENAME; then - success $test + if [[ "$DD_INTEGRATION_TEST_FILENAME" == "openapi-validatator" ]]; then + test="OpenAPI schema validation" + echo "Running: $test" + if OPENAPI_GENERATOR_VERSION=6.6.0 openapi-generator-cli validate -i "$DD_BASE_URL/api/v2/oa3/schema/?format=json" --recommend; then + success $test + else fail + fail $test + fi else - fail $test + test=$DD_INTEGRATION_TEST_FILENAME + echo "Running: $test" + if python3 $DD_INTEGRATION_TEST_FILENAME; then + success $test + else + fail $test + fi fi else @@ -279,5 +289,13 @@ else fail $test fi + test="OpenAPI schema validation" + echo "Running: $test" + if OPENAPI_GENERATOR_VERSION=6.6.0 openapi-generator-cli validate -i "$DD_BASE_URL/api/v2/oa3/schema/?format=json" --recommend; then + success $test + else fail + fail $test + fi + exec echo "Done Running all configured integration tests." fi diff --git a/dojo/filters.py b/dojo/filters.py index e99665293b..7b249c53b4 100644 --- a/dojo/filters.py +++ b/dojo/filters.py @@ -111,7 +111,7 @@ def under_review(self, qs, name): return qs.filter(UNDER_REVIEW_QUERY) options = { - '': (_('Any'), any), + None: (_('Any'), any), 0: (_('Open'), open), 1: (_('Verified'), verified), 2: (_('Out Of Scope'), out_of_scope), @@ -138,7 +138,7 @@ def filter(self, qs, value): try: value = int(value) except (ValueError, TypeError): - value = '' + value = None return self.options[value][1](self, qs, self.field_name) @@ -364,7 +364,7 @@ def __init__(self, *args, **kwargs): class DateRangeFilter(ChoiceFilter): options = { - '': (_('Any date'), lambda qs, name: qs.all()), + None: (_('Any date'), lambda qs, name: qs.all()), 1: (_('Today'), lambda qs, name: qs.filter(**{ '%s__year' % name: now().year, '%s__month' % name: now().month, @@ -404,13 +404,13 @@ def filter(self, qs, value): try: value = int(value) except (ValueError, TypeError): - value = '' + value = None return self.options[value][1](qs, self.field_name) class DateRangeOmniFilter(ChoiceFilter): options = { - '': (_('Any date'), lambda qs, name: qs.all()), + None: (_('Any date'), lambda qs, name: qs.all()), 1: (_('Today'), lambda qs, name: qs.filter(**{ '%s__year' % name: now().year, '%s__month' % name: now().month, @@ -466,13 +466,13 @@ def filter(self, qs, value): try: value = int(value) except (ValueError, TypeError): - value = '' + value = None return self.options[value][1](qs, self.field_name) class ReportBooleanFilter(ChoiceFilter): options = { - '': (_('Either'), lambda qs, name: qs.all()), + None: (_('Either'), lambda qs, name: qs.all()), 1: (_('Yes'), lambda qs, name: qs.filter(**{ '%s' % name: True })), @@ -490,7 +490,7 @@ def filter(self, qs, value): try: value = int(value) except (ValueError, TypeError): - value = '' + value = None return self.options[value][1](qs, self.field_name) @@ -510,7 +510,7 @@ def was_accepted(self, qs, name): return qs.filter(WAS_ACCEPTED_FINDINGS_QUERY) options = { - '': (_('Either'), any), + None: (_('Either'), any), 1: (_('Yes'), accepted), 2: (_('No'), not_accepted), 3: (_('Was'), was_accepted), @@ -525,7 +525,7 @@ def filter(self, qs, value): try: value = int(value) except (ValueError, TypeError): - value = '' + value = None return self.options[value][1](self, qs, self.field_name) @@ -581,7 +581,7 @@ def past_year(self, qs, name): return self.past_x_days(qs, name, 365) options = { - '': (_('Past 30 days'), past_thirty_days), + None: (_('Past 30 days'), past_thirty_days), 
1: (_('Past 7 days'), past_seven_days), 2: (_('Past 90 days'), past_ninety_days), 3: (_('Current month'), current_month), @@ -609,7 +609,7 @@ def filter(self, qs, value): try: value = int(value) except (ValueError, TypeError): - value = '' + value = None return self.options[value][1](self, qs, self.field_name) @@ -2270,7 +2270,7 @@ def choice_question(self, qs, name): return qs.filter(polymorphic_ctype=ContentType.objects.get_for_model(ChoiceQuestion)) options = { - '': (_('Any'), any), + None: (_('Any'), any), 1: (_('Text Question'), text_question), 2: (_('Choice Question'), choice_question), } @@ -2284,7 +2284,7 @@ def filter(self, qs, value): try: value = int(value) except (ValueError, TypeError): - value = '' + value = None return self.options[value][1](self, qs, self.options[value][0]) From 8ad54d2cd4ad121bccf9979918ae39f93f6efbff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Jul 2023 10:37:00 -0500 Subject: [PATCH 63/85] Bump boto3 from 1.28.10 to 1.28.11 (#8430) Bumps [boto3](https://github.com/boto/boto3) from 1.28.10 to 1.28.11. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.10...1.28.11) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 98586a60bf..8d1a4b7ce3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.1.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.10 # Required for Celery Broker AWS (SQS) support +boto3==1.28.11 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From 9c98b1cd0b9dd7dfc927c5b298f6300db7a7372a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Jul 2023 10:37:22 -0500 Subject: [PATCH 64/85] Bump packageurl-python from 0.11.1 to 0.11.2 (#8429) Bumps [packageurl-python](https://github.com/package-url/packageurl-python) from 0.11.1 to 0.11.2. - [Release notes](https://github.com/package-url/packageurl-python/releases) - [Changelog](https://github.com/package-url/packageurl-python/blob/main/CHANGELOG.rst) - [Commits](https://github.com/package-url/packageurl-python/compare/v0.11.1...v0.11.2) --- updated-dependencies: - dependency-name: packageurl-python dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8d1a4b7ce3..3953b7b271 100644 --- a/requirements.txt +++ b/requirements.txt @@ -58,7 +58,7 @@ debugpy==1.6.7 python-gitlab==3.15.0 drf_yasg==1.21.5 cpe==1.2.1 -packageurl-python==0.11.1 +packageurl-python==0.11.2 django-crum==0.7.9 JSON-log-formatter==0.5.2 django-split-settings==1.2.0 From a62062468fbd967d78e1a4f8235c551b7d205eeb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Jul 2023 10:37:44 -0500 Subject: [PATCH 65/85] Bump markdown from 3.4.3 to 3.4.4 (#8428) Bumps [markdown](https://github.com/Python-Markdown/markdown) from 3.4.3 to 3.4.4. - [Changelog](https://github.com/Python-Markdown/markdown/blob/master/docs/change_log/release-2.6.md) - [Commits](https://github.com/Python-Markdown/markdown/compare/3.4.3...3.4.4) --- updated-dependencies: - dependency-name: markdown dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3953b7b271..6d02ca1dda 100644 --- a/requirements.txt +++ b/requirements.txt @@ -32,7 +32,7 @@ humanize==4.7.0 jira==3.5.2 PyGithub==1.58.2 lxml==4.9.3 -Markdown==3.4.3 +Markdown==3.4.4 mysqlclient==2.1.1 openpyxl==3.1.2 xlrd==1.2.0 From 2114c0b107c071145135d5ab78aa1efc81e793ef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 27 Jul 2023 09:00:05 -0500 Subject: [PATCH 66/85] Bump boto3 from 1.28.11 to 1.28.12 (#8433) Bumps [boto3](https://github.com/boto/boto3) from 1.28.11 to 1.28.12. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.11...1.28.12) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6d02ca1dda..fa93766814 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.1.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.11 # Required for Celery Broker AWS (SQS) support +boto3==1.28.12 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From 9db94de83cdce0e2c5a3853422eb1b6ae8bc1099 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 28 Jul 2023 12:12:03 -0500 Subject: [PATCH 67/85] Bump boto3 from 1.28.12 to 1.28.14 (#8436) Bumps [boto3](https://github.com/boto/boto3) from 1.28.12 to 1.28.14. 
- [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.12...1.28.14) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fa93766814..8cd88c6fe4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.1.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.12 # Required for Celery Broker AWS (SQS) support +boto3==1.28.14 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From fc7ef9bde9898c74ed3fef294131361b0dbff3e8 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 31 Jul 2023 14:06:39 +0000 Subject: [PATCH 68/85] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 2112f659aa..4e8aea8365 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.24.4", + "version": "2.25.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index b305a0bd05..4c1f6f5856 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa -__version__ = '2.24.4' +__version__ = '2.25.0-dev' __url__ = 'https://github.com/DefectDojo/django-DefectDojo' __docs__ = 'https://documentation.defectdojo.com' diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index cf2bba8ed6..4f16ff9c2f 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.24.4" +appVersion: "2.25.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.78 +version: 1.6.79-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From ee6018ac810f78682419cffe376422733dc8bfd5 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 31 Jul 2023 14:06:46 +0000 Subject: [PATCH 69/85] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 2112f659aa..4e8aea8365 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.24.4", + "version": "2.25.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index b305a0bd05..4c1f6f5856 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa -__version__ = '2.24.4' +__version__ = '2.25.0-dev' __url__ = 'https://github.com/DefectDojo/django-DefectDojo' __docs__ = 'https://documentation.defectdojo.com' diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index cf2bba8ed6..4f16ff9c2f 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.24.4" +appVersion: "2.25.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.78 +version: 1.6.79-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 999bf9c65cce055c8bf82787c245928e7681c241 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 09:49:47 -0500 Subject: [PATCH 70/85] Bump uwsgi from 2.0.21 to 2.0.22 (#8437) Bumps [uwsgi](https://github.com/unbit/uwsgi-docs) from 2.0.21 to 2.0.22. - [Commits](https://github.com/unbit/uwsgi-docs/commits) --- updated-dependencies: - dependency-name: uwsgi dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8cd88c6fe4..6d528d373d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,7 +46,7 @@ requests==2.31.0 sqlalchemy==2.0.19 # Required by Celery broker transport supervisor==4.2.5 urllib3==1.26.11 -uWSGI==2.0.21 +uWSGI==2.0.22 vobject==0.9.6.1 whitenoise==5.2.0 titlecase==2.3 From fcab9640845c3e15174f6ac6b56f43d4818a4194 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 13:36:47 -0500 Subject: [PATCH 71/85] Bump vcrpy from 5.0.0 to 5.1.0 (#8443) Bumps [vcrpy](https://github.com/kevin1024/vcrpy) from 5.0.0 to 5.1.0. - [Release notes](https://github.com/kevin1024/vcrpy/releases) - [Changelog](https://github.com/kevin1024/vcrpy/blob/master/docs/changelog.rst) - [Commits](https://github.com/kevin1024/vcrpy/compare/v5.0.0...v5.1.0) --- updated-dependencies: - dependency-name: vcrpy dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6d528d373d..b8e47f90db 100644 --- a/requirements.txt +++ b/requirements.txt @@ -64,7 +64,7 @@ JSON-log-formatter==0.5.2 django-split-settings==1.2.0 django-debug-toolbar==4.1.0 django-debug-toolbar-request-history==0.1.4 -vcrpy==5.0.0 +vcrpy==5.1.0 vcrpy-unittest==0.1.7 django-tagulous==1.3.3 PyJWT==2.8.0 From 25b6846ae29b9a5f8993fa321cb0194d7ef67fb4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 13:37:24 -0500 Subject: [PATCH 72/85] Bump boto3 from 1.28.14 to 1.28.15 (#8442) Bumps [boto3](https://github.com/boto/boto3) from 1.28.14 to 1.28.15. 
- [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.14...1.28.15) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b8e47f90db..8e17f9521d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.1.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.14 # Required for Celery Broker AWS (SQS) support +boto3==1.28.15 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From 3ff122d1ef8b6a49b3c4ac460439e556793dea00 Mon Sep 17 00:00:00 2001 From: kiblik Date: Mon, 31 Jul 2023 18:48:29 +0000 Subject: [PATCH 73/85] Fetch binary from the official docker image (#8431) --- Dockerfile.integration-tests-debian | 9 +++------ docker/entrypoint-integration-tests.sh | 4 ++-- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian index dc5465476a..9f613ba366 100644 --- a/Dockerfile.integration-tests-debian +++ b/Dockerfile.integration-tests-debian @@ -1,6 +1,7 @@ # code: language=Dockerfile +FROM openapitools/openapi-generator-cli:v6.6.0@sha256:54381220aecf2e77bb4b6694c4e1a03e733b49453292cd1af6f48b510f1f008a as openapitools FROM python:3.11.4-slim-bullseye@sha256:9b4d90af2003eef5d862f8118d8645d37d170402645a09e48241a3e492a0d4dc as build WORKDIR /app RUN \ @@ -11,7 +12,7 @@ RUN \ curl \ unzip \ gpg \ - maven \ + default-jre-headless \ jq \ && \ apt-get clean && \ @@ -45,11 +46,7 @@ RUN \ WORKDIR /app # install openapi-generator-cli -RUN \ - curl https://raw.githubusercontent.com/OpenAPITools/openapi-generator/master/bin/utils/openapi-generator-cli.sh > /usr/local/bin/openapi-generator-cli && \ - chmod +x /usr/local/bin/openapi-generator-cli && \ - echo "Latest OpenAPI Generator version (currently pinned to 6.6.0):" && \ - openapi-generator-cli version +COPY --from=openapitools /opt/openapi-generator/modules/openapi-generator-cli/target/openapi-generator-cli.jar /usr/local/bin/openapi-generator-cli.jar COPY docker/wait-for-it.sh \ docker/entrypoint-integration-tests.sh \ diff --git a/docker/entrypoint-integration-tests.sh b/docker/entrypoint-integration-tests.sh index 168e2c6f2d..3da46f9bc6 100755 --- a/docker/entrypoint-integration-tests.sh +++ b/docker/entrypoint-integration-tests.sh @@ -44,7 +44,7 @@ if [[ ! 
-z "$DD_INTEGRATION_TEST_FILENAME" ]]; then if [[ "$DD_INTEGRATION_TEST_FILENAME" == "openapi-validatator" ]]; then test="OpenAPI schema validation" echo "Running: $test" - if OPENAPI_GENERATOR_VERSION=6.6.0 openapi-generator-cli validate -i "$DD_BASE_URL/api/v2/oa3/schema/?format=json" --recommend; then + if java -jar /usr/local/bin/openapi-generator-cli.jar validate -i "$DD_BASE_URL/api/v2/oa3/schema/?format=json" --recommend; then success $test else fail fail $test @@ -291,7 +291,7 @@ else test="OpenAPI schema validation" echo "Running: $test" - if OPENAPI_GENERATOR_VERSION=6.6.0 openapi-generator-cli validate -i "$DD_BASE_URL/api/v2/oa3/schema/?format=json" --recommend; then + if java -jar /usr/local/bin/openapi-generator-cli.jar validate -i "$DD_BASE_URL/api/v2/oa3/schema/?format=json" --recommend; then success $test else fail fail $test From b0f512281fa0a8254ec62b3c61d32b34eac47c5f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 14:19:35 -0500 Subject: [PATCH 74/85] Bump python from `9efc6e1` to `9efc6e1` (#8444) Bumps python from `9efc6e1` to `9efc6e1`. --- updated-dependencies: - dependency-name: python dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile.django-debian | 2 +- Dockerfile.integration-tests-debian | 2 +- Dockerfile.nginx-debian | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index 5228d3d79f..4a5bb5d667 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -5,7 +5,7 @@ # Dockerfile.nginx to use the caching mechanism of Docker. # Ref: https://devguide.python.org/#branchstatus -FROM python:3.11.4-slim-bullseye@sha256:9b4d90af2003eef5d862f8118d8645d37d170402645a09e48241a3e492a0d4dc as base +FROM python:3.11.4-slim-bullseye@sha256:52c7a54aa5e5068ce76edaf3f8652a64fb99e378fb89fb0bfbe21a8756d0013c as base FROM base as build WORKDIR /app RUN \ diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian index 9f613ba366..a40f146bb7 100644 --- a/Dockerfile.integration-tests-debian +++ b/Dockerfile.integration-tests-debian @@ -2,7 +2,7 @@ # code: language=Dockerfile FROM openapitools/openapi-generator-cli:v6.6.0@sha256:54381220aecf2e77bb4b6694c4e1a03e733b49453292cd1af6f48b510f1f008a as openapitools -FROM python:3.11.4-slim-bullseye@sha256:9b4d90af2003eef5d862f8118d8645d37d170402645a09e48241a3e492a0d4dc as build +FROM python:3.11.4-slim-bullseye@sha256:52c7a54aa5e5068ce76edaf3f8652a64fb99e378fb89fb0bfbe21a8756d0013c as build WORKDIR /app RUN \ apt-get -y update && \ diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian index 4a8314c604..65da07c805 100644 --- a/Dockerfile.nginx-debian +++ b/Dockerfile.nginx-debian @@ -5,7 +5,7 @@ # Dockerfile.django-debian to use the caching mechanism of Docker. 
# Ref: https://devguide.python.org/#branchstatus -FROM python:3.11.4-slim-bullseye@sha256:9b4d90af2003eef5d862f8118d8645d37d170402645a09e48241a3e492a0d4dc as base +FROM python:3.11.4-slim-bullseye@sha256:52c7a54aa5e5068ce76edaf3f8652a64fb99e378fb89fb0bfbe21a8756d0013c as base FROM base as build WORKDIR /app RUN \ From fc1844c101cf32ee8fd88c60e23692e953f3e77a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 1 Aug 2023 08:18:17 -0500 Subject: [PATCH 75/85] Update rabbitmq:3.12.2-alpine Docker digest from 3.12.2 to 3.12.2-alpine (docker-compose.yml) (#8451) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 694b2deb14..df4f0e6a86 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -154,7 +154,7 @@ services: volumes: - defectdojo_postgres:/var/lib/postgresql/data rabbitmq: - image: rabbitmq:3.12.2-alpine@sha256:6c0d0405858c736586c171ce1538acdbe78430c8e0405a0fb1b3b05c193e8899 + image: rabbitmq:3.12.2-alpine@sha256:28580a859510121cf05d8865bf6a5d6c0b205e096ee55d85bfb7de7cf631a384 profiles: - mysql-rabbitmq - postgres-rabbitmq From 6699dc98f939d244382afae80502079710e1afc1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Aug 2023 11:53:19 -0500 Subject: [PATCH 76/85] Bump boto3 from 1.28.15 to 1.28.16 (#8455) Bumps [boto3](https://github.com/boto/boto3) from 1.28.15 to 1.28.16. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.15...1.28.16) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8e17f9521d..cf9e81a96c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.1.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.15 # Required for Celery Broker AWS (SQS) support +boto3==1.28.16 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.0 From e0bd79cf4ae2071d3067c19f68649e68e866d83a Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Tue, 1 Aug 2023 11:53:48 -0500 Subject: [PATCH 77/85] Replace "Nessus" with "Tenable" in Dedupe settings (#8449) --- dojo/settings/settings.dist.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 24064ba5be..17483fbda6 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1194,7 +1194,7 @@ def saml2_attrib_map_format(dict): 'Dockle Scan': ['title', 'description', 'vuln_id_from_tool'], 'Dependency Track Finding Packaging Format (FPF) Export': ['component_name', 'component_version', 'vulnerability_ids'], 'Mobsfscan Scan': ['title', 'severity', 'cwe'], - 'Nessus Scan': ['title', 'severity', 'vulnerability_ids', 'cwe'], + 'Tenable Scan': ['title', 'severity', 'vulnerability_ids', 'cwe'], 'Nexpose Scan': ['title', 'severity', 'vulnerability_ids', 'cwe'], # possible improvement: in the scanner put the library name into file_path, then dedup on cwe + file_path + severity 'NPM Audit Scan': ['title', 'severity', 'file_path', 'vulnerability_ids', 'cwe'], @@ -1282,7 +1282,7 @@ def saml2_attrib_map_format(dict): 'SonarQube Scan': False, 'Dependency Check Scan': True, 'Mobsfscan Scan': False, - 'Nessus Scan': True, + 'Tenable Scan': True, 'Nexpose Scan': True, 'NPM Audit Scan': True, 'Yarn Audit Scan': True, @@ -1377,7 +1377,7 @@ def saml2_attrib_map_format(dict): 'SonarQube API Import': DEDUPE_ALGO_HASH_CODE, 'Dependency Check Scan': DEDUPE_ALGO_HASH_CODE, 'Dockle Scan': DEDUPE_ALGO_HASH_CODE, - 'Nessus Scan': DEDUPE_ALGO_HASH_CODE, + 'Tenable Scan': DEDUPE_ALGO_HASH_CODE, 'Nexpose Scan': DEDUPE_ALGO_HASH_CODE, 'NPM Audit Scan': DEDUPE_ALGO_HASH_CODE, 'Yarn Audit Scan': DEDUPE_ALGO_HASH_CODE, From 3ea64aa4273315c1cdd15c8f2c2c1e653b7e46f7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 3 Aug 2023 12:10:38 -0500 Subject: [PATCH 78/85] Update rabbitmq:3.12.2-alpine Docker digest from 3.12.2 to 3.12.2-alpine (docker-compose.yml) (#8463) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index df4f0e6a86..462e4ff508 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -154,7 +154,7 @@ services: volumes: - defectdojo_postgres:/var/lib/postgresql/data rabbitmq: - image: rabbitmq:3.12.2-alpine@sha256:28580a859510121cf05d8865bf6a5d6c0b205e096ee55d85bfb7de7cf631a384 + image: rabbitmq:3.12.2-alpine@sha256:e6ed332cd6598a62d119b5e37e5a665ddb082586e3de5eef626a68a392d924ab profiles: - mysql-rabbitmq - postgres-rabbitmq From 506b81f046023eb6709e94a3a0055930dae594a3 Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Aug 2023 12:11:07 -0500 Subject: [PATCH 79/85] Bump fontawesomefree from 6.4.0 to 6.4.2 (#8467) Bumps [fontawesomefree](https://github.com/FortAwesome/Font-Awesome) from 6.4.0 to 6.4.2. - [Release notes](https://github.com/FortAwesome/Font-Awesome/releases) - [Changelog](https://github.com/FortAwesome/Font-Awesome/blob/6.x/CHANGELOG.md) - [Commits](https://github.com/FortAwesome/Font-Awesome/compare/6.4.0...6.4.2) --- updated-dependencies: - dependency-name: fontawesomefree dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cf9e81a96c..d73e4a068c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -81,4 +81,4 @@ pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support boto3==1.28.16 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 -fontawesomefree==6.4.0 +fontawesomefree==6.4.2 From 64d9ea3413df2f9f1a7caa8bf5fee2c7244c2797 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Aug 2023 12:12:07 -0500 Subject: [PATCH 80/85] Bump debugpy from 1.6.7 to 1.6.8 (#8466) Bumps [debugpy](https://github.com/microsoft/debugpy) from 1.6.7 to 1.6.8. - [Release notes](https://github.com/microsoft/debugpy/releases) - [Commits](https://github.com/microsoft/debugpy/compare/v1.6.7...v1.6.8) --- updated-dependencies: - dependency-name: debugpy dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d73e4a068c..24ce05d821 100644 --- a/requirements.txt +++ b/requirements.txt @@ -54,7 +54,7 @@ social-auth-app-django==5.2.0 social-auth-core==4.4.2 Python-jose==3.3.0 gitpython==3.1.32 -debugpy==1.6.7 +debugpy==1.6.8 python-gitlab==3.15.0 drf_yasg==1.21.5 cpe==1.2.1 From e6ab545696c22c260e0e4bb0f74f2e298e96130b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Aug 2023 13:29:50 -0500 Subject: [PATCH 81/85] Bump boto3 from 1.28.16 to 1.28.18 (#8465) Bumps [boto3](https://github.com/boto/boto3) from 1.28.16 to 1.28.18. - [Release notes](https://github.com/boto/boto3/releases) - [Changelog](https://github.com/boto/boto3/blob/develop/CHANGELOG.rst) - [Commits](https://github.com/boto/boto3/compare/1.28.16...1.28.18) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 24ce05d821..cd289be755 100644 --- a/requirements.txt +++ b/requirements.txt @@ -78,7 +78,7 @@ django-ratelimit==4.1.0 argon2-cffi==21.3.0 blackduck==1.1.0 pycurl==7.45.2 # Required for Celery Broker AWS (SQS) support -boto3==1.28.16 # Required for Celery Broker AWS (SQS) support +boto3==1.28.18 # Required for Celery Broker AWS (SQS) support netaddr==0.8.0 vulners==2.0.10 fontawesomefree==6.4.2 From c5ec8fe46166fae7cac13a12b1d4a9ed99adbcef Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Mon, 7 Aug 2023 10:26:46 -0500 Subject: [PATCH 82/85] Revert "Bump debugpy from 1.6.7 to 1.6.8 (#8466)" (#8478) This reverts commit 64d9ea3413df2f9f1a7caa8bf5fee2c7244c2797. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cd289be755..acf1cd6789 100644 --- a/requirements.txt +++ b/requirements.txt @@ -54,7 +54,7 @@ social-auth-app-django==5.2.0 social-auth-core==4.4.2 Python-jose==3.3.0 gitpython==3.1.32 -debugpy==1.6.8 +debugpy==1.6.7 python-gitlab==3.15.0 drf_yasg==1.21.5 cpe==1.2.1 From 66786c9008a7a1110cc0a4f89dd8b916138a5078 Mon Sep 17 00:00:00 2001 From: Charles Neill <1749665+cneill@users.noreply.github.com> Date: Mon, 7 Aug 2023 14:34:58 -0500 Subject: [PATCH 83/85] fixing some typos (#8472) --- helm/defectdojo/templates/secret-postgresql-ha.yaml | 4 ++-- unittests/tools/test_arachni_parser.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/helm/defectdojo/templates/secret-postgresql-ha.yaml b/helm/defectdojo/templates/secret-postgresql-ha.yaml index 142b1cfef5..8e884fa048 100644 --- a/helm/defectdojo/templates/secret-postgresql-ha.yaml +++ b/helm/defectdojo/templates/secret-postgresql-ha.yaml @@ -22,8 +22,8 @@ data: postgresql-password: {{ $postgresRandomPassword }} postgresql-postgres-password: {{ $postgresRandomPassword }} {{- end }} -{{- if .Values.postgresqlha.postgresql.repmgrpassword }} - repmgr-password: {{ .Values.postgresqlha.postgresql.repmgrpassword | b64enc | quote }} +{{- if .Values.postgresqlha.postgresql.repmgrPassword }} + repmgr-password: {{ .Values.postgresqlha.postgresql.repmgrPassword | b64enc | quote }} {{- else }} {{- $repmgrRandomPassword := randAlphaNum 16 | b64enc | quote }} repmgr-password: {{ $repmgrRandomPassword }} diff --git a/unittests/tools/test_arachni_parser.py b/unittests/tools/test_arachni_parser.py index f9eb448ad9..ea647fff38 100644 --- a/unittests/tools/test_arachni_parser.py +++ b/unittests/tools/test_arachni_parser.py @@ -4,7 +4,7 @@ from dojo.models import Test -class TestAquaParser(DojoTestCase): +class TestArachniParser(DojoTestCase): def test_parser_has_one_finding(self): with open("unittests/scans/arachni/arachni.afr.json") as testfile: From 0f91e5b26ee8d4d3317c4ba2f51d977cc4afd0c5 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Mon, 7 Aug 2023 14:42:43 -0500 Subject: [PATCH 84/85] Deprecation: OpenAPI 2.0 Documentation page (#8473) * Deprecation: OpenAPI 2.0 Documentation page * Update base.html --- docs/content/en/getting_started/upgrading.md | 13 +++++++++++++ dojo/templates/base.html | 2 +- dojo/urls.py | 2 +- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/docs/content/en/getting_started/upgrading.md 
b/docs/content/en/getting_started/upgrading.md index 33872b487c..445d3c2ff6 100644 --- a/docs/content/en/getting_started/upgrading.md +++ b/docs/content/en/getting_started/upgrading.md @@ -72,6 +72,19 @@ godojo installations If you have installed DefectDojo on "iron" and wish to upgrade the installation, please see the [instructions in the repo](https://github.com/DefectDojo/godojo/blob/master/docs-and-scripts/upgrading.md). +## Upgrading to DefectDojo Version 2.25.x. + +There are no special instruction for upgrading to 2.25.0. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.25.0) for the contents of the release. + +**Deprecation** + +The OpenAPI 2.0 Swagger API documentation is being deprecated in favor of the existing +OpenAPI 3.0 API documentation page. The OpenAPI 2.0 Swagger API documentation page is +slated for removal in version 2.30.0 + +*Note*: The API has not changed in any way and behaves the same between OAPI2 and OAPI3 + + ## Upgrading to DefectDojo Version 2.24.x. There are no special instruction for upgrading to 2.24.0. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.24.0) for the contents of the release. diff --git a/dojo/templates/base.html b/dojo/templates/base.html index 3d5eca04df..7157a73896 100644 --- a/dojo/templates/base.html +++ b/dojo/templates/base.html @@ -173,7 +173,7 @@
  • - {% trans "API v2 OpenAPI2 Docs" %} + {% trans "API v2 OpenAPI2 Docs (Deprecated)" %}
  • diff --git a/dojo/urls.py b/dojo/urls.py index 4fcd0473b3..8f52ece4bd 100755 --- a/dojo/urls.py +++ b/dojo/urls.py @@ -184,7 +184,7 @@ openapi.Info( title="Defect Dojo API", default_version='v2', - description="To use the API you need be authorized.", + description="To use the API you need be authorized.\n\n## Deprecated - Removal in v2.30.0\n#### Please use the [OpenAPI3 version](/api/v2/oa3/swagger-ui/)", ), # if public=False, includes only endpoints the current user has access to public=True, From 67e03854147b0f9e01523742a315c378b8247212 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 7 Aug 2023 20:25:22 +0000 Subject: [PATCH 85/85] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 4e8aea8365..9d9a76aa27 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.25.0-dev", + "version": "2.25.0", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 4c1f6f5856..784d73914c 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa -__version__ = '2.25.0-dev' +__version__ = '2.25.0' __url__ = 'https://github.com/DefectDojo/django-DefectDojo' __docs__ = 'https://documentation.defectdojo.com' diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 4f16ff9c2f..42e478b86c 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.25.0-dev" +appVersion: "2.25.0" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.79-dev +version: 1.6.79 icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap