-
Notifications
You must be signed in to change notification settings - Fork 1.6k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #10885 from DefectDojo/release/2.38.1
Release: Merge release into master from: release/2.38.1
- Loading branch information
Showing
34 changed files
with
5,117 additions
and
15 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,9 @@ | ||
--- | ||
title: "Legitify" | ||
toc_hide: true | ||
--- | ||
### File Types | ||
This DefectDojo parser accepts JSON files (in flattened format) from Legitify. For further details regarding the results, please consult the relevant [documentation](https://github.com/Legit-Labs/legitify?tab=readme-ov-file#output-options). | ||
|
||
### Sample Scan Data | ||
Sample scan data for testing purposes can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/legitify). |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,9 @@ | ||
--- | ||
title: "Threat Composer" | ||
toc_hide: true | ||
--- | ||
### File Types | ||
This DefectDojo parser accepts JSON files from Threat Composer. The tool supports the [export](https://github.com/awslabs/threat-composer/tree/main?#features) of JSON report out of the browser local storage to a local file. | ||
|
||
### Sample Scan Data | ||
Sample scan data for testing purposes can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/threat_composer). |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
18 changes: 18 additions & 0 deletions
18
dojo/db_migrations/0214_test_type_dynamically_generated.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,18 @@ | ||
# Generated by Django 5.0.8 on 2024-09-04 19:23 | ||
|
||
from django.db import migrations, models | ||
|
||
|
||
class Migration(migrations.Migration):
    """Add the `dynamically_generated` flag to Test_Type.

    Marks test types that are created on the fly at import time, as opposed
    to the statically registered ones.
    """

    dependencies = [
        ("dojo", "0213_system_settings_enable_ui_table_based_searching"),
    ]

    operations = [
        migrations.AddField(
            model_name="test_type",
            name="dynamically_generated",
            field=models.BooleanField(
                default=False,
                help_text="Set to True for test types that are created at import time",
            ),
        ),
    ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1 +1 @@ | ||
38096a82c7cdeec6ca9c663c1ec3d6a5692a0e7bbfdea8fd2f05c58f753430d4 | ||
5adedc433a342d675492b86dc18786f72e167115f9718a397dc9b91c5fdc9c94 |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,69 @@ | ||
import json | ||
|
||
from dojo.models import Endpoint, Finding | ||
|
||
|
||
class LegitifyParser:
    """Parse Legitify JSON output (flattened format) into DefectDojo findings.

    Each policy in the report's "content" section that has at least one
    FAILED violation becomes a single Finding; the violations' canonical
    links are collected as references and endpoints.
    """

    def get_scan_types(self):
        return ["Legitify Scan"]

    def get_label_for_scan_types(self, scan_type):
        return scan_type  # no custom label for now

    def get_description_for_scan_types(self, scan_type):
        return "Legitify output file can be imported in JSON format."

    def severity_mapper(self, severity):
        """Map a Legitify severity (e.g. "HIGH") to a DefectDojo severity.

        Unknown or missing values default to "Low".
        """
        mapping = {
            "LOW": "Low",
            "MEDIUM": "Medium",
            "HIGH": "High",
            "CRITICAL": "Critical",
        }
        return mapping.get(severity, "Low")

    def parse_json(self, file):
        """Read a (bytes or text) file-like object and return the parsed JSON.

        Raises:
            ValueError: if the content cannot be decoded or parsed as JSON.
                The original exception is preserved as the cause.
        """
        try:
            data = file.read()
            if isinstance(data, bytes):
                data = data.decode("utf-8")
            tree = json.loads(data)
        except Exception as err:
            msg = "Invalid format"
            raise ValueError(msg) from err
        return tree

    def get_findings(self, file, test):
        """Return one Finding per policy that has at least one FAILED violation."""
        report_tree = self.parse_json(file)

        findings = []
        # Only the values are needed; the content keys are policy identifiers
        # already repeated inside policyInfo.
        for content_value in report_tree.get("content", {}).values():
            policy_info = content_value.get("policyInfo", {})
            is_finding = False
            endpoints = set()
            references = set()
            for violation in content_value.get("violations", []):
                if violation.get("status") == "FAILED":
                    is_finding = True
                    url = violation.get("canonicalLink")
                    if url:
                        references.add(url)
                        endpoints.add(Endpoint.from_uri(url))

            if is_finding:
                finding = Finding(
                    description=policy_info.get("description", ""),
                    dynamic_finding=False,
                    impact="\n".join(policy_info.get("threat", [])),
                    mitigation="\n".join(policy_info.get("remediationSteps", [])),
                    # Sort for deterministic output: iterating a set would
                    # produce a different reference order on every run.
                    references="\n".join(sorted(references)),
                    severity=self.severity_mapper(policy_info.get("severity", "LOW")),
                    static_finding=True,
                    title=f'{policy_info.get("namespace", "").capitalize()} | {policy_info.get("title", "")}',
                    vuln_id_from_tool=policy_info.get("policyName", None),
                )
                finding.unsaved_endpoints = list(endpoints)
                findings.append(finding)
        return findings
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,152 @@ | ||
import json | ||
from collections import defaultdict | ||
from os import linesep | ||
|
||
from dojo.models import Finding | ||
|
||
|
||
class ThreatComposerParser: | ||
""" | ||
Threat Composer JSON can be imported. See here for more info on this JSON format. | ||
""" | ||
|
||
PRIORITY_VALUES = ["Low", "Medium", "High"] | ||
STRIDE_VALUES = { | ||
"S": "Spoofing", | ||
"T": "Tampering", | ||
"R": "Repudiation", | ||
"I": "Information Disclosure", | ||
"D": "Denial of Service", | ||
"E": "Elevation of Privilege", | ||
} | ||
|
||
def get_scan_types(self): | ||
return ["ThreatComposer Scan"] | ||
|
||
def get_label_for_scan_types(self, scan_type): | ||
return "ThreatComposer Scan" | ||
|
||
def get_description_for_scan_types(self, scan_type): | ||
return "ThreatComposer report file can be imported in JSON format." | ||
|
||
def get_findings(self, file, test): | ||
data = json.load(file) | ||
findings = [] | ||
|
||
if "threats" not in data: | ||
msg = "Invalid ThreatComposer data" | ||
raise ValueError(msg) | ||
|
||
if "assumptionLinks" in data: | ||
assumptions = {assumption["id"]: assumption for assumption in data["assumptions"]} | ||
assumption_mitigation_links = defaultdict(list) | ||
assumption_threat_links = defaultdict(list) | ||
for link in data["assumptionLinks"]: | ||
linked_id = link["linkedId"] | ||
assumption_id = link["assumptionId"] | ||
assumption_type = link["type"] | ||
if assumption_id in assumptions: | ||
if assumption_type == "Threat": | ||
assumption_threat_links[linked_id].append(assumptions[assumption_id]) | ||
elif assumption_type == "Mitigation": | ||
assumption_mitigation_links[linked_id].append(assumptions[assumption_id]) | ||
|
||
if "mitigationLinks" in data: | ||
mitigations = { | ||
mitigation["id"]: { | ||
"mitigation": mitigation, | ||
"assumptions": assumption_mitigation_links[mitigation["id"]], | ||
} | ||
for mitigation in data["mitigations"] | ||
} | ||
mitigation_links = defaultdict(list) | ||
for link in data["mitigationLinks"]: | ||
linked_id = link["linkedId"] | ||
mitigation_id = link["mitigationId"] | ||
if mitigation_id in mitigations: | ||
mitigation_links[linked_id].append(mitigations[mitigation_id]) | ||
|
||
for threat in data["threats"]: | ||
|
||
if "threatAction" in threat: | ||
title = threat["threatAction"] | ||
severity, impact, comments = self.parse_threat_metadata(threat["metadata"]) | ||
description = self.to_description_text(threat, comments, assumption_threat_links[threat["id"]]) | ||
mitigation = self.to_mitigation_text(mitigation_links[threat["id"]]) | ||
unique_id_from_tool = threat["id"] | ||
vuln_id_from_tool = threat["numericId"] | ||
tags = threat["tags"] if "tags" in threat else [] | ||
|
||
finding = Finding( | ||
title=title, | ||
description=description, | ||
severity=severity, | ||
vuln_id_from_tool=vuln_id_from_tool, | ||
unique_id_from_tool=unique_id_from_tool, | ||
mitigation=mitigation, | ||
impact=impact, | ||
tags=tags, | ||
static_finding=True, | ||
dynamic_finding=False, | ||
) | ||
|
||
match threat.get("status", "threatIdentified"): | ||
case "threatResolved": | ||
finding.active = False | ||
finding.is_mitigated = True | ||
finding.false_p = False | ||
case "threatResolvedNotUseful": | ||
finding.active = False | ||
finding.is_mitigated = True | ||
finding.false_p = True | ||
|
||
findings.append(finding) | ||
|
||
return findings | ||
|
||
def to_mitigation_text(self, mitigations): | ||
text = "" | ||
for i, current in enumerate(mitigations): | ||
mitigation = current["mitigation"] | ||
assumption_links = current["assumptions"] | ||
counti = i + 1 | ||
text += f"**Mitigation {counti} (ID: {mitigation['numericId']}, Status: {mitigation.get('status', 'Not defined')})**: {mitigation['content']}" | ||
|
||
for item in mitigation["metadata"]: | ||
if item["key"] == "Comments": | ||
text += f"\n*Comments*: {item['value'].replace(linesep, ' ')} " | ||
break | ||
|
||
for j, assumption in enumerate(assumption_links): | ||
countj = j + 1 | ||
text += f"\n- *Assumption {countj} (ID: {assumption['numericId']})*: {assumption['content'].replace(linesep, ' ')}" | ||
|
||
text += "\n" | ||
|
||
return text | ||
|
||
def parse_threat_metadata(self, metadata): | ||
severity = "Info" | ||
impact = None | ||
comments = None | ||
|
||
for item in metadata: | ||
if item["key"] == "Priority" and item["value"] in self.PRIORITY_VALUES: | ||
severity = item["value"] | ||
elif item["key"] == "STRIDE" and all(element in self.STRIDE_VALUES for element in item["value"]): | ||
impact = ", ".join([self.STRIDE_VALUES[element] for element in item["value"]]) | ||
elif item["key"] == "Comments": | ||
comments = item["value"] | ||
|
||
return severity, impact, comments | ||
|
||
def to_description_text(self, threat, comments, assumption_links): | ||
text = f"**Threat**: {threat['statement']}" | ||
if comments: | ||
text += f"\n*Comments*: {comments}" | ||
|
||
for i, assumption in enumerate(assumption_links): | ||
counti = i + 1 | ||
text += f"\n- *Assumption {counti} (ID: {assumption['numericId']})*: {assumption['content'].replace(linesep, ' ')}" | ||
|
||
return text |
Oops, something went wrong.