Skip to content

Commit

Permalink
Merge pull request #10885 from DefectDojo/release/2.38.1
Browse files Browse the repository at this point in the history
Release: Merge release into master from: release/2.38.1
  • Loading branch information
Maffooch authored Sep 9, 2024
2 parents 1386330 + ed08101 commit 7533448
Show file tree
Hide file tree
Showing 34 changed files with 5,117 additions and 15 deletions.
2 changes: 1 addition & 1 deletion components/package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "defectdojo",
"version": "2.38.0",
"version": "2.38.1",
"license" : "BSD-3-Clause",
"private": true,
"dependencies": {
Expand Down
9 changes: 9 additions & 0 deletions docs/content/en/integrations/parsers/file/legitify.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
---
title: "Legitify"
toc_hide: true
---
### File Types
This DefectDojo parser accepts JSON files (in flattened format) from Legitify. For further details regarding the results, please consult the relevant [documentation](https://github.com/Legit-Labs/legitify?tab=readme-ov-file#output-options).

### Sample Scan Data
Sample scan data for testing purposes can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/legitify).
9 changes: 9 additions & 0 deletions docs/content/en/integrations/parsers/file/threat_composer.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
---
title: "Threat Composer"
toc_hide: true
---
### File Types
This DefectDojo parser accepts JSON files from Threat Composer. The tool supports [exporting](https://github.com/awslabs/threat-composer/tree/main?#features) a JSON report from the browser's local storage to a local file.

### Sample Scan Data
Sample scan data for testing purposes can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/threat_composer).
2 changes: 1 addition & 1 deletion dojo/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,6 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app # noqa: F401

__version__ = "2.38.0"
__version__ = "2.38.1"
__url__ = "https://github.com/DefectDojo/django-DefectDojo"
__docs__ = "https://documentation.defectdojo.com"
2 changes: 1 addition & 1 deletion dojo/api_v2/serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1411,7 +1411,7 @@ class TestTypeSerializer(TaggitSerializer, serializers.ModelSerializer):

class Meta:
model = Test_Type
fields = "__all__"
exclude = ("dynamically_generated",)


class TestToNotesSerializer(serializers.Serializer):
Expand Down
18 changes: 18 additions & 0 deletions dojo/db_migrations/0214_test_type_dynamically_generated.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# Generated by Django 5.0.8 on 2024-09-04 19:23

from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds the Test_Type.dynamically_generated flag so test types that are
    # auto-created while importing a report can be told apart from manually
    # created ones (see help_text below).

    # Must run after the 0213 system-settings migration in the dojo app.
    dependencies = [
        ('dojo', '0213_system_settings_enable_ui_table_based_searching'),
    ]

    operations = [
        migrations.AddField(
            model_name='test_type',
            name='dynamically_generated',
            # Defaults to False so all pre-existing rows are treated as
            # manually created test types.
            field=models.BooleanField(default=False, help_text='Set to True for test types that are created at import time'),
        ),
    ]
4 changes: 3 additions & 1 deletion dojo/forms.py
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ def __init__(self, *args, **kwargs):
class Test_TypeForm(forms.ModelForm):
class Meta:
model = Test_Type
exclude = [""]
exclude = ["dynamically_generated"]


class Development_EnvironmentForm(forms.ModelForm):
Expand Down Expand Up @@ -321,6 +321,8 @@ class ProductForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_Add_Product)
if prod_type_id := getattr(kwargs.get("instance", Product()), "prod_type_id"): # we are editing existing instance
self.fields["prod_type"].queryset |= Product_Type.objects.filter(pk=prod_type_id) # even if user does not have permission for any other ProdType we need to add at least assign ProdType to make form submittable (otherwise empty list was here which generated invalid form)

# if this product has findings being asynchronously updated, disable the sla config field
if self.instance.async_updating:
Expand Down
2 changes: 2 additions & 0 deletions dojo/importers/base_importer.py
Original file line number Diff line number Diff line change
Expand Up @@ -491,6 +491,8 @@ def get_or_create_test_type(
test_type, created = Test_Type.objects.get_or_create(name=test_type_name)
if created:
logger.info(f"Created new Test_Type with name {test_type.name} because a report is being imported")
test_type.dynamically_generated = True
test_type.save()
return test_type

def verify_tool_configuration_from_test(self):
Expand Down
2 changes: 2 additions & 0 deletions dojo/importers/default_reimporter.py
Original file line number Diff line number Diff line change
Expand Up @@ -704,6 +704,8 @@ def finding_post_processing(
finding.unsaved_files = finding_from_report.unsaved_files
self.process_files(finding)
# Process vulnerability IDs
if finding_from_report.unsaved_vulnerability_ids:
finding.unsaved_vulnerability_ids = finding_from_report.unsaved_vulnerability_ids
finding = self.process_vulnerability_ids(finding)

return finding
Expand Down
3 changes: 3 additions & 0 deletions dojo/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -817,6 +817,9 @@ class Test_Type(models.Model):
static_tool = models.BooleanField(default=False)
dynamic_tool = models.BooleanField(default=False)
active = models.BooleanField(default=True)
dynamically_generated = models.BooleanField(
default=False,
help_text=_("Set to True for test types that are created at import time"))

class Meta:
ordering = ("name",)
Expand Down
2 changes: 1 addition & 1 deletion dojo/settings/.settings.dist.py.sha256sum
Original file line number Diff line number Diff line change
@@ -1 +1 @@
38096a82c7cdeec6ca9c663c1ec3d6a5692a0e7bbfdea8fd2f05c58f753430d4
5adedc433a342d675492b86dc18786f72e167115f9718a397dc9b91c5fdc9c94
11 changes: 6 additions & 5 deletions dojo/settings/settings.dist.py
Original file line number Diff line number Diff line change
Expand Up @@ -1277,6 +1277,8 @@ def saml2_attrib_map_format(dict):
"Kiuwan SCA Scan": ["description", "severity", "component_name", "component_version", "cwe"],
"Rapplex Scan": ["title", "endpoints", "severity"],
"AppCheck Web Application Scanner": ["title", "severity"],
"Legitify Scan": ["title", "endpoints", "severity"],
"ThreatComposer Scan": ["title", "description"],
}

# Override the hardcoded settings here via the env var
Expand Down Expand Up @@ -1499,6 +1501,8 @@ def saml2_attrib_map_format(dict):
"Kiuwan SCA Scan": DEDUPE_ALGO_HASH_CODE,
"Rapplex Scan": DEDUPE_ALGO_HASH_CODE,
"AppCheck Web Application Scanner": DEDUPE_ALGO_HASH_CODE,
"Legitify Scan": DEDUPE_ALGO_HASH_CODE,
"ThreatComposer Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE,
}

# Override the hardcoded settings here via the env var
Expand Down Expand Up @@ -1532,11 +1536,8 @@ def saml2_attrib_map_format(dict):
)

if env("DD_JIRA_EXTRA_ISSUE_TYPES") != "":
if env("DD_JIRA_EXTRA_ISSUE_TYPES").count(",") > 0:
for extra_type in env("DD_JIRA_EXTRA_ISSUE_TYPES").split(","):
JIRA_ISSUE_TYPE_CHOICES_CONFIG += (extra_type, extra_type)
else:
JIRA_ISSUE_TYPE_CHOICES_CONFIG += (env("DD_JIRA_EXTRA_ISSUE_TYPES"), env("DD_JIRA_EXTRA_ISSUE_TYPES"))
for extra_type in env("DD_JIRA_EXTRA_ISSUE_TYPES").split(","):
JIRA_ISSUE_TYPE_CHOICES_CONFIG += ((extra_type, extra_type),)

JIRA_SSL_VERIFY = env("DD_JIRA_SSL_VERIFY")

Expand Down
2 changes: 1 addition & 1 deletion dojo/templates/dojo/view_product_details.html
Original file line number Diff line number Diff line change
Expand Up @@ -461,7 +461,7 @@ <h3 class="panel-title"><span class="fa-solid fa-circle-info fa-fw" aria-hidden=
</tr>
<tr>
<td><strong>{% trans "Product Type" %}</strong></td>
<td>{{ prod.prod_type|notspecified }}</td>
<td><a title="{% trans "Product Type" %}" href="{% url 'view_product_type' prod.prod_type.id %}">{{ prod.prod_type }}</a></td>
</tr>
<tr>
<td><strong>{% trans "Platform" %}</strong></td>
Expand Down
5 changes: 4 additions & 1 deletion dojo/tools/aqua/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,10 @@ def get_item(resource, vuln, test):
)
if vulnerability_id != "No CVE":
finding.unsaved_vulnerability_ids = [vulnerability_id]

if vuln.get("epss_score"):
finding.epss_score = vuln.get("epss_score")
if vuln.get("epss_percentile"):
finding.epss_percentile = vuln.get("epss_percentile")
return finding


Expand Down
Empty file added dojo/tools/legitify/__init__.py
Empty file.
69 changes: 69 additions & 0 deletions dojo/tools/legitify/parser.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
import json

from dojo.models import Endpoint, Finding


class LegitifyParser:
    """
    Parser for Legitify JSON output (flattened format).

    Produces one Finding per policy that has at least one FAILED violation,
    aggregating each violating resource's canonical link into the finding's
    references and unsaved endpoints.
    """

    def get_scan_types(self):
        return ["Legitify Scan"]

    def get_label_for_scan_types(self, scan_type):
        return scan_type  # no custom label for now

    def get_description_for_scan_types(self, scan_type):
        return "Legitify output file can be imported in JSON format."

    def severity_mapper(self, severity):
        """Map Legitify's upper-case severity to DefectDojo's capitalized form.

        Unknown values fall back to "Low" rather than failing the import.
        """
        mapping = {
            "LOW": "Low",
            "MEDIUM": "Medium",
            "HIGH": "High",
            "CRITICAL": "Critical",
        }
        return mapping.get(severity, "Low")

    def parse_json(self, file):
        """Read *file* and decode its contents as JSON.

        Accepts either a binary or a text file object; raises ValueError on
        anything that is not valid JSON.
        """
        try:
            data = file.read()
            try:
                # Uploaded files are usually bytes; decode as UTF-8 first.
                tree = json.loads(str(data, "utf-8"))
            except Exception:
                tree = json.loads(data)
        except Exception:
            msg = "Invalid format"
            raise ValueError(msg)
        return tree

    def get_findings(self, file, test):
        """Return a list of Findings parsed from a Legitify JSON report."""
        report_tree = self.parse_json(file)

        findings = []
        for content_value in report_tree.get("content", {}).values():
            policy_info = content_value.get("policyInfo", {})
            is_finding = False
            # Deduplicate while preserving insertion order. The original
            # implementation used set(), which produced a non-deterministic
            # ordering of the references text and endpoint list between
            # otherwise identical imports.
            endpoints = []
            references = []
            for violation in content_value.get("violations", []):
                if violation.get("status", None) == "FAILED":
                    is_finding = True
                    url = violation.get("canonicalLink", None)
                    if url and url not in references:
                        references.append(url)
                        endpoints.append(Endpoint.from_uri(url))

            if is_finding:
                finding = Finding(
                    description=policy_info.get("description", ""),
                    dynamic_finding=False,
                    impact="\n".join(policy_info.get("threat", [])),
                    mitigation="\n".join(policy_info.get("remediationSteps", [])),
                    references="\n".join(references),
                    severity=self.severity_mapper(policy_info.get("severity", "LOW")),
                    static_finding=True,
                    title=f'{policy_info.get("namespace", "").capitalize()} | {policy_info.get("title", "")}',
                    vuln_id_from_tool=policy_info.get("policyName", None),
                )
                finding.unsaved_endpoints = endpoints
                findings.append(finding)
        return findings
Empty file.
152 changes: 152 additions & 0 deletions dojo/tools/threat_composer/parser.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
import json
from collections import defaultdict
from os import linesep

from dojo.models import Finding


class ThreatComposerParser:
"""
Threat Composer JSON can be imported. See here for more info on this JSON format.
"""

PRIORITY_VALUES = ["Low", "Medium", "High"]
STRIDE_VALUES = {
"S": "Spoofing",
"T": "Tampering",
"R": "Repudiation",
"I": "Information Disclosure",
"D": "Denial of Service",
"E": "Elevation of Privilege",
}

def get_scan_types(self):
return ["ThreatComposer Scan"]

def get_label_for_scan_types(self, scan_type):
return "ThreatComposer Scan"

def get_description_for_scan_types(self, scan_type):
return "ThreatComposer report file can be imported in JSON format."

def get_findings(self, file, test):
data = json.load(file)
findings = []

if "threats" not in data:
msg = "Invalid ThreatComposer data"
raise ValueError(msg)

if "assumptionLinks" in data:
assumptions = {assumption["id"]: assumption for assumption in data["assumptions"]}
assumption_mitigation_links = defaultdict(list)
assumption_threat_links = defaultdict(list)
for link in data["assumptionLinks"]:
linked_id = link["linkedId"]
assumption_id = link["assumptionId"]
assumption_type = link["type"]
if assumption_id in assumptions:
if assumption_type == "Threat":
assumption_threat_links[linked_id].append(assumptions[assumption_id])
elif assumption_type == "Mitigation":
assumption_mitigation_links[linked_id].append(assumptions[assumption_id])

if "mitigationLinks" in data:
mitigations = {
mitigation["id"]: {
"mitigation": mitigation,
"assumptions": assumption_mitigation_links[mitigation["id"]],
}
for mitigation in data["mitigations"]
}
mitigation_links = defaultdict(list)
for link in data["mitigationLinks"]:
linked_id = link["linkedId"]
mitigation_id = link["mitigationId"]
if mitigation_id in mitigations:
mitigation_links[linked_id].append(mitigations[mitigation_id])

for threat in data["threats"]:

if "threatAction" in threat:
title = threat["threatAction"]
severity, impact, comments = self.parse_threat_metadata(threat["metadata"])
description = self.to_description_text(threat, comments, assumption_threat_links[threat["id"]])
mitigation = self.to_mitigation_text(mitigation_links[threat["id"]])
unique_id_from_tool = threat["id"]
vuln_id_from_tool = threat["numericId"]
tags = threat["tags"] if "tags" in threat else []

finding = Finding(
title=title,
description=description,
severity=severity,
vuln_id_from_tool=vuln_id_from_tool,
unique_id_from_tool=unique_id_from_tool,
mitigation=mitigation,
impact=impact,
tags=tags,
static_finding=True,
dynamic_finding=False,
)

match threat.get("status", "threatIdentified"):
case "threatResolved":
finding.active = False
finding.is_mitigated = True
finding.false_p = False
case "threatResolvedNotUseful":
finding.active = False
finding.is_mitigated = True
finding.false_p = True

findings.append(finding)

return findings

def to_mitigation_text(self, mitigations):
text = ""
for i, current in enumerate(mitigations):
mitigation = current["mitigation"]
assumption_links = current["assumptions"]
counti = i + 1
text += f"**Mitigation {counti} (ID: {mitigation['numericId']}, Status: {mitigation.get('status', 'Not defined')})**: {mitigation['content']}"

for item in mitigation["metadata"]:
if item["key"] == "Comments":
text += f"\n*Comments*: {item['value'].replace(linesep, ' ')} "
break

for j, assumption in enumerate(assumption_links):
countj = j + 1
text += f"\n- *Assumption {countj} (ID: {assumption['numericId']})*: {assumption['content'].replace(linesep, ' ')}"

text += "\n"

return text

def parse_threat_metadata(self, metadata):
severity = "Info"
impact = None
comments = None

for item in metadata:
if item["key"] == "Priority" and item["value"] in self.PRIORITY_VALUES:
severity = item["value"]
elif item["key"] == "STRIDE" and all(element in self.STRIDE_VALUES for element in item["value"]):
impact = ", ".join([self.STRIDE_VALUES[element] for element in item["value"]])
elif item["key"] == "Comments":
comments = item["value"]

return severity, impact, comments

def to_description_text(self, threat, comments, assumption_links):
text = f"**Threat**: {threat['statement']}"
if comments:
text += f"\n*Comments*: {comments}"

for i, assumption in enumerate(assumption_links):
counti = i + 1
text += f"\n- *Assumption {counti} (ID: {assumption['numericId']})*: {assumption['content'].replace(linesep, ' ')}"

return text
Loading

0 comments on commit 7533448

Please sign in to comment.