chore(ci_visibility): separate pytest outcome processing into its own function #10960

Merged
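
The change splits pytest's report handling into a pure "decide the outcome" step and a separate "record it" step. For orientation, here is a minimal, self-contained sketch of that shape; the `TestStatus` values, the `result` dict, and the `print` stand-in for `InternalTest.finish(...)` are simplifications, not the ddtrace internals shown in the diff below.

```python
import typing as t
from enum import Enum


class TestStatus(Enum):
    PASS = "pass"
    FAIL = "fail"
    SKIP = "skip"


class _TestOutcome(t.NamedTuple):
    # All fields default to None: an empty _TestOutcome() means "nothing to record yet".
    status: t.Optional[TestStatus] = None
    skip_reason: t.Optional[str] = None
    exc_info: t.Optional[BaseException] = None


def _process_outcome(result: dict) -> _TestOutcome:
    # Pure step: inspect the report and return data, with no side effects.
    if not result:
        # Nothing conclusive for this phase yet (e.g. an uneventful "setup" phase).
        return _TestOutcome()
    if result.get("skipped"):
        return _TestOutcome(TestStatus.SKIP, result.get("reason"))
    if result.get("failed"):
        return _TestOutcome(TestStatus.FAIL, exc_info=result.get("excinfo"))
    return _TestOutcome(TestStatus.PASS)


def _pytest_runtest_makereport(result: dict, when: str) -> None:
    # Recording step: only finish the test once there is a status to record,
    # or once the teardown phase is reached (mirroring the check in the diff).
    test_outcome = _process_outcome(result)
    if test_outcome.status is None and when != "teardown":
        return
    print("finish:", test_outcome)  # stands in for InternalTest.finish(...)


if __name__ == "__main__":
    _pytest_runtest_makereport({"failed": True, "excinfo": ValueError("boom")}, when="call")
```
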
ddtrace/contrib/pytest/_plugin_v2.py (54 changes: 34 additions & 20 deletions)
@@ -29,6 +29,7 @@
from ddtrace.ext import test
from ddtrace.ext.test_visibility import ITR_SKIPPING_LEVEL
from ddtrace.ext.test_visibility.api import TestExcInfo
from ddtrace.ext.test_visibility.api import TestStatus
from ddtrace.ext.test_visibility.api import disable_test_visibility
from ddtrace.ext.test_visibility.api import enable_test_visibility
from ddtrace.ext.test_visibility.api import is_test_visibility_enabled
@@ -54,6 +55,12 @@
_NODEID_REGEX = re.compile("^((?P<module>.*)/(?P<suite>[^/]*?))::(?P<name>.*?)$")


class _TestOutcome(t.NamedTuple):
status: t.Optional[TestStatus] = None
skip_reason: t.Optional[str] = None
exc_info: t.Optional[TestExcInfo] = None


def _handle_itr_should_skip(item, test_id) -> bool:
"""Checks whether a test should be skipped

@@ -71,11 +78,11 @@ def _handle_itr_should_skip(item, test_id) -> bool:
# Marking the test as forced run also applies to its hierarchy
InternalTest.mark_itr_forced_run(test_id)
return False
else:
InternalTest.mark_itr_skipped(test_id)
# Marking the test as skipped by ITR so that it appears in pytest's output
item.add_marker(pytest.mark.skip(reason=SKIPPED_BY_ITR_REASON)) # TODO don't rely on internal for reason
return True

InternalTest.mark_itr_skipped(test_id)
# Marking the test as skipped by ITR so that it appears in pytest's output
item.add_marker(pytest.mark.skip(reason=SKIPPED_BY_ITR_REASON)) # TODO don't rely on internal for reason
return True

return False

@@ -311,7 +318,7 @@ def pytest_runtest_protocol(item, nextitem) -> None:
return


def _pytest_runtest_makereport(item, call, outcome):
def _process_outcome(item, call, outcome) -> _TestOutcome:
result = outcome.get_result()

test_id = _get_test_id_from_item(item)
@@ -322,7 +329,7 @@ def _pytest_runtest_makereport(item, call, outcome):
# - it was skipped by ITR
# - it was marked with skipif
if InternalTest.is_finished(test_id):
return
return _TestOutcome()

# In cases where a test was marked as XFAIL, the reason is only available when call.when == "call", so we
# add it as a tag immediately:
@@ -339,7 +346,7 @@ def _pytest_runtest_makereport(item, call, outcome):
# DEV NOTE: some skip scenarios (eg: skipif) have an exception during setup

if call.when != "teardown" and not (has_exception or result.failed):
return
return _TestOutcome()

xfail = hasattr(result, "wasxfail") or "xfail" in result.keywords
xfail_reason_tag = InternalTest.get_tag(test_id, XFAIL_REASON) if xfail else None
@@ -349,20 +356,18 @@ def _pytest_runtest_makereport(item, call, outcome):
# that's why no XFAIL_REASON or test.RESULT tags will be added.
if result.skipped:
if InternalTest.was_skipped_by_itr(test_id):
# Items that were skipped by ITR already have their status set
return
# Items that were skipped by ITR already have their status and reason set
return _TestOutcome()

if xfail and not has_skip_keyword:
# XFail tests that fail are recorded skipped by pytest, should be passed instead
if not item.config.option.runxfail:
InternalTest.set_tag(test_id, test.RESULT, test.Status.XFAIL.value)
if xfail_reason_tag is None:
InternalTest.set_tag(test_id, XFAIL_REASON, getattr(result, "wasxfail", "XFail"))
InternalTest.mark_pass(test_id)
return
return _TestOutcome(TestStatus.PASS)

InternalTest.mark_skip(test_id, _extract_reason(call))
return
return _TestOutcome(TestStatus.SKIP, _extract_reason(call))

if result.passed:
if xfail and not has_skip_keyword and not item.config.option.runxfail:
@@ -371,24 +376,33 @@ def _pytest_runtest_makereport(item, call, outcome):
InternalTest.set_tag(test_id, XFAIL_REASON, "XFail")
InternalTest.set_tag(test_id, test.RESULT, test.Status.XPASS.value)

InternalTest.mark_pass(test_id)
return
return _TestOutcome(TestStatus.PASS)

if xfail and not has_skip_keyword and not item.config.option.runxfail:
# XPass (strict=True) are recorded failed by pytest, longrepr contains reason
if xfail_reason_tag is None:
InternalTest.set_tag(test_id, XFAIL_REASON, getattr(result, "longrepr", "XFail"))
InternalTest.set_tag(test_id, test.RESULT, test.Status.XPASS.value)
InternalTest.mark_fail(test_id)
return
return _TestOutcome(TestStatus.FAIL)

exc_info = TestExcInfo(call.excinfo.type, call.excinfo.value, call.excinfo.tb) if call.excinfo else None

InternalTest.mark_fail(test_id, exc_info)
return _TestOutcome(status=TestStatus.FAIL, exc_info=exc_info)


def _pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo, outcome: pytest.TestReport) -> None:
test_id = _get_test_id_from_item(item)

test_outcome = _process_outcome(item, call, outcome)

if test_outcome.status is None and call.when != "teardown":
return

InternalTest.finish(test_id, test_outcome.status, test_outcome.skip_reason, test_outcome.exc_info)


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call) -> None:
def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo) -> None:
"""Store outcome for tracing."""
outcome: pytest.TestReport
outcome = yield
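
For context, the `@pytest.hookimpl(hookwrapper=True)` / `outcome = yield` pattern shown at the end of this file's diff is standard pytest plugin machinery; a bare-bones version in a plain `conftest.py` (no ddtrace involved) looks roughly like this:

```python
# conftest.py -- bare-bones illustration of the hookwrapper pattern.
import pytest


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Everything before `yield` runs before the other hook implementations;
    # the yielded object wraps the hook's eventual result.
    outcome = yield
    report = outcome.get_result()  # pytest.TestReport for the current phase
    # call.when is "setup", "call" or "teardown"; the ddtrace plugin feeds
    # each phase into its outcome-processing helper.
    print(f"{item.nodeid}: {call.when} -> {report.outcome}")
```
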
ddtrace/internal/ci_visibility/api/_test.py (5 changes: 3 additions & 2 deletions)
@@ -126,13 +126,14 @@ def _telemetry_record_event_finished(self):

def finish_test(
self,
status: TestStatus,
status: Optional[TestStatus] = None,
reason: Optional[str] = None,
exc_info: Optional[TestExcInfo] = None,
override_finish_time: Optional[float] = None,
) -> None:
log.debug("Test Visibility: finishing %s, with status: %s, reason: %s", self, status, reason)
self.set_status(status)
if status is not None:
self.set_status(status)
if reason is not None:
self.set_tag(test.SKIP_REASON, reason)
if exc_info is not None:
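
A toy illustration of why `status` becomes optional here: a test whose status was already recorded earlier in the run (for example, one skipped by ITR) can now be finished without overwriting that status. `FakeTestRecord` is an invented stand-in, not the ddtrace `Test` class.

```python
from typing import Optional


class FakeTestRecord:
    """Invented stand-in for the object finish_test() is a method of."""

    def __init__(self) -> None:
        self.status: Optional[str] = None
        self.finished = False

    def set_status(self, status: str) -> None:
        self.status = status

    def finish_test(self, status: Optional[str] = None, reason: Optional[str] = None) -> None:
        # Mirrors the guarded call in the hunk above: only set a status if one was given.
        if status is not None:
            self.set_status(status)
        self.finished = True


record = FakeTestRecord()
record.set_status("skip")    # e.g. marked as skipped by ITR earlier in the run
record.finish_test()         # no status passed, so the earlier "skip" is preserved
assert record.status == "skip" and record.finished
```
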
ddtrace/internal/test_visibility/api.py (4 changes: 2 additions & 2 deletions)
@@ -115,7 +115,7 @@ class FinishArgs(NamedTuple):
"""InternalTest allows finishing with an overridden finish time (for EFD and other retry purposes)"""

test_id: InternalTestId
status: TestStatus
status: t.Optional[TestStatus] = None
skip_reason: t.Optional[str] = None
exc_info: t.Optional[TestExcInfo] = None
override_finish_time: t.Optional[float] = None
@@ -124,7 +124,7 @@ class FinishArgs(NamedTuple):
@_catch_and_log_exceptions
def finish(
item_id: InternalTestId,
status: ext_api.TestStatus,
status: t.Optional[ext_api.TestStatus] = None,
reason: t.Optional[str] = None,
exc_info: t.Optional[ext_api.TestExcInfo] = None,
override_finish_time: t.Optional[float] = None,
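
One detail worth noting about the `FinishArgs` change: in a `NamedTuple`, a field with a default may only be followed by fields that also have defaults, which already holds here (`skip_reason`, `exc_info` and `override_finish_time` all default to `None`), so giving `status` a default is valid. A cut-down sketch with made-up field types and node IDs (the real class uses `InternalTestId`, `TestStatus`, etc.):

```python
import typing as t


class FinishArgsSketch(t.NamedTuple):
    # A defaulted field may only be followed by other defaulted fields,
    # which is why `status = None` slots in cleanly here.
    test_id: str
    status: t.Optional[str] = None
    skip_reason: t.Optional[str] = None


print(FinishArgsSketch("tests/test_x.py::test_a"))
print(FinishArgsSketch("tests/test_x.py::test_a", status="fail"))
print(FinishArgsSketch("tests/test_x.py::test_a", status="skip", skip_reason="flaky"))
```
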
static-analysis.datadog.yml (7 changes: 5 additions & 2 deletions)
@@ -1,9 +1,12 @@
rulesets:
- python-best-practices:
rules:
nested-blocks:
comment-fixme-todo-ownership:
ignore:
- "**"
max-function-lines:
ignore:
ignore:
- "tests/ci_visibility/api/fake_runner_*.py"
nested-blocks:
ignore:
- "**"
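
Read without the interleaved old lines, the resulting ruleset appears to be roughly the following; the nesting is reconstructed here because the diff view above flattens the indentation, so treat it as an approximation rather than the exact file.

```yaml
rulesets:
  - python-best-practices:
      rules:
        comment-fixme-todo-ownership:
          ignore:
            - "**"
        max-function-lines:
          ignore:
            - "tests/ci_visibility/api/fake_runner_*.py"
        nested-blocks:
          ignore:
            - "**"
```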