trunner: add support for pytest #345

Open · wants to merge 2 commits into master
32 changes: 32 additions & 0 deletions sample/test/test-pytest.py
@@ -0,0 +1,32 @@
import pytest


def test_always_passes():
assert True


@pytest.mark.skip(reason="CI CHECK")
def test_always_fails(ctx):
assert False, "Always Fail"


@pytest.mark.skip(reason="Test")
def test_always_skip():
assert True


def test_always_xfail():
pytest.xfail("xfail")


@pytest.mark.xfail(reason="always xfail")
def test_always_xpass():
pass


def test_ctx(ctx):
assert ctx is not None


def test_dut(dut):
assert dut is not None
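
The `ctx` and `dut` fixtures used above are injected by the runner through the `TestContextPlugin` defined in `trunner/harness/pytest.py` further down. To run this sample file with plain pytest outside of trunner, a hypothetical `conftest.py` could supply stand-ins, for example:

```python
# Hypothetical conftest.py sketch for running test-pytest.py standalone,
# outside trunner; inside the runner these fixtures come from TestContextPlugin.
import pytest


@pytest.fixture
def ctx():
    return object()  # stand-in for trunner.ctx.TestContext


@pytest.fixture
def dut():
    return object()  # stand-in for trunner.dut.Dut
```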
7 changes: 7 additions & 0 deletions sample/test/test.yaml
@@ -11,3 +11,10 @@ test:
type: unity
targets:
include: [host-generic-pc]

- name: pytest
type: pytest
kwargs:
path: phoenix-rtos-tests/sample/test/test-pytest.py
targets:
include: [host-generic-pc]
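
Since `pytest_harness` (added below) also reads an optional `options` string from `kwargs` and splits it into extra pytest arguments, a hypothetical entry passing a keyword filter might look like this (the entry name and filter are illustrative, not part of this PR):

```yaml
- name: pytest-keyword-filter   # hypothetical example
  type: pytest
  kwargs:
    path: phoenix-rtos-tests/sample/test/test-pytest.py
    options: "-k ctx"           # split and forwarded to pytest.main
  targets:
    include: [host-generic-pc]
```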
8 changes: 7 additions & 1 deletion trunner/config.py
@@ -8,7 +8,7 @@
import yaml

from trunner.ctx import TestContext
from trunner.harness import PyHarness, unity_harness
from trunner.harness import PyHarness, unity_harness, pytest_harness
from trunner.types import AppOptions, BootloaderOptions, TestOptions, ShellOptions


@@ -62,8 +62,11 @@ def _parse_type(self, config: dict):
self._parse_pyharness(config)
elif test_type == "unity":
self._parse_unity()
elif test_type == "pytest":
self._parse_pytest()
else:
raise ParserError("unknown key!")
self.test.type = test_type

def _parse_pyharness(self, config: dict):
path = config.get("harness", self.raw_main.get("harness"))
@@ -99,6 +102,9 @@ def _parse_pyharness(self, config: dict):
def _parse_unity(self):
self.test.harness = PyHarness(self.ctx.target.dut, self.ctx, unity_harness, self.test.kwargs)

def _parse_pytest(self):
self.test.harness = PyHarness(self.ctx.target.dut, self.ctx, pytest_harness, self.test.kwargs)

def _parse_load(self, config: dict):
apps = config.get("load", [])
apps_to_boot = []
2 changes: 2 additions & 0 deletions trunner/harness/__init__.py
@@ -23,6 +23,7 @@
from .psh import ShellHarness
from .pyharness import PyHarness
from .unity import unity_harness
from .pytest import pytest_harness

__all__ = [
"HarnessBuilder",
@@ -46,4 +47,5 @@
"PloImageProperty",
"PloJffsImageProperty",
"unity_harness",
"pytest_harness"
]
88 changes: 88 additions & 0 deletions trunner/harness/pytest.py
@@ -0,0 +1,88 @@
import io
import re
from typing import Optional
from contextlib import redirect_stdout

from trunner.ctx import TestContext
from trunner.dut import Dut
from trunner.types import Status, TestResult

import pytest

RESULT_TYPES = ["failed", "passed", "skipped", "xfailed", "xpassed", "error", "warnings"]


def pytest_harness(dut: Dut, ctx: TestContext, result: TestResult, **kwargs) -> Optional[TestResult]:
test_re = r"::(?P<name>[^\x1b]+?) (?P<status>PASSED|SKIPPED|FAILED|XFAIL|XPASS|ERROR)"
error_re = r"(FAILED|ERROR).*?::(?P<name>.*) - (?P<msg>.*)"
summary_re = r"=+ " + "".join([rf"(?:(?P<{rt}>\d+) {rt}.*?)?" for rt in RESULT_TYPES]) + " in"

test_path = ctx.project_path / kwargs.get("path")
options = kwargs.get("options", "").split()
status = Status.OK
subresults = []
tests = 0

class TestContextPlugin:
@pytest.fixture
def dut(self):
return dut

@pytest.fixture
def ctx(self):
return ctx

@pytest.fixture
def kwargs(self):
return kwargs

test_args = [
f"{test_path}", # Path to test
*options,
"-v", # Verbose output
"--tb=no",
]

output_buffer = io.StringIO()
with redirect_stdout(output_buffer):
pytest.main(test_args, plugins=[TestContextPlugin()])

output = output_buffer.getvalue()

if ctx.stream_output:
print(output)

for line in output.splitlines():
match = re.search(test_re, line)
error = re.search(error_re, line)
final = re.search(summary_re, line)
if match:
parsed = match.groupdict()

sub_status = Status.from_str(parsed["status"])
if sub_status == Status.FAIL:
status = sub_status

subname = parsed["name"]
test = result.add_subresult(subname, sub_status)
subresults.append(test)
tests += 1

elif error:
parsed = error.groupdict()
for subresult in subresults:
if parsed["name"] in subresult.subname:
subresult.msg = parsed["msg"]

elif final:
parsed = final.groupdict()
parsed_tests = sum(int(value) for value in parsed.values() if value is not None)
assert tests == parsed_tests, "".join(
(
"There is a mismatch between the number of parsed tests and overall results!\n",
f"Parsed results from the tests: {tests}\n",
f"Tests found in the summary line: {parsed_tests}",
)
)

return TestResult(status=status, msg="")
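
For readers unfamiliar with the parsing above, here is a small standalone sketch (not part of the change) that runs the same regular expressions against illustrative `pytest -v` output lines:

```python
# Illustrative only: exercises the harness regexes on sample pytest -v lines.
import re

RESULT_TYPES = ["failed", "passed", "skipped", "xfailed", "xpassed", "error", "warnings"]
test_re = r"::(?P<name>[^\x1b]+?) (?P<status>PASSED|SKIPPED|FAILED|XFAIL|XPASS|ERROR)"
summary_re = r"=+ " + "".join([rf"(?:(?P<{rt}>\d+) {rt}.*?)?" for rt in RESULT_TYPES]) + " in"

line = "sample/test/test-pytest.py::test_always_passes PASSED  [ 16%]"
m = re.search(test_re, line)
print(m.group("name"), m.group("status"))  # -> test_always_passes PASSED

summary = "======= 3 passed, 2 skipped, 1 xfailed, 1 xpassed in 0.04s ======="
s = re.search(summary_re, summary)
print({k: v for k, v in s.groupdict().items() if v})
# -> {'passed': '3', 'skipped': '2', 'xfailed': '1', 'xpassed': '1'}
```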
4 changes: 3 additions & 1 deletion trunner/target/armv7a7.py
@@ -71,7 +71,9 @@ def build_test(self, test: TestOptions) -> Callable[[TestResult], TestResult]:
if test.should_reboot:
builder.add(RebooterHarness(self.rebooter))

if test.shell is not None:
if test.type == "pytest":
builder.add(TestStartRunningHarness())
elif test.shell is not None:
builder.add(ShellHarness(self.dut, self.shell_prompt, test.shell.cmd))
else:
builder.add(TestStartRunningHarness())
4 changes: 3 additions & 1 deletion trunner/target/armv7a9.py
@@ -100,7 +100,9 @@ def build_test(self, test: TestOptions) -> Callable[[TestResult], TestResult]:
if test.should_reboot:
builder.add(RebooterHarness(self.rebooter))

if test.shell is not None:
if test.type == "pytest":
builder.add(TestStartRunningHarness())
elif test.shell is not None:
builder.add(ShellHarness(self.dut, self.shell_prompt, test.shell.cmd))
else:
builder.add(TestStartRunningHarness())
4 changes: 3 additions & 1 deletion trunner/target/armv7m4.py
@@ -175,7 +175,9 @@ def build_test(self, test: TestOptions):
builder = HarnessBuilder()
builder.add(STM32L4x6OpenocdGdbServerHarness(setup))

if test.shell is not None:
if test.type == "pytest":
builder.add(TestStartRunningHarness())
elif test.shell is not None:
builder.add(
ShellHarness(
self.dut,
4 changes: 3 additions & 1 deletion trunner/target/armv7m7.py
@@ -76,7 +76,9 @@ def build_test(self, test: TestOptions) -> Callable[[TestResult], TestResult]:

builder.add(PloHarness(self.dut, app_loader=app_loader))

if test.shell is not None:
if test.type == "pytest":
builder.add(TestStartRunningHarness())
elif test.shell is not None:
builder.add(ShellHarness(self.dut, self.shell_prompt, test.shell.cmd))
else:
builder.add(TestStartRunningHarness())
4 changes: 3 additions & 1 deletion trunner/target/emulated.py
@@ -39,7 +39,9 @@ def build_test(self, test: TestOptions) -> Callable[[TestResult], TestResult]:
if test.should_reboot:
builder.add(RebooterHarness(self.rebooter))

if test.shell is not None:
if test.type == "pytest":
builder.add(TestStartRunningHarness())
elif test.shell is not None:
builder.add(
ShellHarness(
self.dut,
17 changes: 6 additions & 11 deletions trunner/target/host.py
@@ -3,7 +3,7 @@

from trunner.ctx import TestContext
from trunner.dut import HostDut
from trunner.harness import IntermediateHarness, HarnessBuilder
from trunner.harness import IntermediateHarness, HarnessBuilder, TestStartRunningHarness
from trunner.types import TestOptions, TestResult, TestStage
from .base import TargetBase

@@ -51,17 +51,12 @@ def flash_dut(self):
def build_test(self, test: TestOptions) -> Callable[[TestResult], TestResult]:
builder = HarnessBuilder()

if test.shell is None or test.shell.cmd is None:
# TODO we should detect it in parsing step, now force fail
def fail(result: TestResult):
result.fail(msg="There is no command to execute")
return result
if test.type == "pytest":
builder.add(TestStartRunningHarness())
if test.shell.cmd is not None:
test.shell.cmd[0] = f"{self.root_dir()}{test.shell.cmd[0]}"
builder.add(self.ExecHarness(self.dut, test.shell.cmd))

builder.add(fail)
return builder.get_harness()

test.shell.cmd[0] = f"{self.root_dir()}{test.shell.cmd[0]}"
builder.add(self.ExecHarness(self.dut, test.shell.cmd))
builder.add(test.harness)

return builder.get_harness()
4 changes: 2 additions & 2 deletions trunner/types.py
@@ -34,9 +34,9 @@ class Status(Enum):

@classmethod
def from_str(cls, s):
if s in ("FAIL", "FAILED", "BAD"):
if s in ("FAIL", "FAILED", "BAD", "ERROR"):
return Status.FAIL
if s in ("OK", "PASS", "PASSED"):
if s in ("OK", "PASS", "PASSED", "XFAIL", "XPASS"):
return Status.OK
if s in ("SKIP", "SKIPPED", "IGNORE", "IGNORED", "UNTESTED"):
return Status.SKIP
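
A quick illustrative check of the extended mapping (assuming `trunner.types` is importable; not part of the diff):

```python
from trunner.types import Status

assert Status.from_str("ERROR") is Status.FAIL    # errors now count as failures
assert Status.from_str("XFAIL") is Status.OK      # expected failures pass
assert Status.from_str("XPASS") is Status.OK
assert Status.from_str("SKIPPED") is Status.SKIP
```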