Tests: Add automated testing framework with coverage support

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
Pragyansh Chaturvedi
2026-03-30 16:12:57 +05:30
parent 0498885f71
commit da57911122
12 changed files with 425 additions and 2 deletions

View File

@ -1,10 +1,22 @@
install:
pip install -e .
uv pip install -e ".[test]"
clean:
rm -rf build dist *.egg-info
rm -rf examples/*.ll examples/*.o
rm -rf htmlcov .coverage
test:
pytest tests/ -v --tb=short -m "not verifier"
test-cov:
pytest tests/ -v --tb=short -m "not verifier" \
--cov=pythonbpf --cov-report=term-missing --cov-report=html
test-verifier:
@echo "NOTE: verifier tests require sudo and bpftool. Uses sudo .venv/bin/python3."
pytest tests/test_verifier.py -v --tb=short -m verifier
all: clean install
.PHONY: all clean
.PHONY: all clean install test test-cov test-verifier

View File

@ -41,7 +41,31 @@ docs = [
"sphinx-rtd-theme>=2.0",
"sphinx-copybutton",
]
test = [
"pytest>=8.0",
"pytest-cov>=5.0",
"tomli>=2.0; python_version < '3.11'",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["pythonbpf*"]
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
markers = [
"verifier: requires sudo/root for kernel verifier tests (not run by default)",
"vmlinux: requires vmlinux.py for current kernel",
]
log_cli = false
[tool.coverage.run]
source = ["pythonbpf"]
omit = ["*/vmlinux*", "*/__pycache__/*"]
[tool.coverage.report]
show_missing = true
skip_covered = false

103
tests/conftest.py Normal file
View File

@ -0,0 +1,103 @@
"""
pytest configuration for the PythonBPF test suite.
Test discovery:
All .py files under tests/passing_tests/ and tests/failing_tests/ are
collected as parametrized BPF test cases.
Markers applied automatically from test_config.toml:
- xfail (strict=True): failing_tests/ entries that are expected to fail
- skip: vmlinux tests when vmlinux.py is not importable
Run the suite:
pytest tests/ -v -m "not verifier" # IR + LLC only (no sudo)
pytest tests/ -v --cov=pythonbpf # with coverage
pytest tests/test_verifier.py -m verifier # kernel verifier (sudo required)
"""
import logging
import pytest
from tests.framework.collector import collect_all_test_files
# ── vmlinux availability ────────────────────────────────────────────────────
# Probe once at import time whether vmlinux.py is importable; tests that
# need it are skipped by pytest_collection_modifyitems when it is absent.
try:
    import vmlinux  # noqa: F401
except ImportError:
    VMLINUX_AVAILABLE = False
else:
    VMLINUX_AVAILABLE = True
# ── shared fixture: collected test cases ───────────────────────────────────
def _all_cases():
    # Thin indirection over the collector so both hooks below share one entry
    # point; re-collects on every call (cheap: directory walk + one TOML read).
    return collect_all_test_files()
# ── pytest_generate_tests: parametrize on bpf_test_file ───────────────────
def pytest_generate_tests(metafunc):
    """Parametrize any test requesting ``bpf_test_file`` over all collected cases.

    IDs are the tests/-relative paths, so node ids read like
    ``test_foo[passing_tests/helpers/pid.py]``.
    """
    if "bpf_test_file" not in metafunc.fixturenames:
        return
    cases = _all_cases()
    metafunc.parametrize(
        "bpf_test_file",
        [case.path for case in cases],
        ids=[case.rel_path for case in cases],
    )
# ── pytest_collection_modifyitems: apply xfail / skip markers ─────────────
def pytest_collection_modifyitems(items):
    """Apply skip/xfail markers to collected items based on test_config.toml.

    - Tests needing vmlinux are skipped when vmlinux.py is not importable.
    - Tests listed under [xfail] are marked strict-xfail so an unexpected
      pass (XPASS) fails the suite, alerting us to fixed bugs.
    """
    case_map = {c.rel_path: c for c in _all_cases()}
    for item in items:
        # Resolve the test case from the parametrize ID embedded in the node id.
        # Node id format: tests/test_foo.py::test_bar[passing_tests/helpers/pid.py]
        # (The original one-iteration for-loop was an obfuscated conditional.)
        callspec = getattr(item, "callspec", None)
        case = case_map.get(callspec.id) if callspec is not None else None
        if case is None:
            continue
        # vmlinux skip
        if case.needs_vmlinux and not VMLINUX_AVAILABLE:
            item.add_marker(
                pytest.mark.skip(reason="vmlinux.py not available for current kernel")
            )
            continue
        # xfail (strict: XPASS counts as a test failure, alerting us to fixed bugs)
        if case.is_expected_fail:
            # Level "ir" → fails at IR generation: xfail both IR and LLC tests
            # Level "llc" → IR succeeds but LLC fails: only xfail the LLC test
            is_llc_test = item.nodeid.startswith("tests/test_llc_compilation.py")
            apply_xfail = (case.xfail_level == "ir") or (
                case.xfail_level == "llc" and is_llc_test
            )
            if apply_xfail:
                item.add_marker(
                    pytest.mark.xfail(
                        reason=case.xfail_reason,
                        strict=True,
                        raises=Exception,
                    )
                )
# ── caplog level fixture: capture ERROR+ from pythonbpf ───────────────────
@pytest.fixture(autouse=True)
def set_log_level(caplog):
    """Autouse fixture: capture ERROR-and-above records from the "pythonbpf"
    logger for every test, so tests can assert no compile errors were logged."""
    with caplog.at_level(logging.ERROR, logger="pythonbpf"):
        yield

View File

View File

@ -0,0 +1,17 @@
from dataclasses import dataclass
from pathlib import Path
@dataclass
class BpfTestCase:
    """A single BPF source file collected as a parametrized test case."""

    # Absolute path of the BPF program file.
    path: Path
    # Path relative to tests/; doubles as the parametrize ID.
    rel_path: str
    # True when the file is listed under [xfail] in test_config.toml.
    is_expected_fail: bool = False
    xfail_reason: str = ""
    # "ir": fails during IR generation; "llc": IR succeeds but llc rejects it.
    xfail_level: str = "ir"  # "ir" or "llc"
    # Requires vmlinux.py for the running kernel.
    needs_vmlinux: bool = False
    skip_reason: str = ""

    @property
    def test_id(self) -> str:
        """rel_path with '/' swapped for '::' — reads like a pytest node id."""
        return "::".join(self.rel_path.split("/"))

View File

@ -0,0 +1,60 @@
import sys
from pathlib import Path

# tomllib is stdlib only from 3.11; the tomli backport provides the same API.
if sys.version_info >= (3, 11):
    import tomllib
else:
    import tomli as tomllib

from .bpf_test_case import BpfTestCase

# Root of the tests/ tree (this file lives in tests/framework/).
TESTS_DIR = Path(__file__).parent.parent
# Optional TOML file mapping test paths to xfail metadata.
CONFIG_FILE = TESTS_DIR / "test_config.toml"

# Path prefixes (relative to tests/) whose tests require vmlinux.py.
VMLINUX_TEST_DIRS = {"passing_tests/vmlinux"}
VMLINUX_TEST_PREFIXES = {
    "failing_tests/vmlinux",
    "failing_tests/xdp",
}
def _is_vmlinux_test(rel_path: str) -> bool:
    """Return True when *rel_path* lives under a directory needing vmlinux.py."""
    # str.startswith accepts a tuple of prefixes — one C-level call.
    vmlinux_prefixes = tuple(VMLINUX_TEST_DIRS | VMLINUX_TEST_PREFIXES)
    return rel_path.startswith(vmlinux_prefixes)
def _load_config() -> dict:
    """Parse test_config.toml; return an empty dict when the file is absent."""
    try:
        with open(CONFIG_FILE, "rb") as fh:
            return tomllib.load(fh)
    except FileNotFoundError:
        return {}
def collect_all_test_files() -> list[BpfTestCase]:
    """Collect every .py file under passing_tests/ and failing_tests/.

    Returns:
        A list of BpfTestCase (sorted within each subdirectory), with xfail
        metadata from test_config.toml and vmlinux requirements applied.
    """
    config = _load_config()
    xfail_map: dict = config.get("xfail", {})
    cases: list[BpfTestCase] = []
    for subdir in ("passing_tests", "failing_tests"):
        for py_file in sorted((TESTS_DIR / subdir).rglob("*.py")):
            rel = str(py_file.relative_to(TESTS_DIR))
            # Presence of an [xfail] entry marks the file expected-to-fail;
            # its "reason"/"level" keys are optional. Collapsing the three
            # repeated `... if entry else ...` conditionals via `entry or {}`.
            entry = xfail_map.get(rel) or {}
            cases.append(
                BpfTestCase(
                    path=py_file,
                    rel_path=rel,
                    is_expected_fail=rel in xfail_map,
                    xfail_reason=entry.get("reason", ""),
                    xfail_level=entry.get("level", "ir"),
                    needs_vmlinux=_is_vmlinux_test(rel),
                )
            )
    return cases

View File

@ -0,0 +1,23 @@
import logging
from pathlib import Path
from pythonbpf.codegen import compile_to_ir, _run_llc
def run_ir_generation(test_path: Path, output_ll: Path):
    """Run compile_to_ir on a BPF test file.

    Returns the (output, structs_sym_tab, maps_sym_tab) tuple from compile_to_ir.
    Raises on exception. Any logging.ERROR records captured by pytest caplog
    indicate a compile failure even when no exception is raised.
    """
    source, target = str(test_path), str(output_ll)
    return compile_to_ir(source, target, loglevel=logging.WARNING)
def run_llc(ll_path: Path, obj_path: Path) -> bool:
    """Compile a .ll file to a BPF .o using llc.

    Raises subprocess.CalledProcessError on failure (llc uses check=True).
    Returns True on success.
    """
    ll_str, obj_str = str(ll_path), str(obj_path)
    return _run_llc(ll_str, obj_str)

View File

@ -0,0 +1,25 @@
import subprocess
import uuid
from pathlib import Path
def verify_object(obj_path: Path) -> tuple[bool, str]:
    """Run bpftool prog load -d to verify a BPF object file against the kernel verifier.

    Pins the program temporarily at /sys/fs/bpf/bpf_prog_test_<uuid>, then removes it.
    Returns (success, combined_output). Requires sudo / root.
    """
    pin_path = f"/sys/fs/bpf/bpf_prog_test_{uuid.uuid4().hex[:8]}"
    load_cmd = ["sudo", "bpftool", "prog", "load", "-d", str(obj_path), pin_path]
    try:
        proc = subprocess.run(
            load_cmd,
            capture_output=True,
            text=True,
            timeout=30,
        )
    except subprocess.TimeoutExpired:
        return False, "bpftool timed out after 30s"
    else:
        return proc.returncode == 0, proc.stdout + proc.stderr
    finally:
        # Best-effort unpin; never raise from cleanup.
        subprocess.run(["sudo", "rm", "-f", pin_path], check=False, capture_output=True)

33
tests/test_config.toml Normal file
View File

@ -0,0 +1,33 @@
# test_config.toml
#
# [xfail] — tests expected to fail.
# key = path relative to tests/
# value = {reason = "...", level = "ir" | "llc"}
# level "ir" = fails during pythonbpf IR generation (exception or ERROR log)
# level "llc" = IR generates but llc rejects it
#
# Tests removed from this list because they now pass (fixed by compilation-context PR):
# failing_tests/assign/retype.py
# failing_tests/conditionals/helper_cond.py
# failing_tests/conditionals/oneline.py
# failing_tests/direct_assign.py
# failing_tests/globals.py
# failing_tests/if.py
# failing_tests/license.py
# failing_tests/named_arg.py
# failing_tests/xdp/xdp_test_1.py
# These files can be moved to passing_tests/ when convenient.
[xfail]
"failing_tests/conditionals/struct_ptr.py" = {reason = "Struct pointer used directly as boolean condition not supported", level = "ir"}
"failing_tests/license.py" = {reason = "Missing LICENSE global produces IR that llc rejects — should be caught earlier with a clear error message", level = "llc"}
"failing_tests/undeclared_values.py" = {reason = "Undeclared variable used in f-string — should raise SyntaxError (correct behaviour, test documents it)", level = "ir"}
"failing_tests/vmlinux/args_test.py" = {reason = "struct_trace_event_raw_sys_enter args field access not supported", level = "ir"}
"failing_tests/vmlinux/assignment_handling.py" = {reason = "Assigning vmlinux enum value (XDP_PASS) to a local variable not yet supported", level = "ir"}
"failing_tests/xdp_pass.py" = {reason = "XDP program using vmlinux structs (struct_xdp_md) and complex map/struct interaction not yet supported", level = "ir"}

View File

@ -0,0 +1,29 @@
"""
Level 1 — IR Generation tests.
For every BPF test file, calls compile_to_ir() and asserts:
1. No exception is raised by the pythonbpf compiler.
2. No logging.ERROR records are emitted during compilation.
3. A .ll file is produced.
Tests in failing_tests/ are marked xfail (strict=True) by conftest.py —
they must raise an exception or produce an ERROR log to pass the suite.
"""
import logging
from pathlib import Path
from tests.framework.compiler import run_ir_generation
def test_ir_generation(bpf_test_file: Path, tmp_path, caplog):
    """IR generation must raise nothing, log no ERRORs, and write a .ll file."""
    ll_path = tmp_path / "output.ll"
    run_ir_generation(bpf_test_file, ll_path)
    error_records = [rec for rec in caplog.records if rec.levelno >= logging.ERROR]
    details = "\n".join(f"  [{rec.name}] {rec.getMessage()}" for rec in error_records)
    assert not error_records, "IR generation produced ERROR log(s):\n" + details
    assert ll_path.exists(), "compile_to_ir() returned without writing a .ll file"

View File

@ -0,0 +1,32 @@
"""
Level 2 — LLC compilation tests.
For every BPF test file, runs the full compile_to_ir() + _run_llc() pipeline
and asserts a non-empty .o file is produced.
Tests in failing_tests/ are marked xfail (strict=True) by conftest.py.
"""
import logging
from pathlib import Path
from tests.framework.compiler import run_ir_generation, run_llc
def test_llc_compilation(bpf_test_file: Path, tmp_path, caplog):
    """Full IR + llc pipeline must emit a non-empty .o with no ERROR logs."""
    ll_path = tmp_path / "output.ll"
    obj_path = tmp_path / "output.o"
    run_ir_generation(bpf_test_file, ll_path)
    error_records = [rec for rec in caplog.records if rec.levelno >= logging.ERROR]
    details = "\n".join(f"  [{rec.name}] {rec.getMessage()}" for rec in error_records)
    assert not error_records, "IR generation produced ERROR log(s):\n" + details
    run_llc(ll_path, obj_path)
    assert obj_path.exists() and obj_path.stat().st_size > 0, (
        "llc did not produce a non-empty .o file"
    )

65
tests/test_verifier.py Normal file
View File

@ -0,0 +1,65 @@
"""
Level 3 — Kernel verifier tests.
For every passing BPF test file, compiles to a .o and runs:
sudo bpftool prog load -d <file.o> /sys/fs/bpf/bpf_prog_test_<id>
These tests are opt-in: they require sudo and kernel access, and are gated
behind the `verifier` pytest mark. Run with:
pytest tests/test_verifier.py -m verifier -v
Note: uses the venv Python binary for any in-process calls, but bpftool
itself is invoked via subprocess with sudo. Ensure bpftool is installed
and the user can sudo.
"""
import logging
from pathlib import Path
import pytest
from tests.framework.collector import collect_all_test_files
from tests.framework.compiler import run_ir_generation, run_llc
from tests.framework.verifier import verify_object
def _passing_test_files():
    """Paths of tests expected to pass that do not need vmlinux.py."""
    cases = collect_all_test_files()
    return [case.path for case in cases if not (case.is_expected_fail or case.needs_vmlinux)]
def _passing_test_ids():
    """Parametrize IDs matching _passing_test_files(), in the same order."""
    cases = collect_all_test_files()
    return [case.rel_path for case in cases if not (case.is_expected_fail or case.needs_vmlinux)]
@pytest.mark.verifier
@pytest.mark.parametrize(
    "verifier_test_file",
    _passing_test_files(),
    ids=_passing_test_ids(),
)
def test_kernel_verifier(verifier_test_file: Path, tmp_path, caplog):
    """Compile the BPF test and verify it passes the kernel verifier."""
    ll_path = tmp_path / "output.ll"
    obj_path = tmp_path / "output.o"
    # Stage 1: Python → LLVM IR, with no ERROR records logged.
    run_ir_generation(verifier_test_file, ll_path)
    error_records = [rec for rec in caplog.records if rec.levelno >= logging.ERROR]
    details = "\n".join(f"  [{rec.name}] {rec.getMessage()}" for rec in error_records)
    assert not error_records, "IR generation produced ERROR log(s):\n" + details
    # Stage 2: IR → object file via llc.
    run_llc(ll_path, obj_path)
    assert obj_path.exists() and obj_path.stat().st_size > 0
    # Stage 3: kernel verifier via sudo bpftool.
    ok, output = verify_object(obj_path)
    assert ok, f"Kernel verifier rejected {verifier_test_file.name}:\n{output}"