12 Commits

9 changed files with 366 additions and 141 deletions

View File

@ -16,6 +16,33 @@ def get_module_symbols(module_name: str):
return [name for name in dir(imported_module)], imported_module return [name for name in dir(imported_module)], imported_module
def unwrap_pointer_type(type_obj: Any) -> Any:
    """
    Strip every pointer/array layer from a ctypes-style type and return the base type.

    Handles multiply nested pointers such as LP_LP_struct_attribute_group,
    yielding the innermost non-pointer type (e.g. struct_attribute_group).
    Unwrapping stops at a type without a ``_type_`` attribute, or whose
    ``_type_`` is a plain string marker (e.g. ``'c'`` on ``c_char``).

    Args:
        type_obj: The (possibly pointer-wrapped) type object to unwrap.

    Returns:
        The base type after all pointer/array layers have been removed.
    """
    base = type_obj
    while True:
        inner = getattr(base, "_type_", None)
        # None: not a pointer/array wrapper; str: a primitive ctypes type code.
        if inner is None or isinstance(inner, str):
            return base
        base = inner
def process_vmlinux_class( def process_vmlinux_class(
node, node,
llvm_module, llvm_module,
@ -158,13 +185,90 @@ def process_vmlinux_post_ast(
if hasattr(elem_type, "_length_") and is_complex_type: if hasattr(elem_type, "_length_") and is_complex_type:
type_length = elem_type._length_ type_length = elem_type._length_
if containing_type.__module__ == "vmlinux": # Unwrap all pointer layers to get the base type for dependency tracking
new_dep_node.add_dependent( base_type = unwrap_pointer_type(elem_type)
elem_type._type_.__name__ base_type_module = getattr(base_type, "__module__", None)
if hasattr(elem_type._type_, "__name__")
else str(elem_type._type_) if base_type_module == "vmlinux":
base_type_name = (
base_type.__name__
if hasattr(base_type, "__name__")
else str(base_type)
)
# ONLY add vmlinux types as dependencies
new_dep_node.add_dependent(base_type_name)
logger.debug(
f"{containing_type} containing type of parent {elem_name} with {elem_type} and ctype {ctype_complex_type} and length {type_length}"
)
new_dep_node.set_field_containing_type(
elem_name, containing_type
)
new_dep_node.set_field_type_size(elem_name, type_length)
new_dep_node.set_field_ctype_complex_type(
elem_name, ctype_complex_type
)
new_dep_node.set_field_type(elem_name, elem_type)
# Check the containing_type module to decide whether to recurse
containing_type_module = getattr(
containing_type, "__module__", None
)
if containing_type_module == "vmlinux":
# Also unwrap containing_type to get base type name
base_containing_type = unwrap_pointer_type(
containing_type
)
containing_type_name = (
base_containing_type.__name__
if hasattr(base_containing_type, "__name__")
else str(base_containing_type)
)
# Check for self-reference or already processed
if containing_type_name == current_symbol_name:
# Self-referential pointer
logger.debug(
f"Self-referential pointer in {current_symbol_name}.{elem_name}"
)
new_dep_node.set_field_ready(elem_name, True)
elif handler.has_node(containing_type_name):
# Already processed
logger.debug(
f"Reusing already processed {containing_type_name}"
)
new_dep_node.set_field_ready(elem_name, True)
else:
# Process recursively - use base containing type, not the pointer wrapper
new_dep_node.add_dependent(containing_type_name)
process_vmlinux_post_ast(
base_containing_type,
llvm_handler,
handler,
processing_stack,
)
new_dep_node.set_field_ready(elem_name, True)
elif (
containing_type_module == ctypes.__name__
or containing_type_module is None
):
logger.debug(
f"Processing ctype internal{containing_type}"
)
new_dep_node.set_field_ready(elem_name, True)
else:
raise TypeError(
f"Module not supported in recursive resolution: {containing_type_module}"
)
elif (
base_type_module == ctypes.__name__
or base_type_module is None
):
# Handle ctypes or types with no module (like some internal ctypes types)
# DO NOT add ctypes as dependencies - just set field metadata and mark ready
logger.debug(
f"Base type {base_type} is ctypes - NOT adding as dependency, just processing field"
) )
elif containing_type.__module__ == ctypes.__name__:
if isinstance(elem_type, type): if isinstance(elem_type, type):
if issubclass(elem_type, ctypes.Array): if issubclass(elem_type, ctypes.Array):
ctype_complex_type = ctypes.Array ctype_complex_type = ctypes.Array
@ -176,57 +280,20 @@ def process_vmlinux_post_ast(
) )
else: else:
raise TypeError("Unsupported ctypes subclass") raise TypeError("Unsupported ctypes subclass")
else:
raise ImportError(
f"Unsupported module of {containing_type}"
)
logger.debug(
f"{containing_type} containing type of parent {elem_name} with {elem_type} and ctype {ctype_complex_type} and length {type_length}"
)
new_dep_node.set_field_containing_type(
elem_name, containing_type
)
new_dep_node.set_field_type_size(elem_name, type_length)
new_dep_node.set_field_ctype_complex_type(
elem_name, ctype_complex_type
)
new_dep_node.set_field_type(elem_name, elem_type)
if containing_type.__module__ == "vmlinux":
containing_type_name = (
containing_type.__name__
if hasattr(containing_type, "__name__")
else str(containing_type)
)
# Check for self-reference or already processed # Set field metadata but DO NOT add dependency or recurse
if containing_type_name == current_symbol_name: new_dep_node.set_field_containing_type(
# Self-referential pointer elem_name, containing_type
logger.debug( )
f"Self-referential pointer in {current_symbol_name}.{elem_name}" new_dep_node.set_field_type_size(elem_name, type_length)
) new_dep_node.set_field_ctype_complex_type(
new_dep_node.set_field_ready(elem_name, True) elem_name, ctype_complex_type
elif handler.has_node(containing_type_name): )
# Already processed new_dep_node.set_field_type(elem_name, elem_type)
logger.debug(
f"Reusing already processed {containing_type_name}"
)
new_dep_node.set_field_ready(elem_name, True)
else:
# Process recursively - THIS WAS MISSING
new_dep_node.add_dependent(containing_type_name)
process_vmlinux_post_ast(
containing_type,
llvm_handler,
handler,
processing_stack,
)
new_dep_node.set_field_ready(elem_name, True)
elif containing_type.__module__ == ctypes.__name__:
logger.debug(f"Processing ctype internal{containing_type}")
new_dep_node.set_field_ready(elem_name, True) new_dep_node.set_field_ready(elem_name, True)
else: else:
raise TypeError( raise ImportError(
"Module not supported in recursive resolution" f"Unsupported module of {base_type}: {base_type_module}"
) )
else: else:
new_dep_node.add_dependent( new_dep_node.add_dependent(
@ -245,9 +312,12 @@ def process_vmlinux_post_ast(
raise ValueError( raise ValueError(
f"{elem_name} with type {elem_type} from module {module_name} not supported in recursive resolver" f"{elem_name} with type {elem_type} from module {module_name} not supported in recursive resolver"
) )
elif module_name == ctypes.__name__ or module_name is None:
# Handle ctypes types - these don't need processing, just return
logger.debug(f"Skipping ctypes type {current_symbol_name}")
return True
else: else:
raise ImportError("UNSUPPORTED Module") raise ImportError(f"UNSUPPORTED Module {module_name}")
logger.info( logger.info(
f"{current_symbol_name} processed and handler readiness {handler.is_ready}" f"{current_symbol_name} processed and handler readiness {handler.is_ready}"

View File

@ -11,7 +11,9 @@ from .class_handler import process_vmlinux_class
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def detect_import_statement(tree: ast.AST) -> list[tuple[str, ast.ImportFrom]]: def detect_import_statement(
tree: ast.AST,
) -> list[tuple[str, ast.ImportFrom, str, str]]:
""" """
Parse AST and detect import statements from vmlinux. Parse AST and detect import statements from vmlinux.
@ -25,7 +27,7 @@ def detect_import_statement(tree: ast.AST) -> list[tuple[str, ast.ImportFrom]]:
List of tuples containing (module_name, imported_item) for each vmlinux import List of tuples containing (module_name, imported_item) for each vmlinux import
Raises: Raises:
SyntaxError: If multiple imports from vmlinux are attempted or import * is used SyntaxError: If import * is used
""" """
vmlinux_imports = [] vmlinux_imports = []
@ -40,28 +42,19 @@ def detect_import_statement(tree: ast.AST) -> list[tuple[str, ast.ImportFrom]]:
"Please import specific types explicitly." "Please import specific types explicitly."
) )
# Check for multiple imports: from vmlinux import A, B, C
if len(node.names) > 1:
imported_names = [alias.name for alias in node.names]
raise SyntaxError(
f"Multiple imports from vmlinux are not supported. "
f"Found: {', '.join(imported_names)}. "
f"Please use separate import statements for each type."
)
# Check if no specific import is specified (should not happen with valid Python) # Check if no specific import is specified (should not happen with valid Python)
if len(node.names) == 0: if len(node.names) == 0:
raise SyntaxError( raise SyntaxError(
"Import from vmlinux must specify at least one type." "Import from vmlinux must specify at least one type."
) )
# Valid single import # Support multiple imports: from vmlinux import A, B, C
for alias in node.names: for alias in node.names:
import_name = alias.name import_name = alias.name
# Use alias if provided, otherwise use the original name (commented) # Use alias if provided, otherwise use the original name
# as_name = alias.asname if alias.asname else alias.name as_name = alias.asname if alias.asname else alias.name
vmlinux_imports.append(("vmlinux", node)) vmlinux_imports.append(("vmlinux", node, import_name, as_name))
logger.info(f"Found vmlinux import: {import_name}") logger.info(f"Found vmlinux import: {import_name} as {as_name}")
# Handle "import vmlinux" statements (not typical but should be rejected) # Handle "import vmlinux" statements (not typical but should be rejected)
elif isinstance(node, ast.Import): elif isinstance(node, ast.Import):
@ -103,40 +96,37 @@ def vmlinux_proc(tree: ast.AST, module):
with open(source_file, "r") as f: with open(source_file, "r") as f:
mod_ast = ast.parse(f.read(), filename=source_file) mod_ast = ast.parse(f.read(), filename=source_file)
for import_mod, import_node in import_statements: for import_mod, import_node, imported_name, as_name in import_statements:
for alias in import_node.names: found = False
imported_name = alias.name for mod_node in mod_ast.body:
found = False if isinstance(mod_node, ast.ClassDef) and mod_node.name == imported_name:
for mod_node in mod_ast.body: process_vmlinux_class(mod_node, module, handler)
if ( found = True
isinstance(mod_node, ast.ClassDef) break
and mod_node.name == imported_name if isinstance(mod_node, ast.Assign):
): for target in mod_node.targets:
process_vmlinux_class(mod_node, module, handler) if isinstance(target, ast.Name) and target.id == imported_name:
found = True process_vmlinux_assign(mod_node, module, assignments, as_name)
break found = True
if isinstance(mod_node, ast.Assign): break
for target in mod_node.targets: if found:
if isinstance(target, ast.Name) and target.id == imported_name: break
process_vmlinux_assign(mod_node, module, assignments) if not found:
found = True logger.info(f"{imported_name} not found as ClassDef or Assign in vmlinux")
break
if found:
break
if not found:
logger.info(
f"{imported_name} not found as ClassDef or Assign in vmlinux"
)
IRGenerator(module, handler, assignments) IRGenerator(module, handler, assignments)
return assignments return assignments
def process_vmlinux_assign(node, module, assignments: dict[str, AssignmentInfo]): def process_vmlinux_assign(
node, module, assignments: dict[str, AssignmentInfo], target_name=None
):
"""Process assignments from vmlinux module.""" """Process assignments from vmlinux module."""
# Only handle single-target assignments # Only handle single-target assignments
if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name): if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
target_name = node.targets[0].id # Use provided target_name (for aliased imports) or fall back to original name
if target_name is None:
target_name = node.targets[0].id
# Handle constant value assignments # Handle constant value assignments
if isinstance(node.value, ast.Constant): if isinstance(node.value, ast.Constant):

View File

@ -21,7 +21,7 @@ def debug_info_generation(
generated_debug_info: List of tuples (struct, debug_info) to track generated debug info generated_debug_info: List of tuples (struct, debug_info) to track generated debug info
Returns: Returns:
The generated global variable debug info The generated global variable debug info, or None for unsupported types
""" """
# Set up debug info generator # Set up debug info generator
generator = DebugInfoGenerator(llvm_module) generator = DebugInfoGenerator(llvm_module)
@ -31,23 +31,39 @@ def debug_info_generation(
if existing_struct.name == struct.name: if existing_struct.name == struct.name:
return debug_info return debug_info
# Check if this is a union (not supported yet)
if not struct.name.startswith("struct_"):
logger.warning(f"Skipping debug info generation for union: {struct.name}")
# Create a minimal forward declaration for unions
union_type = generator.create_struct_type(
[], struct.__sizeof__() * 8, is_distinct=True
)
return union_type
# Process all fields and create members for the struct # Process all fields and create members for the struct
members = [] members = []
for field_name, field in struct.fields.items(): for field_name, field in struct.fields.items():
# Get appropriate debug type for this field try:
field_type = _get_field_debug_type( # Get appropriate debug type for this field
field_name, field, generator, struct, generated_debug_info field_type = _get_field_debug_type(
) field_name, field, generator, struct, generated_debug_info
# Create struct member with proper offset )
member = generator.create_struct_member_vmlinux(
field_name, field_type, field.offset * 8
)
members.append(member)
if struct.name.startswith("struct_"): # Ensure field_type is a tuple
struct_name = struct.name.removeprefix("struct_") if not isinstance(field_type, tuple) or len(field_type) != 2:
else: logger.error(f"Invalid field_type for {field_name}: {field_type}")
raise ValueError("Unions are not supported in the current version") continue
# Create struct member with proper offset
member = generator.create_struct_member_vmlinux(
field_name, field_type, field.offset * 8
)
members.append(member)
except Exception as e:
logger.error(f"Failed to process field {field_name} in {struct.name}: {e}")
continue
struct_name = struct.name.removeprefix("struct_")
# Create struct type with all members # Create struct type with all members
struct_type = generator.create_struct_type_with_name( struct_type = generator.create_struct_type_with_name(
struct_name, members, struct.__sizeof__() * 8, is_distinct=True struct_name, members, struct.__sizeof__() * 8, is_distinct=True
@ -74,11 +90,17 @@ def _get_field_debug_type(
generated_debug_info: List of already generated debug info generated_debug_info: List of already generated debug info
Returns: Returns:
The debug info type for this field A tuple of (debug_type, size_in_bits)
""" """
# Handle complex types (arrays, pointers) # Handle complex types (arrays, pointers, function pointers)
if field.ctype_complex_type is not None: if field.ctype_complex_type is not None:
if issubclass(field.ctype_complex_type, ctypes.Array): # Handle function pointer types (CFUNCTYPE)
if callable(field.ctype_complex_type):
# Function pointers are represented as void pointers
logger.info(f"Field {field_name} is a function pointer, using void pointer")
void_ptr = generator.create_pointer_type(None, 64)
return void_ptr, 64
elif issubclass(field.ctype_complex_type, ctypes.Array):
# Handle array types # Handle array types
element_type, base_type_size = _get_basic_debug_type( element_type, base_type_size = _get_basic_debug_type(
field.containing_type, generator field.containing_type, generator
@ -100,11 +122,13 @@ def _get_field_debug_type(
for existing_struct, debug_info in generated_debug_info: for existing_struct, debug_info in generated_debug_info:
if existing_struct.name == struct_name: if existing_struct.name == struct_name:
# Use existing debug info # Use existing debug info
return debug_info, existing_struct.__sizeof__() return debug_info, existing_struct.__sizeof__() * 8
# If not found, create a forward declaration # If not found, create a forward declaration
# This will be completed when the actual struct is processed # This will be completed when the actual struct is processed
logger.warning("Forward declaration in struct created") logger.warning(
f"Forward declaration created for {struct_name} in {parent_struct.name}"
)
forward_type = generator.create_struct_type([], 0, is_distinct=True) forward_type = generator.create_struct_type([], 0, is_distinct=True)
return forward_type, 0 return forward_type, 0

View File

@ -11,6 +11,10 @@ logger = logging.getLogger(__name__)
class IRGenerator: class IRGenerator:
# This field keeps track of the non_struct names to avoid duplicate name errors.
type_number = 0
unprocessed_store: list[str] = []
# get the assignments dict and add this stuff to it. # get the assignments dict and add this stuff to it.
def __init__(self, llvm_module, handler: DependencyHandler, assignments): def __init__(self, llvm_module, handler: DependencyHandler, assignments):
self.llvm_module = llvm_module self.llvm_module = llvm_module
@ -129,7 +133,19 @@ class IRGenerator:
for field_name, field in struct.fields.items(): for field_name, field in struct.fields.items():
# does not take arrays and similar types into consideration yet. # does not take arrays and similar types into consideration yet.
if field.ctype_complex_type is not None and issubclass( if callable(field.ctype_complex_type):
# Function pointer case - generate a simple field accessor
field_co_re_name, returned = self._struct_name_generator(
struct, field, field_index
)
field_index += 1
globvar = ir.GlobalVariable(
self.llvm_module, ir.IntType(64), name=field_co_re_name
)
globvar.linkage = "external"
globvar.set_metadata("llvm.preserve.access.index", debug_info)
self.generated_field_names[struct.name][field_name] = globvar
elif field.ctype_complex_type is not None and issubclass(
field.ctype_complex_type, ctypes.Array field.ctype_complex_type, ctypes.Array
): ):
array_size = field.type_size array_size = field.type_size
@ -137,7 +153,7 @@ class IRGenerator:
if containing_type.__module__ == ctypes.__name__: if containing_type.__module__ == ctypes.__name__:
containing_type_size = ctypes.sizeof(containing_type) containing_type_size = ctypes.sizeof(containing_type)
if array_size == 0: if array_size == 0:
field_co_re_name = self._struct_name_generator( field_co_re_name, returned = self._struct_name_generator(
struct, field, field_index, True, 0, containing_type_size struct, field, field_index, True, 0, containing_type_size
) )
globvar = ir.GlobalVariable( globvar = ir.GlobalVariable(
@ -149,7 +165,7 @@ class IRGenerator:
field_index += 1 field_index += 1
continue continue
for i in range(0, array_size): for i in range(0, array_size):
field_co_re_name = self._struct_name_generator( field_co_re_name, returned = self._struct_name_generator(
struct, field, field_index, True, i, containing_type_size struct, field, field_index, True, i, containing_type_size
) )
globvar = ir.GlobalVariable( globvar = ir.GlobalVariable(
@ -163,12 +179,28 @@ class IRGenerator:
array_size = field.type_size array_size = field.type_size
containing_type = field.containing_type containing_type = field.containing_type
if containing_type.__module__ == "vmlinux": if containing_type.__module__ == "vmlinux":
containing_type_size = self.handler[ # Unwrap all pointer layers to get the base struct type
containing_type.__name__ base_containing_type = containing_type
].current_offset while hasattr(base_containing_type, "_type_"):
for i in range(0, array_size): next_type = base_containing_type._type_
field_co_re_name = self._struct_name_generator( # Stop if _type_ is a string (like 'c' for c_char)
struct, field, field_index, True, i, containing_type_size # TODO: stacked pointers not handl;ing ctypes check here as well
if isinstance(next_type, str):
break
base_containing_type = next_type
# Get the base struct name
base_struct_name = (
base_containing_type.__name__
if hasattr(base_containing_type, "__name__")
else str(base_containing_type)
)
# Look up the size using the base struct name
containing_type_size = self.handler[base_struct_name].current_offset
if array_size == 0:
field_co_re_name, returned = self._struct_name_generator(
struct, field, field_index, True, 0, containing_type_size
) )
globvar = ir.GlobalVariable( globvar = ir.GlobalVariable(
self.llvm_module, ir.IntType(64), name=field_co_re_name self.llvm_module, ir.IntType(64), name=field_co_re_name
@ -176,9 +208,30 @@ class IRGenerator:
globvar.linkage = "external" globvar.linkage = "external"
globvar.set_metadata("llvm.preserve.access.index", debug_info) globvar.set_metadata("llvm.preserve.access.index", debug_info)
self.generated_field_names[struct.name][field_name] = globvar self.generated_field_names[struct.name][field_name] = globvar
field_index += 1 field_index += 1
else:
for i in range(0, array_size):
field_co_re_name, returned = self._struct_name_generator(
struct,
field,
field_index,
True,
i,
containing_type_size,
)
globvar = ir.GlobalVariable(
self.llvm_module, ir.IntType(64), name=field_co_re_name
)
globvar.linkage = "external"
globvar.set_metadata(
"llvm.preserve.access.index", debug_info
)
self.generated_field_names[struct.name][field_name] = (
globvar
)
field_index += 1
else: else:
field_co_re_name = self._struct_name_generator( field_co_re_name, returned = self._struct_name_generator(
struct, field, field_index struct, field, field_index
) )
field_index += 1 field_index += 1
@ -198,7 +251,7 @@ class IRGenerator:
is_indexed: bool = False, is_indexed: bool = False,
index: int = 0, index: int = 0,
containing_type_size: int = 0, containing_type_size: int = 0,
) -> str: ) -> tuple[str, bool]:
# TODO: Does not support Unions as well as recursive pointer and array type naming # TODO: Does not support Unions as well as recursive pointer and array type naming
if is_indexed: if is_indexed:
name = ( name = (
@ -208,7 +261,7 @@ class IRGenerator:
+ "$" + "$"
+ f"0:{field_index}:{index}" + f"0:{field_index}:{index}"
) )
return name return name, True
elif struct.name.startswith("struct_"): elif struct.name.startswith("struct_"):
name = ( name = (
"llvm." "llvm."
@ -217,9 +270,18 @@ class IRGenerator:
+ "$" + "$"
+ f"0:{field_index}" + f"0:{field_index}"
) )
return name return name, True
else: else:
print(self.handler[struct.name]) logger.warning(
raise TypeError( "Blindly handling non-struct type to avoid type errors in vmlinux IR generation. Possibly a union."
"Name generation cannot occur due to type name not starting with struct"
) )
self.type_number += 1
unprocessed_type = "unprocessed_type_" + str(self.handler[struct.name].name)
if self.unprocessed_store.__contains__(unprocessed_type):
return unprocessed_type + "_" + str(self.type_number), False
else:
self.unprocessed_store.append(unprocessed_type)
return unprocessed_type, False
# raise TypeError(
# "Name generation cannot occur due to type name not starting with struct"
# )

View File

@ -3,21 +3,20 @@ CFLAGS := -emit-llvm -target bpf -c
SRC := $(wildcard *.bpf.c) SRC := $(wildcard *.bpf.c)
LL := $(SRC:.bpf.c=.bpf.ll) LL := $(SRC:.bpf.c=.bpf.ll)
LL2 := $(SRC:.bpf.c=.bpf.o2.ll)
OBJ := $(SRC:.bpf.c=.bpf.o) OBJ := $(SRC:.bpf.c=.bpf.o)
LL0 := $(SRC:.bpf.c=.bpf.o0.ll)
.PHONY: all clean .PHONY: all clean
all: $(LL) $(OBJ) $(LL2) all: $(LL) $(OBJ) $(LL0)
%.bpf.o: %.bpf.c %.bpf.o: %.bpf.c
$(BPF_CLANG) -O2 -g -target bpf -c $< -o $@ $(BPF_CLANG) -O2 -g -target bpf -c $< -o $@
%.bpf.ll: %.bpf.c %.bpf.ll: %.bpf.c
$(BPF_CLANG) -O0 $(CFLAGS) -g -S $< -o $@ $(BPF_CLANG) $(CFLAGS) -O2 -g -S $< -o $@
%.bpf.o2.ll: %.bpf.c %.bpf.o0.ll: %.bpf.c
$(BPF_CLANG) -O2 $(CFLAGS) -g -S $< -o $@ $(BPF_CLANG) $(CFLAGS) -O0 -g -S $< -o $@
clean: clean:
rm -f $(LL) $(OBJ) $(LL2) rm -f $(LL) $(OBJ) $(LL0)

View File

@ -0,0 +1,15 @@
/* Reference BPF C program: kprobe on blk_mq_start_request that prints the
 * request's data length. Serves as the C counterpart to the Python examples.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
/* BPF programs must declare a GPL-compatible license to use GPL-only helpers. */
char LICENSE[] SEC("license") = "GPL";
SEC("kprobe/blk_mq_start_request")
int example(struct pt_regs *ctx)
{
/* ctx->di holds the first kprobe argument (presumably x86-64 calling
 * convention — the struct request * passed to blk_mq_start_request). */
struct request *req = (struct request *)(ctx->di);
u32 data_len = req->__data_len;
bpf_printk("data length %u\n", data_len);
return 0;
}

View File

@ -0,0 +1,22 @@
from vmlinux import XDP_PASS
from pythonbpf import bpf, section, bpfglobal, compile_to_ir
import logging
from ctypes import c_int64, c_void_p
@bpf
@section("kprobe/blk_mq_start_request")
def example(ctx: c_void_p) -> c_int64:
    # Exercises assignment handling of a constant imported from vmlinux:
    # a direct alias vs. use inside an arithmetic expression.
    d = XDP_PASS  # This gives an error, but
    e = XDP_PASS + 0  # this does not
    print(f"test1 {e} test2 {d}")
    return c_int64(0)
@bpf
@bpfglobal
def LICENSE() -> str:
    # eBPF programs must declare a GPL-compatible license to load.
    return "GPL"
# Compile this script's decorated BPF functions down to LLVM IR.
compile_to_ir("assignment_handling.py", "assignment_handling.ll", loglevel=logging.INFO)

View File

@ -0,0 +1,22 @@
from vmlinux import struct_request, struct_pt_regs
from pythonbpf import bpf, section, bpfglobal, compile_to_ir
import logging
from ctypes import c_int64
@bpf
@section("kprobe/blk_mq_start_request")
def example(ctx: struct_pt_regs) -> c_int64:
    # Cast the first kprobe argument (ctx.di) to a struct request and
    # read its __data_len field through the vmlinux type.
    req = struct_request(ctx.di)
    c = req.__data_len
    print(f"data length {c}")
    return c_int64(0)
@bpf
@bpfglobal
def LICENSE() -> str:
    # eBPF programs must declare a GPL-compatible license to load.
    return "GPL"
# Compile this script's decorated BPF functions down to LLVM IR.
compile_to_ir("requests.py", "requests.ll", loglevel=logging.INFO)

View File

@ -0,0 +1,21 @@
from vmlinux import struct_pt_regs
from pythonbpf import bpf, section, bpfglobal, compile_to_ir
import logging
from ctypes import c_int64
@bpf
@section("kprobe/blk_mq_start_request")
def example(ctx: struct_pt_regs) -> c_int64:
    # Read the raw di register value from pt_regs without casting it
    # to a struct request (contrast with requests.py).
    req = ctx.di
    print(f"data length {req}")
    return c_int64(0)
@bpf
@bpfglobal
def LICENSE() -> str:
    # eBPF programs must declare a GPL-compatible license to load.
    return "GPL"
# Compile this script's decorated BPF functions down to LLVM IR.
compile_to_ir("requests2.py", "requests2.ll", loglevel=logging.INFO)