3 Commits

Author SHA1 Message Date
4905649700 feat:non struct field values can be cast 2025-11-26 14:18:40 +05:30
7b7b00dbe7 add disksnoop ipynb 2025-11-25 22:51:24 +05:30
102e4ca78c add disksnoop example 2025-11-24 22:50:39 +05:30
7 changed files with 3918 additions and 8 deletions

3694
BCC-Examples/disksnoop.ipynb Normal file

File diff suppressed because it is too large Load Diff

61
BCC-Examples/disksnoop.py Normal file
View File

@ -0,0 +1,61 @@
from vmlinux import struct_request, struct_pt_regs
from pythonbpf import bpf, section, bpfglobal, compile_to_ir, compile, map
from pythonbpf.helper import ktime
from pythonbpf.maps import HashMap
import logging
from ctypes import c_int64, c_uint64, c_int32
# Constants
REQ_WRITE = 1 # from include/linux/blk_types.h
# BPF hash map shared by the two kprobes below:
#   key   = raw struct request pointer (u64)
#   value = ktime() timestamp in nanoseconds (u64), set at request start.
@bpf
@map
def start() -> HashMap:
    return HashMap(key=c_uint64, value=c_uint64, max_entries=10240)
# kprobe on blk_mq_end_request: fires when a block request completes.
# Prints "<bytes> <flags_hex> <latency_us>" for requests previously recorded
# by trace_start, then removes the map entry so the map does not fill up.
@bpf
@section("kprobe/blk_mq_end_request")
def trace_completion(ctx: struct_pt_regs) -> c_int64:
    # Raw request pointer from the first kprobe argument (pt_regs di);
    # this is the key trace_start stored the timestamp under.
    req_ptr = ctx.di
    # Reinterpret the same value as a struct request pointer for field reads.
    req = struct_request(ctx.di)
    # Fields to print: request size in bytes and the command flags.
    data_len = req.__data_len
    cmd_flags = req.cmd_flags
    # Lookup start timestamp recorded by trace_start (None-ish if missing,
    # e.g. the probe attached while this request was already in flight).
    req_tsp = start.lookup(req_ptr)
    if req_tsp:
        # Latency in nanoseconds since the request was issued.
        delta = ktime() - req_tsp
        # Convert to microseconds for printing.
        delta_us = delta // 1000
        print(f"{data_len} {cmd_flags:x} {delta_us}\n")
        # Delete the entry now that the request is complete.
        start.delete(req_ptr)
    return c_int64(0)
# kprobe on blk_mq_start_request: fires when a block request is issued.
# Records the issue timestamp keyed by the raw request pointer (pt_regs di),
# for trace_completion to compute the latency against.
@bpf
@section("kprobe/blk_mq_start_request")
def trace_start(ctx1: struct_pt_regs) -> c_int32:
    req = ctx1.di
    ts = ktime()
    start.update(req, ts)
    return c_int32(0)
# License string emitted into the BPF object's "license" section;
# "GPL" is required for some kernel helpers to be usable.
@bpf
@bpfglobal
def LICENSE() -> str:
    return "GPL"
if __name__ == "__main__":
    # Compile this module's @bpf-decorated functions to LLVM IR, then
    # finish compilation to a loadable BPF object.
    compile_to_ir("disksnoop.py", "disksnoop.ll", loglevel=logging.INFO)
    compile()

View File

@ -666,13 +666,17 @@ def _handle_vmlinux_cast(
# Cast the integer/value to a pointer to the struct
# If arg_val is an integer type, we need to inttoptr it
ptr_type = ir.PointerType()
# TODO: add a integer check here later
if ctypes_to_ir(arg_type.type.__name__):
# Cast integer to pointer
casted_ptr = builder.inttoptr(arg_val, ptr_type)
# TODO: add a field value type check here
print(arg_type)
if isinstance(arg_type, Field):
if ctypes_to_ir(arg_type.type.__name__):
# Cast integer to pointer
casted_ptr = builder.inttoptr(arg_val, ptr_type)
else:
logger.error(f"Unsupported type for vmlinux cast: {arg_type}")
return None
else:
logger.error(f"Unsupported type for vmlinux cast: {arg_type}")
return None
casted_ptr = builder.inttoptr(arg_val, ptr_type)
return casted_ptr, vmlinux_struct_type

View File

@ -18,6 +18,8 @@ mapping = {
"c_longlong": ir.IntType(64),
"c_uint": ir.IntType(32),
"c_int": ir.IntType(32),
"c_ushort": ir.IntType(16),
"c_short": ir.IntType(16),
# Not so sure about this one
"str": ir.PointerType(ir.IntType(8)),
}

View File

@ -121,7 +121,12 @@ class VmlinuxHandler:
# Use bpf_probe_read_kernel for non-context struct field access
field_value = self.load_struct_field(
builder, struct_ptr, globvar_ir, field_data, struct_name
builder,
struct_ptr,
globvar_ir,
field_data,
struct_name,
local_sym_tab,
)
# Return field value and field type
return field_value, field_data
@ -130,7 +135,12 @@ class VmlinuxHandler:
@staticmethod
def load_struct_field(
builder, struct_ptr_int, offset_global, field_data, struct_name=None
builder,
struct_ptr_int,
offset_global,
field_data,
struct_name=None,
local_sym_tab=None,
):
"""
Generate LLVM IR to load a field from a regular (non-context) struct using bpf_probe_read_kernel.
@ -204,6 +214,7 @@ class VmlinuxHandler:
logger.warning("Complex vmlinux field type, using default 64 bits")
# Allocate local storage for the field value
# TODO: CRITICAL BUG. alloca cannot be used anywhere other than the basic block
local_storage = builder.alloca(ir.IntType(int_width))
local_storage_i8_ptr = builder.bitcast(local_storage, i8_ptr_type)

View File

@ -0,0 +1,66 @@
// disksnoop.bpf.c
// eBPF program (compile with: clang -O2 -g -target bpf -c disksnoop.bpf.c -o disksnoop.bpf.o)
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
/* License declaration; "GPL" is required for GPL-only kernel helpers. */
char LICENSE[] SEC("license") = "GPL";

/* Hash map: request pointer (u64) -> start timestamp in ns (u64). */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __u64);
	__type(value, __u64);
	__uint(max_entries, 10240);
} start_map SEC(".maps");
/* kprobe: record start timestamp keyed by request pointer */
SEC("kprobe/blk_mq_start_request")
int trace_start(struct pt_regs *ctx)
{
	/* struct request * is the first argument (pt_regs di); its raw
	 * address is used as the map key. */
	__u64 reqp = (__u64)(ctx->di);
	__u64 ts = bpf_ktime_get_ns();
	bpf_map_update_elem(&start_map, &reqp, &ts, BPF_ANY);
	/* NOTE(review): this debug print is ACTIVE. The original
	 * "optional debug" block-comment markers were themselves
	 * line-commented out, so they disabled nothing. Remove or
	 * properly comment out the bpf_printk for production use. */
	bpf_printk("start: req=%llu ts=%llu\n", reqp, ts);
	return 0;
}
/* completion: compute latency and print data_len, cmd_flags, latency_us */
SEC("kprobe/blk_mq_end_request")
int trace_completion(struct pt_regs *ctx)
{
	/* Request pointer (first argument) — the key trace_start stored under. */
	__u64 reqp = (__u64)(ctx->di);
	__u64 *tsp;
	__u64 now_ns;
	__u64 delta_ns;
	__u64 delta_us = 0;
	/* NOTE(review): leftover debug print — fires on every completion,
	 * uses %lld for an unsigned value and has no trailing newline;
	 * consider removing. */
	bpf_printk("%lld", reqp);
	tsp = bpf_map_lookup_elem(&start_map, &reqp);
	/* No start record (e.g. probe attached mid-flight): nothing to do. */
	if (!tsp)
		return 0;
	now_ns = bpf_ktime_get_ns();
	delta_ns = now_ns - *tsp;
	delta_us = delta_ns / 1000;
	/* read request fields using CO-RE; needs vmlinux.h/BTF */
	__u32 data_len = 0;
	__u32 cmd_flags = 0;
	/* __data_len is usually a 32/64-bit; use CORE read to be safe */
	data_len = ( __u32 ) BPF_CORE_READ((struct request *)reqp, __data_len);
	cmd_flags = ( __u32 ) BPF_CORE_READ((struct request *)reqp, cmd_flags);
	/* print: "<bytes> <flags_hex> <latency_us>" */
	bpf_printk("%u %x %llu\n", data_len, cmd_flags, delta_us);
	/* remove from map */
	bpf_map_delete_elem(&start_map, &reqp);
	return 0;
}

View File

@ -0,0 +1,72 @@
// xdp_ip_map.c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
/* Map key: address-family tag plus a 16-byte address buffer (sized so the
 * same layout could later carry IPv6); IPv4 uses only the first 4 bytes. */
struct ip_key {
	__u8 family;   /* 4 = IPv4 */
	__u8 pad[3];   /* padding for alignment */
	__u8 addr[16]; /* IPv4 uses first 4 bytes */
};

/* key -> packet count */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 16384);
	__type(key, struct ip_key);
	__type(value, __u64);
} ip_count_map SEC(".maps");
/*
 * Count packets per IPv4 source address into ip_count_map.
 * Observe-only: always returns XDP_PASS.
 *
 * Fix: the counter increment is now an atomic add. XDP programs run
 * concurrently on every CPU against the same hash-map value, so the
 * previous plain (*val)++ was a read-modify-write race that lost counts.
 */
SEC("xdp")
int xdp_ip_map(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;

	/* Ethernet header must lie fully inside the packet. */
	struct ethhdr *eth = data;
	if (eth + 1 > (struct ethhdr *)data_end)
		return XDP_PASS;

	__u16 h_proto = eth->h_proto;
	void *nh = data + sizeof(*eth);

	/* VLAN handling: skip one 802.1Q/802.1ad tag (4 bytes: 2-byte TCI
	 * followed by the inner ethertype). */
	if (h_proto == bpf_htons(ETH_P_8021Q) ||
	    h_proto == bpf_htons(ETH_P_8021AD)) {
		if (nh + 4 > data_end)
			return XDP_PASS;
		h_proto = *(__u16 *)(nh + 2);
		nh += 4;
	}

	struct ip_key key = {};

	/* IPv4 only; other ethertypes pass through uncounted. */
	if (h_proto == bpf_htons(ETH_P_IP)) {
		struct iphdr *iph = nh;
		if (iph + 1 > (struct iphdr *)data_end)
			return XDP_PASS;

		key.family = 4;
		/* Copy 4 bytes of the IPv4 source address. */
		__builtin_memcpy(key.addr, &iph->saddr, 4);

		__u64 *val = bpf_map_lookup_elem(&ip_count_map, &key);
		if (val) {
			/* Atomic add (BPF_XADD): safe against concurrent
			 * increments from other CPUs. */
			__sync_fetch_and_add(val, 1);
		} else {
			__u64 init = 1;
			/* First insert can race between CPUs and drop one
			 * count; acceptable for statistics gathering. */
			bpf_map_update_elem(&ip_count_map, &key, &init, BPF_ANY);
		}
		return XDP_PASS;
	}

	return XDP_PASS;
}
char _license[] SEC("license") = "GPL";