mirror of
https://github.com/varun-r-mallya/Python-BPF.git
synced 2025-12-31 21:06:25 +00:00
Compare commits
77 Commits
copilot/fi
...
v0.1.8
| Author | SHA1 | Date | |
|---|---|---|---|
| 049ec55e85 | |||
| 77901accf2 | |||
| 0616a2fccb | |||
| 526425a267 | |||
| 466ecdb6a4 | |||
| 752a10fa5f | |||
| 3602b502f4 | |||
| 808db2722d | |||
| 99fc5d75cc | |||
| c91e69e2f7 | |||
| dc995a1448 | |||
| 0fd6bea211 | |||
| 01d234ac86 | |||
| c97efb2570 | |||
| 76c982e15e | |||
| 2543826e85 | |||
| 650744f843 | |||
| d73c793989 | |||
| bbe4990878 | |||
| 600993f626 | |||
| 6c55d56ef0 | |||
| 704b0d8cd3 | |||
| 0e50079d88 | |||
| d457f87410 | |||
| 4ea02745b3 | |||
| 84edddb685 | |||
| 6f017a9176 | |||
| 24e5829b80 | |||
| 2daedc5882 | |||
| 14af7ec4dd | |||
| 536ea4855e | |||
| 5ba29db362 | |||
| 0ca835079d | |||
| de8c486461 | |||
| f135cdbcc0 | |||
| a8595ff1d2 | |||
| d43d3ad637 | |||
| 9becee8f77 | |||
| 189526d5ca | |||
| 1593b7bcfe | |||
| 127852ee9f | |||
| 4905649700 | |||
| 7b7b00dbe7 | |||
| 102e4ca78c | |||
| 2fd4fefbcc | |||
| 016fd5de5c | |||
| 8ad5fb8a3a | |||
| bf9635e324 | |||
| cbe365d760 | |||
| fed6af1ed6 | |||
| 18886816fb | |||
| a2de15fb1e | |||
| 9def969592 | |||
| 081ee5cb4c | |||
| a91c3158ad | |||
| 2b3635fe20 | |||
| 6f25c554a9 | |||
| 84507b8b98 | |||
| a42a75179d | |||
| 377fa4041d | |||
| 99321c7669 | |||
| 11850d16d3 | |||
| 9ee821c7f6 | |||
| 25394059a6 | |||
| fde8eab775 | |||
| 42b8865a56 | |||
| 144d9b0ab4 | |||
| 902a52a07d | |||
| 306570953b | |||
| 740eed45e1 | |||
| c8801f4c3e | |||
| 49740598ea | |||
| 73bbf00e7c | |||
| f7dee329cb | |||
| 5031f90377 | |||
| 95a624044a | |||
| c5bef26b88 |
2
.github/workflows/format.yml
vendored
2
.github/workflows/format.yml
vendored
@ -12,7 +12,7 @@ jobs:
|
|||||||
name: Format
|
name: Format
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v6
|
||||||
- uses: actions/setup-python@v6
|
- uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: "3.x"
|
python-version: "3.x"
|
||||||
|
|||||||
2
.github/workflows/python-publish.yml
vendored
2
.github/workflows/python-publish.yml
vendored
@ -20,7 +20,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v6
|
||||||
|
|
||||||
- uses: actions/setup-python@v6
|
- uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
|
|||||||
@ -7,14 +7,25 @@ This folder contains examples of BCC tutorial examples that have been ported to
|
|||||||
- You will also need `matplotlib` for vfsreadlat.py example.
|
- You will also need `matplotlib` for vfsreadlat.py example.
|
||||||
- You will also need `rich` for vfsreadlat_rich.py example.
|
- You will also need `rich` for vfsreadlat_rich.py example.
|
||||||
- You will also need `plotly` and `dash` for vfsreadlat_plotly.py example.
|
- You will also need `plotly` and `dash` for vfsreadlat_plotly.py example.
|
||||||
|
- All of these are added to `requirements.txt` file. You can install them using the following command:
|
||||||
|
```bash
|
||||||
|
pip install -r requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
- You'll need root privileges to run these examples. If you are using a virtualenv, use the following command to run the scripts:
|
- You'll need root privileges to run these examples. If you are using a virtualenv, use the following command to run the scripts:
|
||||||
```bash
|
```bash
|
||||||
sudo <path_to_virtualenv>/bin/python3 <script_name>.py
|
sudo <path_to_virtualenv>/bin/python3 <script_name>.py
|
||||||
```
|
```
|
||||||
|
- For the disksnoop and container-monitor examples, you need to generate the vmlinux.py file first. Follow the instructions in the [main README](https://github.com/pythonbpf/Python-BPF/tree/master?tab=readme-ov-file#first-generate-the-vmlinuxpy-file-for-your-kernel) to generate the vmlinux.py file.
|
||||||
- For vfsreadlat_plotly.py, run the following command to start the Dash server:
|
- For vfsreadlat_plotly.py, run the following command to start the Dash server:
|
||||||
```bash
|
```bash
|
||||||
sudo <path_to_virtualenv>/bin/python3 vfsreadlat_plotly/bpf_program.py
|
sudo <path_to_virtualenv>/bin/python3 vfsreadlat_plotly/bpf_program.py
|
||||||
```
|
```
|
||||||
Then open your web browser and navigate to the given URL.
|
Then open your web browser and navigate to the given URL.
|
||||||
|
- For container-monitor, you need to first copy the vmlinux.py to `container-monitor/` directory.
|
||||||
|
Then run the following command to run the example:
|
||||||
|
```bash
|
||||||
|
cp vmlinux.py container-monitor/
|
||||||
|
sudo <path_to_virtualenv>/bin/python3 container-monitor/container_monitor.py
|
||||||
|
```
|
||||||
|
|||||||
49
BCC-Examples/container-monitor/README.md
Normal file
49
BCC-Examples/container-monitor/README.md
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
# Container Monitor TUI
|
||||||
|
|
||||||
|
A beautiful terminal-based container monitoring tool that combines syscall tracking, file I/O monitoring, and network traffic analysis using eBPF.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- 🎯 **Interactive Cgroup Selection** - Navigate and select cgroups with arrow keys
|
||||||
|
- 📊 **Real-time Monitoring** - Live graphs and statistics
|
||||||
|
- 🔥 **Syscall Tracking** - Total syscall count per cgroup
|
||||||
|
- 💾 **File I/O Monitoring** - Read/write operations and bytes with graphs
|
||||||
|
- 🌐 **Network Traffic** - RX/TX packets and bytes with live graphs
|
||||||
|
- ⚡ **Efficient Caching** - Reduced /proc lookups for better performance
|
||||||
|
- 🎨 **Beautiful TUI** - Clean, colorful terminal interface
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- Python 3.7+
|
||||||
|
- pythonbpf
|
||||||
|
- Root privileges (for eBPF)
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Ensure you have pythonbpf installed
|
||||||
|
pip install pythonbpf
|
||||||
|
|
||||||
|
# Run the monitor
|
||||||
|
sudo $(which python) container_monitor.py
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
1. **Selection Screen**: Use ↑↓ arrow keys to navigate through cgroups, press ENTER to select
|
||||||
|
2. **Monitoring Screen**: View real-time graphs and statistics, press ESC or 'b' to go back
|
||||||
|
3. **Exit**: Press 'q' at any time to quit
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
- `container_monitor.py` - Main BPF program combining all three tracers
|
||||||
|
- `data_collection.py` - Data collection, caching, and history management
|
||||||
|
- `tui.py` - Terminal user interface with selection and monitoring screens
|
||||||
|
|
||||||
|
## BPF Programs
|
||||||
|
|
||||||
|
- **vfs_read/vfs_write** - Track file I/O operations
|
||||||
|
- **__netif_receive_skb/__dev_queue_xmit** - Track network traffic
|
||||||
|
- **raw_syscalls/sys_enter** - Count all syscalls
|
||||||
|
|
||||||
|
All programs filter by cgroup ID for per-container monitoring.
|
||||||
220
BCC-Examples/container-monitor/container_monitor.py
Normal file
220
BCC-Examples/container-monitor/container_monitor.py
Normal file
@ -0,0 +1,220 @@
|
|||||||
|
"""Container Monitor - TUI-based cgroup monitoring combining syscall, file I/O, and network tracking."""
|
||||||
|
|
||||||
|
from pythonbpf import bpf, map, section, bpfglobal, struct, BPF
|
||||||
|
from pythonbpf.maps import HashMap
|
||||||
|
from pythonbpf.helper import get_current_cgroup_id
|
||||||
|
from ctypes import c_int32, c_uint64, c_void_p
|
||||||
|
from vmlinux import struct_pt_regs, struct_sk_buff
|
||||||
|
|
||||||
|
from data_collection import ContainerDataCollector
|
||||||
|
from tui import ContainerMonitorTUI
|
||||||
|
|
||||||
|
|
||||||
|
# ==================== BPF Structs ====================
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@struct
class read_stats:
    # Per-cgroup vfs_read accumulator (value type of read_map).
    # Field order defines the C struct layout — do not reorder.
    bytes: c_uint64  # total bytes requested by reads
    ops: c_uint64  # total number of read operations
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@struct
class write_stats:
    # Per-cgroup vfs_write accumulator (value type of write_map).
    # Field order defines the C struct layout — do not reorder.
    bytes: c_uint64  # total bytes requested by writes
    ops: c_uint64  # total number of write operations
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@struct
class net_stats:
    # Per-cgroup network counters (value type of net_stats_map).
    # Field order defines the C struct layout — do not reorder.
    rx_packets: c_uint64  # packets seen on receive path
    tx_packets: c_uint64  # packets seen on transmit path
    rx_bytes: c_uint64  # bytes received (skb->len)
    tx_bytes: c_uint64  # bytes transmitted (skb->len)
|
||||||
|
|
||||||
|
|
||||||
|
# ==================== BPF Maps ====================
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@map
def read_map() -> HashMap:
    # cgroup id -> read_stats; tracks up to 1024 cgroups concurrently.
    return HashMap(key=c_uint64, value=read_stats, max_entries=1024)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@map
def write_map() -> HashMap:
    # cgroup id -> write_stats; tracks up to 1024 cgroups concurrently.
    return HashMap(key=c_uint64, value=write_stats, max_entries=1024)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@map
def net_stats_map() -> HashMap:
    # cgroup id -> net_stats; tracks up to 1024 cgroups concurrently.
    return HashMap(key=c_uint64, value=net_stats, max_entries=1024)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@map
def syscall_count() -> HashMap:
    # cgroup id -> total syscall count since program load.
    return HashMap(key=c_uint64, value=c_uint64, max_entries=1024)
|
||||||
|
|
||||||
|
|
||||||
|
# ==================== File I/O Tracing ====================
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@section("kprobe/vfs_read")
def trace_read(ctx: struct_pt_regs) -> c_int32:
    # Accumulate per-cgroup read bytes/ops on every vfs_read() entry.
    cg = get_current_cgroup_id()
    # Third kprobe argument (rdx on x86-64) = requested byte count.
    # NOTE(review): this is the *requested* count, not the bytes actually
    # read, since the kprobe fires on entry — confirm intended.
    count = c_uint64(ctx.dx)
    ptr = read_map.lookup(cg)
    if ptr:
        # Existing entry: rebuild the value with the increments applied.
        s = read_stats()
        s.bytes = ptr.bytes + count
        s.ops = ptr.ops + 1
        read_map.update(cg, s)
    else:
        # First sample for this cgroup.
        s = read_stats()
        s.bytes = count
        s.ops = c_uint64(1)
        read_map.update(cg, s)

    return c_int32(0)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@section("kprobe/vfs_write")
def trace_write(ctx1: struct_pt_regs) -> c_int32:
    # Accumulate per-cgroup write bytes/ops on every vfs_write() entry.
    cg = get_current_cgroup_id()
    # Third kprobe argument (rdx on x86-64) = requested byte count.
    count = c_uint64(ctx1.dx)
    ptr = write_map.lookup(cg)

    if ptr:
        # Existing entry: rebuild the value with the increments applied.
        s = write_stats()
        s.bytes = ptr.bytes + count
        s.ops = ptr.ops + 1
        write_map.update(cg, s)
    else:
        # First sample for this cgroup.
        s = write_stats()
        s.bytes = count
        s.ops = c_uint64(1)
        write_map.update(cg, s)

    return c_int32(0)
|
||||||
|
|
||||||
|
|
||||||
|
# ==================== Network I/O Tracing ====================
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@section("kprobe/__netif_receive_skb")
def trace_netif_rx(ctx2: struct_pt_regs) -> c_int32:
    # Attribute each received packet/byte count to the current cgroup.
    # NOTE(review): __netif_receive_skb often runs in softirq context, so
    # get_current_cgroup_id() may return the cgroup of whatever task was
    # interrupted rather than the packet's owner — confirm this is acceptable.
    cgroup_id = get_current_cgroup_id()
    # First kprobe argument (rdi on x86-64) = struct sk_buff pointer.
    skb = struct_sk_buff(ctx2.di)
    pkt_len = c_uint64(skb.len)

    stats_ptr = net_stats_map.lookup(cgroup_id)

    if stats_ptr:
        # Existing entry: copy forward TX fields, bump RX fields.
        stats = net_stats()
        stats.rx_packets = stats_ptr.rx_packets + 1
        stats.tx_packets = stats_ptr.tx_packets
        stats.rx_bytes = stats_ptr.rx_bytes + pkt_len
        stats.tx_bytes = stats_ptr.tx_bytes
        net_stats_map.update(cgroup_id, stats)
    else:
        # First sample for this cgroup: RX only.
        stats = net_stats()
        stats.rx_packets = c_uint64(1)
        stats.tx_packets = c_uint64(0)
        stats.rx_bytes = pkt_len
        stats.tx_bytes = c_uint64(0)
        net_stats_map.update(cgroup_id, stats)

    return c_int32(0)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@section("kprobe/__dev_queue_xmit")
def trace_dev_xmit(ctx3: struct_pt_regs) -> c_int32:
    # Attribute each transmitted packet/byte count to the current cgroup.
    cgroup_id = get_current_cgroup_id()
    # First kprobe argument (rdi on x86-64) = struct sk_buff pointer.
    skb = struct_sk_buff(ctx3.di)
    pkt_len = c_uint64(skb.len)

    stats_ptr = net_stats_map.lookup(cgroup_id)

    if stats_ptr:
        # Existing entry: copy forward RX fields, bump TX fields.
        stats = net_stats()
        stats.rx_packets = stats_ptr.rx_packets
        stats.tx_packets = stats_ptr.tx_packets + 1
        stats.rx_bytes = stats_ptr.rx_bytes
        stats.tx_bytes = stats_ptr.tx_bytes + pkt_len
        net_stats_map.update(cgroup_id, stats)
    else:
        # First sample for this cgroup: TX only.
        stats = net_stats()
        stats.rx_packets = c_uint64(0)
        stats.tx_packets = c_uint64(1)
        stats.rx_bytes = c_uint64(0)
        stats.tx_bytes = pkt_len
        net_stats_map.update(cgroup_id, stats)

    return c_int32(0)
|
||||||
|
|
||||||
|
|
||||||
|
# ==================== Syscall Tracing ====================
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@section("tracepoint/raw_syscalls/sys_enter")
def count_syscalls(ctx: c_void_p) -> c_int32:
    # Count every syscall entry, keyed by the calling task's cgroup id.
    cgroup_id = get_current_cgroup_id()
    count_ptr = syscall_count.lookup(cgroup_id)

    if count_ptr:
        # Lookup returned the current counter; store counter + 1.
        new_count = count_ptr + c_uint64(1)
        syscall_count.update(cgroup_id, new_count)
    else:
        # First syscall observed for this cgroup.
        syscall_count.update(cgroup_id, c_uint64(1))

    return c_int32(0)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@bpfglobal
def LICENSE() -> str:
    # Kernel-visible license string; GPL is required to use many BPF helpers.
    return "GPL"
|
||||||
|
|
||||||
|
|
||||||
|
# ==================== Main ====================

if __name__ == "__main__":
    print("🔥 Loading BPF programs...")

    # Compile, load, and attach every @bpf-decorated program in this module.
    b = BPF()
    b.load()
    b.attach_all()

    # Get userspace references to the four maps and enable struct
    # deserialization so lookups return typed objects, not raw bytes.
    read_map_ref = b["read_map"]
    write_map_ref = b["write_map"]
    net_stats_map_ref = b["net_stats_map"]
    syscall_count_ref = b["syscall_count"]

    read_map_ref.set_value_struct("read_stats")
    write_map_ref.set_value_struct("write_stats")
    net_stats_map_ref.set_value_struct("net_stats")

    print("✅ BPF programs loaded and attached")

    # Setup data collector that polls the maps and resolves cgroup names.
    collector = ContainerDataCollector(
        read_map_ref, write_map_ref, net_stats_map_ref, syscall_count_ref
    )

    # Create and run TUI (blocks until the user quits).
    tui = ContainerMonitorTUI(collector)
    tui.run()
|
||||||
208
BCC-Examples/container-monitor/data_collection.py
Normal file
208
BCC-Examples/container-monitor/data_collection.py
Normal file
@ -0,0 +1,208 @@
|
|||||||
|
"""Data collection and management for container monitoring."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Set, Optional
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from collections import deque, defaultdict
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class CgroupInfo:
    """Identity of one cgroup discovered under /sys/fs/cgroup."""

    # Kernel cgroup id (inode number of the cgroup directory).
    id: int
    # Human-friendly label derived from the path (e.g. "docker:abc123def456").
    name: str
    # Cgroup-relative path (e.g. "/system.slice/foo.service").
    path: str
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ContainerStats:
    """One point-in-time snapshot of all counters for a single cgroup.

    All counters are cumulative since BPF program load; per-second rates
    are derived later by differencing consecutive snapshots.
    """

    cgroup_id: int
    cgroup_name: str

    # File I/O
    read_ops: int = 0
    read_bytes: int = 0
    write_ops: int = 0
    write_bytes: int = 0

    # Network I/O
    rx_packets: int = 0
    rx_bytes: int = 0
    tx_packets: int = 0
    tx_bytes: int = 0

    # Syscalls
    syscall_count: int = 0

    # Wall-clock time (time.time()) at which the snapshot was taken.
    timestamp: float = 0.0
|
||||||
|
|
||||||
|
|
||||||
|
class ContainerDataCollector:
    """Collects and manages container monitoring data from BPF.

    Wraps the four userspace map references (file reads, file writes,
    network stats, syscall counts), resolves kernel cgroup ids to
    readable names by scanning /proc, and records a bounded per-cgroup
    history of :class:`ContainerStats` snapshots for graphing.
    """

    def __init__(
        self, read_map, write_map, net_stats_map, syscall_map, history_size: int = 100
    ):
        self.read_map = read_map
        self.write_map = write_map
        self.net_stats_map = net_stats_map
        self.syscall_map = syscall_map

        # Cgroup-name cache: scanning /proc is expensive, so results are
        # reused until the TTL expires.
        self._cgroup_cache: Dict[int, CgroupInfo] = {}
        self._cgroup_cache_time = 0
        self._cache_ttl = 5.0  # refresh the cgroup cache every 5 seconds

        # Historical snapshots for graphing, bounded per cgroup.
        self._history_size = history_size
        self._history: Dict[int, deque] = defaultdict(
            lambda: deque(maxlen=history_size)
        )

    def get_all_cgroups(self) -> List[CgroupInfo]:
        """Return all known cgroups, refreshing the cache if it expired."""
        current_time = time.time()

        # Use cached data if still valid.
        if current_time - self._cgroup_cache_time < self._cache_ttl:
            return list(self._cgroup_cache.values())

        self._refresh_cgroup_cache()
        return list(self._cgroup_cache.values())

    def _refresh_cgroup_cache(self):
        """Rebuild the cgroup cache by scanning /proc/<pid>/cgroup."""
        cgroup_map: Dict[int, Set[str]] = defaultdict(set)

        # Scan /proc to find every cgroup referenced by a running process.
        for proc_dir in Path("/proc").glob("[0-9]*"):
            try:
                cgroup_file = proc_dir / "cgroup"
                if not cgroup_file.exists():
                    continue

                with open(cgroup_file) as f:
                    # Each line is "<hierarchy>:<controllers>:<path>".
                    for line in f:
                        parts = line.strip().split(":")
                        if len(parts) >= 3:
                            cgroup_path = parts[2]
                            cgroup_mount = f"/sys/fs/cgroup{cgroup_path}"

                            # The cgroup id reported by the BPF helper is
                            # the inode number of the cgroup directory.
                            if os.path.exists(cgroup_mount):
                                stat_info = os.stat(cgroup_mount)
                                cgroup_id = stat_info.st_ino
                                cgroup_map[cgroup_id].add(cgroup_path)

            except OSError:
                # Processes can exit mid-scan and some entries are
                # unreadable without extra privileges; skip them.
                continue

        # Rebuild the cache, picking the most descriptive path per id.
        new_cache = {}
        for cgroup_id, paths in cgroup_map.items():
            best_path = self._get_best_cgroup_path(paths)
            name = self._get_cgroup_name(best_path)

            new_cache[cgroup_id] = CgroupInfo(id=cgroup_id, name=name, path=best_path)

        self._cgroup_cache = new_cache
        self._cgroup_cache_time = time.time()

    def _get_best_cgroup_path(self, paths: Set[str]) -> str:
        """Select the most descriptive cgroup path from *paths* (non-empty)."""
        path_list = list(paths)

        # Prefer paths mentioning a known container/service manager.
        for keyword in ["docker", "podman", "kubernetes", "k8s", "systemd"]:
            for path in path_list:
                if keyword in path.lower():
                    return path

        # Otherwise return the longest (most specific) path.
        return max(path_list, key=lambda p: (len(p.split("/")), len(p)))

    def _get_cgroup_name(self, path: str) -> str:
        """Extract a friendly display name from a cgroup path."""
        if not path or path == "/":
            return "root"

        path = path.strip("/")
        parts = path.split("/")

        # Docker layout: .../docker/<container_id> -> "docker:<short id>"
        if "docker" in path.lower():
            for i, part in enumerate(parts):
                if part.lower() == "docker" and i + 1 < len(parts):
                    container_id = parts[i + 1][:12]  # short ID
                    return f"docker:{container_id}"

        # systemd services: strip the ".service" suffix.
        if "system.slice" in path:
            for part in parts:
                if part.endswith(".service"):
                    return part.replace(".service", "")

        # User sessions.
        if "user.slice" in path:
            return f"user:{parts[-1]}" if parts else "user"

        # Default: last path component.
        return parts[-1] if parts else path

    def get_stats_for_cgroup(self, cgroup_id: int) -> ContainerStats:
        """Sample all maps for *cgroup_id* and append the snapshot to history.

        Missing map entries leave the corresponding counters at zero.
        """
        cgroup_info = self._cgroup_cache.get(cgroup_id)
        cgroup_name = cgroup_info.name if cgroup_info else f"cgroup-{cgroup_id}"

        stats = ContainerStats(
            cgroup_id=cgroup_id, cgroup_name=cgroup_name, timestamp=time.time()
        )

        # File I/O counters.
        read_stat = self.read_map.lookup(cgroup_id)
        if read_stat:
            stats.read_ops = int(read_stat.ops)
            stats.read_bytes = int(read_stat.bytes)

        write_stat = self.write_map.lookup(cgroup_id)
        if write_stat:
            stats.write_ops = int(write_stat.ops)
            stats.write_bytes = int(write_stat.bytes)

        # Network counters.
        net_stat = self.net_stats_map.lookup(cgroup_id)
        if net_stat:
            stats.rx_packets = int(net_stat.rx_packets)
            stats.rx_bytes = int(net_stat.rx_bytes)
            stats.tx_packets = int(net_stat.tx_packets)
            stats.tx_bytes = int(net_stat.tx_bytes)

        # Syscall count (a plain integer value, so 0 is valid — compare
        # against None rather than relying on truthiness).
        syscall_cnt = self.syscall_map.lookup(cgroup_id)
        if syscall_cnt is not None:
            stats.syscall_count = int(syscall_cnt)

        self._history[cgroup_id].append(stats)

        return stats

    def get_history(self, cgroup_id: int) -> List[ContainerStats]:
        """Return the recorded snapshots for *cgroup_id* (oldest first)."""
        return list(self._history[cgroup_id])

    def get_cgroup_info(self, cgroup_id: int) -> Optional[CgroupInfo]:
        """Return cached cgroup metadata, or None if not yet discovered."""
        return self._cgroup_cache.get(cgroup_id)
|
||||||
752
BCC-Examples/container-monitor/tui.py
Normal file
752
BCC-Examples/container-monitor/tui.py
Normal file
@ -0,0 +1,752 @@
|
|||||||
|
"""Terminal User Interface for container monitoring."""
|
||||||
|
|
||||||
|
import time
|
||||||
|
import curses
|
||||||
|
import threading
|
||||||
|
from typing import Optional, List
|
||||||
|
from data_collection import ContainerDataCollector
|
||||||
|
from web_dashboard import WebDashboard
|
||||||
|
|
||||||
|
|
||||||
|
def _safe_addstr(stdscr, y: int, x: int, text: str, *args):
    """Write *text* at (y, x), clipping to the window and ignoring curses errors."""
    try:
        rows, cols = stdscr.getmaxyx()
        # Skip anything that starts outside the visible window.
        if not (0 <= y < rows and 0 <= x < cols):
            return
        room = cols - x - 1
        if room > 0:
            # Truncate so the write never runs past the right edge.
            stdscr.addstr(y, x, text[:room], *args)
    except curses.error:
        # Writes at awkward positions (e.g. bottom-right cell) can still
        # raise; drawing is best-effort, so swallow it.
        pass
|
||||||
|
|
||||||
|
|
||||||
|
def _draw_fancy_header(stdscr, title: str, subtitle: str):
    """Draw a three-row header: bordered title line, subtitle, bottom rule."""
    height, width = stdscr.getmaxyx()

    # Top border across the full width.
    _safe_addstr(stdscr, 0, 0, "═" * width, curses.color_pair(6) | curses.A_BOLD)

    # Title, centered over the top border.
    _safe_addstr(
        stdscr,
        0,
        max(0, (width - len(title)) // 2),
        f" {title} ",
        curses.color_pair(6) | curses.A_BOLD,
    )

    # Subtitle, centered on the row below.
    _safe_addstr(
        stdscr,
        1,
        max(0, (width - len(subtitle)) // 2),
        subtitle,
        curses.color_pair(1),
    )

    # Bottom border.
    _safe_addstr(stdscr, 2, 0, "═" * width, curses.color_pair(6))
|
||||||
|
|
||||||
|
|
||||||
|
def _draw_metric_box(
    stdscr,
    y: int,
    x: int,
    width: int,
    label: str,
    value: str,
    detail: str,
    color_pair: int,
):
    """Draw a 4-row bordered box showing one metric.

    Rows: top border, label, value (with *detail* right-aligned), bottom
    border. Nothing is drawn if the box would not fit vertically.
    NOTE(review): *color_pair* is used directly as a curses attribute (the
    caller presumably passes curses.color_pair(n)), while the value text
    hard-codes curses.color_pair(2) — confirm that mix is intended.
    """
    height, _ = stdscr.getmaxyx()

    if y + 4 >= height:
        return

    # Top border.
    _safe_addstr(
        stdscr, y, x, "┌" + "─" * (width - 2) + "┐", color_pair | curses.A_BOLD
    )

    # Label row between side borders.
    _safe_addstr(stdscr, y + 1, x, "│", color_pair | curses.A_BOLD)
    _safe_addstr(stdscr, y + 1, x + 2, label, color_pair | curses.A_BOLD)
    _safe_addstr(stdscr, y + 1, x + width - 1, "│", color_pair | curses.A_BOLD)

    # Value row: value on the left, detail pushed toward the right border.
    _safe_addstr(stdscr, y + 2, x, "│", color_pair | curses.A_BOLD)
    _safe_addstr(stdscr, y + 2, x + 4, value, curses.color_pair(2) | curses.A_BOLD)
    _safe_addstr(
        stdscr,
        y + 2,
        min(x + width - len(detail) - 3, x + width - 2),
        detail,
        color_pair | curses.A_BOLD,
    )
    _safe_addstr(stdscr, y + 2, x + width - 1, "│", color_pair | curses.A_BOLD)

    # Bottom border.
    _safe_addstr(
        stdscr, y + 3, x, "└" + "─" * (width - 2) + "┘", color_pair | curses.A_BOLD
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _draw_section_header(stdscr, y: int, title: str, color_pair: int):
    """Draw *title* at row *y*, followed by a horizontal rule to the right edge."""
    rows, cols = stdscr.getmaxyx()

    if y >= rows:
        return

    # Same attribute for title and rule.
    attr = curses.color_pair(color_pair) | curses.A_BOLD
    _safe_addstr(stdscr, y, 2, title, attr)
    _safe_addstr(stdscr, y, len(title) + 3, "─" * (cols - len(title) - 5), attr)
|
||||||
|
|
||||||
|
|
||||||
|
def _calculate_rates(history: List) -> dict:
    """Compute per-second rates from the two newest samples in *history*.

    Returns a dict of nine "<metric>_per_sec" keys; all zeros when fewer
    than two samples exist.
    """
    # (output key, ContainerStats attribute) pairs, in output order.
    metrics = (
        ("syscalls_per_sec", "syscall_count"),
        ("rx_bytes_per_sec", "rx_bytes"),
        ("tx_bytes_per_sec", "tx_bytes"),
        ("rx_pkts_per_sec", "rx_packets"),
        ("tx_pkts_per_sec", "tx_packets"),
        ("read_bytes_per_sec", "read_bytes"),
        ("write_bytes_per_sec", "write_bytes"),
        ("read_ops_per_sec", "read_ops"),
        ("write_ops_per_sec", "write_ops"),
    )

    if len(history) < 2:
        return {key: 0.0 for key, _ in metrics}

    # Delta between the last two samples.
    newest, older = history[-1], history[-2]
    dt = newest.timestamp - older.timestamp
    if dt <= 0:
        # Guard against identical/reversed timestamps.
        dt = 1.0

    return {
        key: (getattr(newest, attr) - getattr(older, attr)) / dt
        for key, attr in metrics
    }
|
||||||
|
|
||||||
|
|
||||||
|
def _format_bytes(bytes_val: float) -> str:
    """Render a byte count as a short human-readable string, e.g. '1.5MB'."""
    # Negative counters can appear transiently; clamp to zero.
    value = max(bytes_val, 0)
    for unit in ("B", "KB", "MB", "GB", "TB"):
        if value < 1024.0:
            return f"{value:.1f}{unit}"
        value /= 1024.0
    return f"{value:.1f}PB"
|
||||||
|
|
||||||
|
|
||||||
|
def _draw_bar_graph_enhanced(
    stdscr,
    y: int,
    x: int,
    width: int,
    height: int,
    data: List[float],
    color_pair: int,
):
    """Draw a bar graph with a labelled Y axis and a time X axis.

    The leftmost ~10 columns hold max/avg/min labels; the bars fill the
    remainder using block characters of decreasing density near the top
    of each bar. NOTE(review): labels always go through _format_bytes, so
    non-byte series (e.g. syscall rates) get B/KB suffixes — confirm.
    """
    screen_height, screen_width = stdscr.getmaxyx()

    if not data or width < 2 or y + height >= screen_height:
        return

    # Axis statistics (max clamped to >=1 to avoid dividing by zero).
    max_val = max(data) if max(data) > 0 else 1
    min_val = min(data)
    avg_val = sum(data) / len(data)

    # Take last 'width - 12' data points (leave room for the Y axis).
    graph_width = max(1, width - 12)
    recent_data = data[-graph_width:] if len(data) > graph_width else data

    # Y-axis labels at top / middle / bottom (with bounds checking).
    if y < screen_height:
        _safe_addstr(
            stdscr, y, x, f"│{_format_bytes(max_val):>9}", curses.color_pair(7)
        )
    if y + height // 2 < screen_height:
        _safe_addstr(
            stdscr,
            y + height // 2,
            x,
            f"│{_format_bytes(avg_val):>9}",
            curses.color_pair(7),
        )
    if y + height - 1 < screen_height:
        _safe_addstr(
            stdscr,
            y + height - 1,
            x,
            f"│{_format_bytes(min_val):>9}",
            curses.color_pair(7),
        )

    # Draw bars row by row, top-down: a cell is solid when the column's
    # normalized value reaches this row's threshold; lighter shades just
    # below the threshold smooth the bar tops.
    for row in range(height):
        if y + row >= screen_height:
            break

        threshold = (height - row) / height
        bar_line = ""

        for val in recent_data:
            normalized = val / max_val if max_val > 0 else 0
            if normalized >= threshold:
                bar_line += "█"
            elif normalized >= threshold - 0.15:
                bar_line += "▓"
            elif normalized >= threshold - 0.35:
                bar_line += "▒"
            elif normalized >= threshold - 0.5:
                bar_line += "░"
            else:
                bar_line += " "

        _safe_addstr(stdscr, y + row, x + 11, bar_line, color_pair)

    # X axis beneath the bars, with a time arrow at the end.
    if y + height < screen_height:
        _safe_addstr(
            stdscr,
            y + height,
            x + 10,
            "├" + "─" * len(recent_data),
            curses.color_pair(7),
        )
        _safe_addstr(
            stdscr,
            y + height,
            x + 10 + len(recent_data),
            "→ time",
            curses.color_pair(7),
        )
|
||||||
|
|
||||||
|
|
||||||
|
def _draw_labeled_graph(
    stdscr,
    y: int,
    x: int,
    width: int,
    height: int,
    label: str,
    rate: str,
    detail: str,
    data: List[float],
    color_pair: int,
    description: str,
):
    """Draw a titled graph: header line, bar graph body, legend line.

    *label*, *rate*, and *detail* are rendered left-to-right on the
    header row; *description* becomes the "└─ ..." legend under the graph.
    """
    screen_height, screen_width = stdscr.getmaxyx()

    if y >= screen_height or y + height + 2 >= screen_height:
        return

    # Header with metrics.
    _safe_addstr(stdscr, y, x, label, curses.color_pair(1) | curses.A_BOLD)
    _safe_addstr(stdscr, y, x + len(label) + 2, rate, curses.color_pair(2))
    _safe_addstr(
        stdscr, y, x + len(label) + len(rate) + 4, detail, curses.color_pair(7)
    )

    # Graph body — needs at least two samples to be meaningful.
    if len(data) > 1:
        _draw_bar_graph_enhanced(stdscr, y + 1, x, width, height, data, color_pair)
    else:
        _safe_addstr(stdscr, y + 2, x + 2, "Collecting data...", curses.color_pair(7))

    # Legend under the graph.
    if y + height + 1 < screen_height:
        _safe_addstr(
            stdscr, y + height + 1, x, f"└─ {description}", curses.color_pair(7)
        )
|
||||||
|
|
||||||
|
|
||||||
|
class ContainerMonitorTUI:
    """TUI for container monitoring with cgroup selection and live graphs.

    Two screens are shown: a "selection" screen listing cgroups reported by
    the collector, and a "monitoring" screen with live graphs for the chosen
    cgroup.  A web dashboard can be launched from either screen with 'w'.
    """

    def __init__(self, collector: ContainerDataCollector):
        # Data source for cgroup lists, stats, and history samples.
        self.collector = collector
        # Cgroup id currently being monitored; None while on the selection screen.
        self.selected_cgroup: Optional[int] = None
        self.current_screen = "selection"  # "selection" or "monitoring"
        # Cursor position and scroll window for the selection list.
        self.selected_index = 0
        self.scroll_offset = 0
        # Lazily created when web mode is launched (see _launch_web_mode).
        self.web_dashboard = None
        self.web_thread = None

    def run(self) -> None:
        """Run the TUI application (curses.wrapper restores the terminal on exit)."""
        curses.wrapper(self._main_loop)

    def _main_loop(self, stdscr) -> None:
        """Main curses loop: configure terminal, then redraw/poll every 100 ms."""
        # Configure curses
        curses.curs_set(0)  # Hide cursor
        stdscr.nodelay(True)  # Non-blocking input
        stdscr.timeout(100)  # Refresh every 100ms

        # Initialize colors (pair numbers referenced throughout the draw methods)
        curses.start_color()
        curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
        curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
        curses.init_pair(4, curses.COLOR_RED, curses.COLOR_BLACK)
        curses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
        curses.init_pair(6, curses.COLOR_WHITE, curses.COLOR_BLUE)
        curses.init_pair(7, curses.COLOR_BLUE, curses.COLOR_BLACK)
        curses.init_pair(8, curses.COLOR_WHITE, curses.COLOR_CYAN)

        while True:
            stdscr.clear()

            try:
                height, width = stdscr.getmaxyx()

                # Check minimum terminal size
                if height < 25 or width < 80:
                    msg = "Terminal too small! Minimum: 80x25"
                    stdscr.attron(curses.color_pair(4) | curses.A_BOLD)
                    stdscr.addstr(
                        height // 2, max(0, (width - len(msg)) // 2), msg[: width - 1]
                    )
                    stdscr.attroff(curses.color_pair(4) | curses.A_BOLD)
                    stdscr.refresh()
                    key = stdscr.getch()
                    if key == ord("q") or key == ord("Q"):
                        break
                    continue

                if self.current_screen == "selection":
                    self._draw_selection_screen(stdscr)
                elif self.current_screen == "monitoring":
                    self._draw_monitoring_screen(stdscr)

                stdscr.refresh()

                # Handle input
                key = stdscr.getch()
                if key != -1:
                    if not self._handle_input(key, stdscr):
                        break  # Exit requested

            except KeyboardInterrupt:
                break
            except curses.error:
                # Curses error - likely terminal too small, just continue
                pass
            except Exception as e:
                # Show error briefly, then keep the loop running.
                height, width = stdscr.getmaxyx()
                error_msg = f"Error: {str(e)[: width - 10]}"
                stdscr.addstr(0, 0, error_msg[: width - 1])
                stdscr.refresh()
                time.sleep(1)

    def _draw_selection_screen(self, stdscr) -> None:
        """Draw the cgroup selection screen (bordered, scrollable list)."""
        height, width = stdscr.getmaxyx()

        # Draw fancy header box
        _draw_fancy_header(stdscr, "🐳 CONTAINER MONITOR", "Select a Cgroup to Monitor")

        # Instructions
        instructions = (
            "↑↓: Navigate | ENTER: Select | w: Web Mode | q: Quit | r: Refresh"
        )
        _safe_addstr(
            stdscr,
            3,
            max(0, (width - len(instructions)) // 2),
            instructions,
            curses.color_pair(3),
        )

        # Get cgroups
        cgroups = self.collector.get_all_cgroups()

        if not cgroups:
            msg = "No cgroups found. Waiting for activity..."
            _safe_addstr(
                stdscr,
                height // 2,
                max(0, (width - len(msg)) // 2),
                msg,
                curses.color_pair(4),
            )
            return

        # Sort cgroups by name (display order; _handle_input sorts the same
        # way before resolving selected_index to an id)
        cgroups.sort(key=lambda c: c.name)

        # Adjust selection bounds
        if self.selected_index >= len(cgroups):
            self.selected_index = len(cgroups) - 1
        if self.selected_index < 0:
            self.selected_index = 0

        # Calculate visible range (keep the cursor within the scroll window)
        list_height = max(1, height - 8)
        if self.selected_index < self.scroll_offset:
            self.scroll_offset = self.selected_index
        elif self.selected_index >= self.scroll_offset + list_height:
            self.scroll_offset = self.selected_index - list_height + 1

        # Calculate max name length and ID width for alignment
        max_name_len = min(50, max(len(cg.name) for cg in cgroups))
        max_id_len = max(len(str(cg.id)) for cg in cgroups)

        # Draw cgroup list with fancy borders
        start_y = 5
        _safe_addstr(
            stdscr, start_y, 2, "╔" + "═" * (width - 6) + "╗", curses.color_pair(1)
        )

        # Header row
        header = f" {'CGROUP NAME':<{max_name_len}} │ {'ID':>{max_id_len}} "
        _safe_addstr(stdscr, start_y + 1, 2, "║", curses.color_pair(1))
        _safe_addstr(
            stdscr, start_y + 1, 3, header, curses.color_pair(1) | curses.A_BOLD
        )
        _safe_addstr(stdscr, start_y + 1, width - 3, "║", curses.color_pair(1))

        # Separator
        _safe_addstr(
            stdscr, start_y + 2, 2, "╟" + "─" * (width - 6) + "╢", curses.color_pair(1)
        )

        for i in range(list_height):
            idx = self.scroll_offset + i
            y = start_y + 3 + i

            if y >= height - 2:
                break

            # Side borders are drawn even for empty rows below the list end.
            _safe_addstr(stdscr, y, 2, "║", curses.color_pair(1))
            _safe_addstr(stdscr, y, width - 3, "║", curses.color_pair(1))

            if idx >= len(cgroups):
                continue

            cgroup = cgroups[idx]

            # Truncate name if too long
            display_name = (
                cgroup.name
                if len(cgroup.name) <= max_name_len
                else cgroup.name[: max_name_len - 3] + "..."
            )

            if idx == self.selected_index:
                # Highlight selected with proper alignment
                line = f" ► {display_name:<{max_name_len}} │ {cgroup.id:>{max_id_len}} "
                _safe_addstr(stdscr, y, 3, line, curses.color_pair(8) | curses.A_BOLD)
            else:
                line = f" {display_name:<{max_name_len}} │ {cgroup.id:>{max_id_len}} "
                _safe_addstr(stdscr, y, 3, line, curses.color_pair(7))

        # Bottom border
        bottom_y = min(start_y + 3 + list_height, height - 3)
        _safe_addstr(
            stdscr, bottom_y, 2, "╚" + "═" * (width - 6) + "╝", curses.color_pair(1)
        )

        # Footer
        footer = f"Total: {len(cgroups)} cgroups"
        if len(cgroups) > list_height:
            footer += f" │ Showing {self.scroll_offset + 1}-{min(self.scroll_offset + list_height, len(cgroups))}"
        _safe_addstr(
            stdscr,
            height - 2,
            max(0, (width - len(footer)) // 2),
            footer,
            curses.color_pair(1),
        )

    def _draw_monitoring_screen(self, stdscr) -> None:
        """Draw the monitoring screen (metric box + four graphs) for the
        selected cgroup.  Sections are skipped when the terminal is too short.
        """
        height, width = stdscr.getmaxyx()

        if self.selected_cgroup is None:
            return

        # Get current stats
        stats = self.collector.get_stats_for_cgroup(self.selected_cgroup)
        history = self.collector.get_history(self.selected_cgroup)

        # Draw fancy header
        _draw_fancy_header(
            stdscr, f"📊 {stats.cgroup_name[:40]}", "Live Performance Metrics"
        )

        # Instructions
        instructions = "ESC/b: Back to List | w: Web Mode | q: Quit"
        _safe_addstr(
            stdscr,
            3,
            max(0, (width - len(instructions)) // 2),
            instructions,
            curses.color_pair(3),
        )

        # Calculate metrics for rate display
        rates = _calculate_rates(history)

        y = 5

        # Syscall count in a fancy box
        if y + 4 < height:
            _draw_metric_box(
                stdscr,
                y,
                2,
                min(width - 4, 80),
                "⚡ SYSTEM CALLS",
                f"{stats.syscall_count:,}",
                f"Rate: {rates['syscalls_per_sec']:.1f}/sec",
                curses.color_pair(5),
            )
            y += 4

        # Network I/O Section
        if y + 8 < height:
            _draw_section_header(stdscr, y, "🌐 NETWORK I/O", 1)
            y += 1

            # RX graph
            rx_label = f"RX: {_format_bytes(stats.rx_bytes)}"
            rx_rate = f"{_format_bytes(rates['rx_bytes_per_sec'])}/s"
            rx_pkts = f"{stats.rx_packets:,} pkts ({rates['rx_pkts_per_sec']:.1f}/s)"

            _draw_labeled_graph(
                stdscr,
                y,
                2,
                width - 4,
                4,
                rx_label,
                rx_rate,
                rx_pkts,
                [s.rx_bytes for s in history],
                curses.color_pair(2),
                "Received Traffic (last 100 samples)",
            )
            y += 6

        # TX graph
        if y + 8 < height:
            tx_label = f"TX: {_format_bytes(stats.tx_bytes)}"
            tx_rate = f"{_format_bytes(rates['tx_bytes_per_sec'])}/s"
            tx_pkts = f"{stats.tx_packets:,} pkts ({rates['tx_pkts_per_sec']:.1f}/s)"

            _draw_labeled_graph(
                stdscr,
                y,
                2,
                width - 4,
                4,
                tx_label,
                tx_rate,
                tx_pkts,
                [s.tx_bytes for s in history],
                curses.color_pair(3),
                "Transmitted Traffic (last 100 samples)",
            )
            y += 6

        # File I/O Section
        if y + 8 < height:
            _draw_section_header(stdscr, y, "💾 FILE I/O", 1)
            y += 1

            # Read graph
            read_label = f"READ: {_format_bytes(stats.read_bytes)}"
            read_rate = f"{_format_bytes(rates['read_bytes_per_sec'])}/s"
            read_ops = f"{stats.read_ops:,} ops ({rates['read_ops_per_sec']:.1f}/s)"

            _draw_labeled_graph(
                stdscr,
                y,
                2,
                width - 4,
                4,
                read_label,
                read_rate,
                read_ops,
                [s.read_bytes for s in history],
                curses.color_pair(4),
                "Read Operations (last 100 samples)",
            )
            y += 6

        # Write graph
        if y + 8 < height:
            write_label = f"WRITE: {_format_bytes(stats.write_bytes)}"
            write_rate = f"{_format_bytes(rates['write_bytes_per_sec'])}/s"
            write_ops = f"{stats.write_ops:,} ops ({rates['write_ops_per_sec']:.1f}/s)"

            _draw_labeled_graph(
                stdscr,
                y,
                2,
                width - 4,
                4,
                write_label,
                write_rate,
                write_ops,
                [s.write_bytes for s in history],
                curses.color_pair(5),
                "Write Operations (last 100 samples)",
            )

    def _launch_web_mode(self, stdscr) -> None:
        """Launch web dashboard mode.

        Starts the WebDashboard server in a daemon thread, blocks until the
        user presses 'q', then stops the server and restores the TUI's
        non-blocking input settings.
        """
        height, width = stdscr.getmaxyx()

        # Show transition message
        stdscr.clear()

        msg1 = "🌐 LAUNCHING WEB DASHBOARD"
        _safe_addstr(
            stdscr,
            height // 2 - 2,
            max(0, (width - len(msg1)) // 2),
            msg1,
            curses.color_pair(6) | curses.A_BOLD,
        )

        msg2 = "Server starting at http://localhost:8050"
        _safe_addstr(
            stdscr,
            height // 2,
            max(0, (width - len(msg2)) // 2),
            msg2,
            curses.color_pair(2),
        )

        msg3 = "Press 'q' to stop web server and return to TUI"
        _safe_addstr(
            stdscr,
            height // 2 + 2,
            max(0, (width - len(msg3)) // 2),
            msg3,
            curses.color_pair(3),
        )

        stdscr.refresh()
        time.sleep(1)

        try:
            # Create and start web dashboard (pre-selects the current cgroup,
            # if any, so the web UI skips its own selector)
            self.web_dashboard = WebDashboard(
                self.collector, selected_cgroup=self.selected_cgroup
            )

            # Start in background thread
            self.web_thread = threading.Thread(
                target=self.web_dashboard.run, daemon=True
            )
            self.web_thread.start()

            time.sleep(2)  # Give server time to start

            # Wait for user to press 'q' to return
            msg4 = "Web dashboard running at http://localhost:8050"
            msg5 = "Press 'q' to return to TUI"
            _safe_addstr(
                stdscr,
                height // 2 + 4,
                max(0, (width - len(msg4)) // 2),
                msg4,
                curses.color_pair(1) | curses.A_BOLD,
            )
            _safe_addstr(
                stdscr,
                height // 2 + 5,
                max(0, (width - len(msg5)) // 2),
                msg5,
                curses.color_pair(3) | curses.A_BOLD,
            )
            stdscr.refresh()

            stdscr.nodelay(False)  # Blocking mode
            while True:
                key = stdscr.getch()
                if key == ord("q") or key == ord("Q"):
                    break

            # Stop web server
            if self.web_dashboard:
                self.web_dashboard.stop()

        except Exception as e:
            error_msg = f"Error starting web dashboard: {str(e)}"
            _safe_addstr(
                stdscr,
                height // 2 + 4,
                max(0, (width - len(error_msg)) // 2),
                error_msg,
                curses.color_pair(4),
            )
            stdscr.refresh()
            time.sleep(3)

        # Restore TUI settings
        stdscr.nodelay(True)
        stdscr.timeout(100)

    def _handle_input(self, key: int, stdscr) -> bool:
        """Handle keyboard input. Returns False to exit."""
        if key == ord("q") or key == ord("Q"):
            return False  # Exit

        if key == ord("w") or key == ord("W"):
            # Launch web mode
            self._launch_web_mode(stdscr)
            return True

        if self.current_screen == "selection":
            if key == curses.KEY_UP:
                self.selected_index = max(0, self.selected_index - 1)
            elif key == curses.KEY_DOWN:
                cgroups = self.collector.get_all_cgroups()
                self.selected_index = min(len(cgroups) - 1, self.selected_index + 1)
            elif key == ord("\n") or key == curses.KEY_ENTER or key == 10:
                # Select cgroup (sorted by name to match the drawn list order)
                cgroups = self.collector.get_all_cgroups()
                if cgroups and 0 <= self.selected_index < len(cgroups):
                    cgroups.sort(key=lambda c: c.name)
                    self.selected_cgroup = cgroups[self.selected_index].id
                    self.current_screen = "monitoring"
            elif key == ord("r") or key == ord("R"):
                # Force refresh cache (zeroing the collector's cache timestamp
                # makes the next get_all_cgroups() re-scan)
                self.collector._cgroup_cache_time = 0

        elif self.current_screen == "monitoring":
            if key == 27 or key == ord("b") or key == ord("B"):  # ESC or 'b'
                self.current_screen = "selection"
                self.selected_cgroup = None

        return True  # Continue running
||||||
826
BCC-Examples/container-monitor/web_dashboard.py
Normal file
826
BCC-Examples/container-monitor/web_dashboard.py
Normal file
@ -0,0 +1,826 @@
|
|||||||
|
"""Beautiful web dashboard for container monitoring using Plotly Dash."""
|
||||||
|
|
||||||
|
import dash
|
||||||
|
from dash import dcc, html
|
||||||
|
from dash.dependencies import Input, Output
|
||||||
|
import plotly.graph_objects as go
|
||||||
|
from plotly.subplots import make_subplots
|
||||||
|
from typing import Optional
|
||||||
|
from data_collection import ContainerDataCollector
|
||||||
|
|
||||||
|
|
||||||
|
class WebDashboard:
|
||||||
|
"""Beautiful web dashboard for container monitoring."""
|
||||||
|
|
||||||
|
    def __init__(
        self,
        collector: ContainerDataCollector,
        selected_cgroup: Optional[int] = None,
        host: str = "0.0.0.0",
        port: int = 8050,
    ):
        """Build the Dash app and wire its layout and callbacks.

        Args:
            collector: Data source for cgroup lists, stats, and history.
            selected_cgroup: Pre-selected cgroup id; when given, the layout
                hides its cgroup dropdown.
            host: Bind address for the Dash server.
            port: Bind port for the Dash server.
        """
        self.collector = collector
        self.selected_cgroup = selected_cgroup
        self.host = host
        self.port = port

        # Suppress Dash dev tools and debug output
        self.app = dash.Dash(
            __name__,
            title="pythonBPF Container Monitor",
            suppress_callback_exceptions=True,
        )

        # Layout first, then callbacks: the callbacks reference component ids
        # that the layout creates.
        self._setup_layout()
        self._setup_callbacks()
        # Run-state flag for the server lifecycle.
        self._running = False
|
||||||
|
    def _setup_layout(self):
        """Create the dashboard layout.

        Top-to-bottom: branded header, an optional cgroup dropdown (hidden
        when a cgroup was pre-selected), three stat cards, three graph
        panels, a footer, and a 1 s dcc.Interval that drives the callbacks.
        """
        self.app.layout = html.Div(
            [
                # Futuristic Header with pythonBPF branding
                html.Div(
                    [
                        html.Div(
                            [
                                html.Div(
                                    [
                                        html.Span(
                                            "python",
                                            style={
                                                "fontSize": "52px",
                                                "fontWeight": "300",
                                                "color": "#00ff88",
                                                "fontFamily": "'Courier New', monospace",
                                                "textShadow": "0 0 20px rgba(0,255,136,0.5)",
                                            },
                                        ),
                                        html.Span(
                                            "BPF",
                                            style={
                                                "fontSize": "52px",
                                                "fontWeight": "900",
                                                "color": "#00d4ff",
                                                "fontFamily": "'Courier New', monospace",
                                                "textShadow": "0 0 20px rgba(0,212,255,0.5)",
                                            },
                                        ),
                                    ],
                                    style={"marginBottom": "5px"},
                                ),
                                html.Div(
                                    "CONTAINER PERFORMANCE MONITOR",
                                    style={
                                        "fontSize": "16px",
                                        "letterSpacing": "8px",
                                        "color": "#8899ff",
                                        "fontWeight": "300",
                                        "fontFamily": "'Courier New', monospace",
                                    },
                                ),
                            ],
                            style={
                                "textAlign": "center",
                            },
                        ),
                        # Filled by the update_dashboard callback.
                        html.Div(
                            id="cgroup-name",
                            style={
                                "textAlign": "center",
                                "color": "#00ff88",
                                "fontSize": "20px",
                                "marginTop": "15px",
                                "fontFamily": "'Courier New', monospace",
                                "fontWeight": "bold",
                                "textShadow": "0 0 10px rgba(0,255,136,0.3)",
                            },
                        ),
                    ],
                    style={
                        "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 50%, #0a0e27 100%)",
                        "padding": "40px 20px",
                        "borderRadius": "0",
                        "marginBottom": "0",
                        "boxShadow": "0 10px 40px rgba(0,212,255,0.2)",
                        "border": "1px solid rgba(0,212,255,0.3)",
                        "borderTop": "3px solid #00d4ff",
                        "borderBottom": "3px solid #00ff88",
                        "position": "relative",
                        "overflow": "hidden",
                    },
                ),
                # Cgroup selector (if no cgroup selected)
                html.Div(
                    [
                        html.Label(
                            "SELECT CGROUP:",
                            style={
                                "fontSize": "14px",
                                "fontWeight": "bold",
                                "color": "#00d4ff",
                                "marginRight": "15px",
                                "fontFamily": "'Courier New', monospace",
                                "letterSpacing": "2px",
                            },
                        ),
                        dcc.Dropdown(
                            id="cgroup-selector",
                            style={
                                "width": "600px",
                                "display": "inline-block",
                                "background": "#1a1f3a",
                                "border": "1px solid #00d4ff",
                            },
                        ),
                    ],
                    id="selector-container",
                    style={
                        "textAlign": "center",
                        "marginTop": "30px",
                        "marginBottom": "30px",
                        "padding": "20px",
                        "background": "rgba(26,31,58,0.5)",
                        "borderRadius": "10px",
                        "border": "1px solid rgba(0,212,255,0.2)",
                        # Hidden entirely when a cgroup was chosen up front.
                        "display": "block" if self.selected_cgroup is None else "none",
                    },
                ),
                # Stats cards row
                html.Div(
                    [
                        self._create_stat_card(
                            "syscall-card", "⚡ SYSCALLS", "#00ff88"
                        ),
                        self._create_stat_card("network-card", "🌐 NETWORK", "#00d4ff"),
                        self._create_stat_card("file-card", "💾 FILE I/O", "#ff0088"),
                    ],
                    style={
                        "display": "flex",
                        "justifyContent": "space-around",
                        "marginBottom": "30px",
                        "marginTop": "30px",
                        "gap": "25px",
                        "flexWrap": "wrap",
                        "padding": "0 20px",
                    },
                ),
                # Graphs container
                html.Div(
                    [
                        # Network graphs
                        html.Div(
                            [
                                html.Div(
                                    [
                                        html.Span("🌐 ", style={"fontSize": "24px"}),
                                        html.Span(
                                            "NETWORK",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "fontWeight": "bold",
                                            },
                                        ),
                                        html.Span(
                                            " I/O",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "color": "#00d4ff",
                                            },
                                        ),
                                    ],
                                    style={
                                        "color": "#ffffff",
                                        "fontSize": "20px",
                                        "borderBottom": "2px solid #00d4ff",
                                        "paddingBottom": "15px",
                                        "marginBottom": "25px",
                                        "textShadow": "0 0 10px rgba(0,212,255,0.3)",
                                    },
                                ),
                                dcc.Graph(
                                    id="network-graph", style={"height": "400px"}
                                ),
                            ],
                            style={
                                "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
                                "padding": "30px",
                                "borderRadius": "15px",
                                "boxShadow": "0 8px 32px rgba(0,212,255,0.15)",
                                "marginBottom": "30px",
                                "border": "1px solid rgba(0,212,255,0.2)",
                            },
                        ),
                        # File I/O graphs
                        html.Div(
                            [
                                html.Div(
                                    [
                                        html.Span("💾 ", style={"fontSize": "24px"}),
                                        html.Span(
                                            "FILE",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "fontWeight": "bold",
                                            },
                                        ),
                                        html.Span(
                                            " I/O",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "color": "#ff0088",
                                            },
                                        ),
                                    ],
                                    style={
                                        "color": "#ffffff",
                                        "fontSize": "20px",
                                        "borderBottom": "2px solid #ff0088",
                                        "paddingBottom": "15px",
                                        "marginBottom": "25px",
                                        "textShadow": "0 0 10px rgba(255,0,136,0.3)",
                                    },
                                ),
                                dcc.Graph(
                                    id="file-io-graph", style={"height": "400px"}
                                ),
                            ],
                            style={
                                "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
                                "padding": "30px",
                                "borderRadius": "15px",
                                "boxShadow": "0 8px 32px rgba(255,0,136,0.15)",
                                "marginBottom": "30px",
                                "border": "1px solid rgba(255,0,136,0.2)",
                            },
                        ),
                        # Combined time series
                        html.Div(
                            [
                                html.Div(
                                    [
                                        html.Span("📈 ", style={"fontSize": "24px"}),
                                        html.Span(
                                            "REAL-TIME",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "fontWeight": "bold",
                                            },
                                        ),
                                        html.Span(
                                            " METRICS",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "color": "#00ff88",
                                            },
                                        ),
                                    ],
                                    style={
                                        "color": "#ffffff",
                                        "fontSize": "20px",
                                        "borderBottom": "2px solid #00ff88",
                                        "paddingBottom": "15px",
                                        "marginBottom": "25px",
                                        "textShadow": "0 0 10px rgba(0,255,136,0.3)",
                                    },
                                ),
                                dcc.Graph(
                                    id="timeseries-graph", style={"height": "500px"}
                                ),
                            ],
                            style={
                                "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
                                "padding": "30px",
                                "borderRadius": "15px",
                                "boxShadow": "0 8px 32px rgba(0,255,136,0.15)",
                                "border": "1px solid rgba(0,255,136,0.2)",
                            },
                        ),
                    ],
                    style={"padding": "0 20px"},
                ),
                # Footer with pythonBPF branding
                html.Div(
                    [
                        html.Div(
                            [
                                html.Span(
                                    "Powered by ",
                                    style={"color": "#8899ff", "fontSize": "12px"},
                                ),
                                html.Span(
                                    "pythonBPF",
                                    style={
                                        "color": "#00d4ff",
                                        "fontSize": "14px",
                                        "fontWeight": "bold",
                                        "fontFamily": "'Courier New', monospace",
                                    },
                                ),
                                html.Span(
                                    " | eBPF Container Monitoring",
                                    style={
                                        "color": "#8899ff",
                                        "fontSize": "12px",
                                        "marginLeft": "10px",
                                    },
                                ),
                            ]
                        )
                    ],
                    style={
                        "textAlign": "center",
                        "padding": "20px",
                        "marginTop": "40px",
                        "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
                        "borderTop": "1px solid rgba(0,212,255,0.2)",
                    },
                ),
                # Auto-update interval (1 s tick drives every callback)
                dcc.Interval(id="interval-component", interval=1000, n_intervals=0),
            ],
            style={
                "padding": "0",
                "fontFamily": "'Segoe UI', 'Courier New', monospace",
                "background": "linear-gradient(to bottom, #050813 0%, #0a0e27 100%)",
                "minHeight": "100vh",
                "margin": "0",
            },
        )
|
|
||||||
|
def _create_stat_card(self, card_id: str, title: str, color: str):
|
||||||
|
"""Create a statistics card with futuristic styling."""
|
||||||
|
return html.Div(
|
||||||
|
[
|
||||||
|
html.H3(
|
||||||
|
title,
|
||||||
|
style={
|
||||||
|
"color": color,
|
||||||
|
"fontSize": "16px",
|
||||||
|
"marginBottom": "20px",
|
||||||
|
"fontWeight": "bold",
|
||||||
|
"fontFamily": "'Courier New', monospace",
|
||||||
|
"letterSpacing": "2px",
|
||||||
|
"textShadow": f"0 0 10px {color}50",
|
||||||
|
},
|
||||||
|
),
|
||||||
|
html.Div(
|
||||||
|
[
|
||||||
|
html.Div(
|
||||||
|
id=f"{card_id}-value",
|
||||||
|
style={
|
||||||
|
"fontSize": "42px",
|
||||||
|
"fontWeight": "bold",
|
||||||
|
"color": "#ffffff",
|
||||||
|
"marginBottom": "10px",
|
||||||
|
"fontFamily": "'Courier New', monospace",
|
||||||
|
"textShadow": f"0 0 20px {color}40",
|
||||||
|
},
|
||||||
|
),
|
||||||
|
html.Div(
|
||||||
|
id=f"{card_id}-rate",
|
||||||
|
style={
|
||||||
|
"fontSize": "14px",
|
||||||
|
"color": "#8899ff",
|
||||||
|
"fontFamily": "'Courier New', monospace",
|
||||||
|
},
|
||||||
|
),
|
||||||
|
]
|
||||||
|
),
|
||||||
|
],
|
||||||
|
style={
|
||||||
|
"flex": "1",
|
||||||
|
"minWidth": "280px",
|
||||||
|
"background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
|
||||||
|
"padding": "30px",
|
||||||
|
"borderRadius": "15px",
|
||||||
|
"boxShadow": f"0 8px 32px {color}20",
|
||||||
|
"border": f"1px solid {color}40",
|
||||||
|
"borderLeft": f"4px solid {color}",
|
||||||
|
"transition": "transform 0.3s, box-shadow 0.3s",
|
||||||
|
"position": "relative",
|
||||||
|
"overflow": "hidden",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
    def _setup_callbacks(self):
        """Setup dashboard callbacks.

        All three callbacks fire on the shared 1 s interval / dropdown input.
        Note: the Output list order in update_dashboard must exactly match
        the order of its return tuples.
        """

        @self.app.callback(
            [Output("cgroup-selector", "options"), Output("cgroup-selector", "value")],
            [Input("interval-component", "n_intervals")],
        )
        def update_cgroup_selector(n):
            # With a pre-selected cgroup the dropdown stays empty/pinned.
            if self.selected_cgroup is not None:
                return [], self.selected_cgroup

            cgroups = self.collector.get_all_cgroups()
            options = [
                {"label": f"{cg.name} (ID: {cg.id})", "value": cg.id}
                for cg in sorted(cgroups, key=lambda c: c.name)
            ]
            value = options[0]["value"] if options else None

            # Auto-select the first cgroup the first time any appear.
            if value and self.selected_cgroup is None:
                self.selected_cgroup = value

            return options, self.selected_cgroup

        @self.app.callback(
            Output("cgroup-selector", "value", allow_duplicate=True),
            [Input("cgroup-selector", "value")],
            prevent_initial_call=True,
        )
        def select_cgroup(value):
            # User picked an entry from the dropdown; remember it.
            if value:
                self.selected_cgroup = value
            return value

        @self.app.callback(
            [
                Output("cgroup-name", "children"),
                Output("syscall-card-value", "children"),
                Output("syscall-card-rate", "children"),
                Output("network-card-value", "children"),
                Output("network-card-rate", "children"),
                Output("file-card-value", "children"),
                Output("file-card-rate", "children"),
                Output("network-graph", "figure"),
                Output("file-io-graph", "figure"),
                Output("timeseries-graph", "figure"),
            ],
            [Input("interval-component", "n_intervals")],
        )
        def update_dashboard(n):
            # Placeholder state until a cgroup is selected.
            if self.selected_cgroup is None:
                empty_fig = self._create_empty_figure(
                    "Select a cgroup to begin monitoring"
                )
                return (
                    "SELECT A CGROUP TO START",
                    "0",
                    "",
                    "0 B",
                    "",
                    "0 B",
                    "",
                    empty_fig,
                    empty_fig,
                    empty_fig,
                )

            try:
                stats = self.collector.get_stats_for_cgroup(self.selected_cgroup)
                history = self.collector.get_history(self.selected_cgroup)
                rates = self._calculate_rates(history)

                return (
                    f"► {stats.cgroup_name}",
                    f"{stats.syscall_count:,}",
                    f"{rates['syscalls_per_sec']:.1f} calls/sec",
                    f"{self._format_bytes(stats.rx_bytes + stats.tx_bytes)}",
                    f"↓ {self._format_bytes(rates['rx_bytes_per_sec'])}/s ↑ {self._format_bytes(rates['tx_bytes_per_sec'])}/s",
                    f"{self._format_bytes(stats.read_bytes + stats.write_bytes)}",
                    f"R: {self._format_bytes(rates['read_bytes_per_sec'])}/s W: {self._format_bytes(rates['write_bytes_per_sec'])}/s",
                    self._create_network_graph(history),
                    self._create_file_io_graph(history),
                    self._create_timeseries_graph(history),
                )
            except Exception as e:
                # Surface the error in the UI instead of breaking the page.
                empty_fig = self._create_empty_figure(f"Error: {str(e)}")
                return (
                    "ERROR",
                    "0",
                    str(e),
                    "0 B",
                    "",
                    "0 B",
                    "",
                    empty_fig,
                    empty_fig,
                    empty_fig,
                )
|
|
||||||
|
def _create_empty_figure(self, message: str):
|
||||||
|
"""Create an empty figure with a message."""
|
||||||
|
fig = go.Figure()
|
||||||
|
fig.update_layout(
|
||||||
|
title=message,
|
||||||
|
template="plotly_dark",
|
||||||
|
paper_bgcolor="#0a0e27",
|
||||||
|
plot_bgcolor="#0a0e27",
|
||||||
|
font=dict(color="#8899ff", family="Courier New, monospace"),
|
||||||
|
)
|
||||||
|
return fig
|
||||||
|
|
||||||
|
def _create_network_graph(self, history):
|
||||||
|
"""Create network I/O graph with futuristic styling."""
|
||||||
|
if len(history) < 2:
|
||||||
|
return self._create_empty_figure("Collecting data...")
|
||||||
|
|
||||||
|
times = [i for i in range(len(history))]
|
||||||
|
rx_bytes = [s.rx_bytes for s in history]
|
||||||
|
tx_bytes = [s.tx_bytes for s in history]
|
||||||
|
|
||||||
|
fig = make_subplots(
|
||||||
|
rows=2,
|
||||||
|
cols=1,
|
||||||
|
subplot_titles=("RECEIVED (RX)", "TRANSMITTED (TX)"),
|
||||||
|
vertical_spacing=0.15,
|
||||||
|
)
|
||||||
|
|
||||||
|
fig.add_trace(
|
||||||
|
go.Scatter(
|
||||||
|
x=times,
|
||||||
|
y=rx_bytes,
|
||||||
|
mode="lines",
|
||||||
|
name="RX",
|
||||||
|
fill="tozeroy",
|
||||||
|
line=dict(color="#00d4ff", width=3, shape="spline"),
|
||||||
|
fillcolor="rgba(0, 212, 255, 0.2)",
|
||||||
|
),
|
||||||
|
row=1,
|
||||||
|
col=1,
|
||||||
|
)
|
||||||
|
|
||||||
|
fig.add_trace(
|
||||||
|
go.Scatter(
|
||||||
|
x=times,
|
||||||
|
y=tx_bytes,
|
||||||
|
mode="lines",
|
||||||
|
name="TX",
|
||||||
|
fill="tozeroy",
|
||||||
|
line=dict(color="#00ff88", width=3, shape="spline"),
|
||||||
|
fillcolor="rgba(0, 255, 136, 0.2)",
|
||||||
|
),
|
||||||
|
row=2,
|
||||||
|
col=1,
|
||||||
|
)
|
||||||
|
|
||||||
|
fig.update_xaxes(title_text="Time (samples)", row=2, col=1, color="#8899ff")
|
||||||
|
fig.update_yaxes(title_text="Bytes", row=1, col=1, color="#8899ff")
|
||||||
|
fig.update_yaxes(title_text="Bytes", row=2, col=1, color="#8899ff")
|
||||||
|
|
||||||
|
fig.update_layout(
|
||||||
|
height=400,
|
||||||
|
template="plotly_dark",
|
||||||
|
paper_bgcolor="rgba(0,0,0,0)",
|
||||||
|
plot_bgcolor="#0a0e27",
|
||||||
|
showlegend=False,
|
||||||
|
hovermode="x unified",
|
||||||
|
font=dict(family="Courier New, monospace", color="#8899ff"),
|
||||||
|
)
|
||||||
|
|
||||||
|
return fig
|
||||||
|
|
||||||
|
def _create_file_io_graph(self, history):
|
||||||
|
"""Create file I/O graph with futuristic styling."""
|
||||||
|
if len(history) < 2:
|
||||||
|
return self._create_empty_figure("Collecting data...")
|
||||||
|
|
||||||
|
times = [i for i in range(len(history))]
|
||||||
|
read_bytes = [s.read_bytes for s in history]
|
||||||
|
write_bytes = [s.write_bytes for s in history]
|
||||||
|
|
||||||
|
fig = make_subplots(
|
||||||
|
rows=2,
|
||||||
|
cols=1,
|
||||||
|
subplot_titles=("READ OPERATIONS", "WRITE OPERATIONS"),
|
||||||
|
vertical_spacing=0.15,
|
||||||
|
)
|
||||||
|
|
||||||
|
fig.add_trace(
|
||||||
|
go.Scatter(
|
||||||
|
x=times,
|
||||||
|
y=read_bytes,
|
||||||
|
mode="lines",
|
||||||
|
name="Read",
|
||||||
|
fill="tozeroy",
|
||||||
|
line=dict(color="#ff0088", width=3, shape="spline"),
|
||||||
|
fillcolor="rgba(255, 0, 136, 0.2)",
|
||||||
|
),
|
||||||
|
row=1,
|
||||||
|
col=1,
|
||||||
|
)
|
||||||
|
|
||||||
|
fig.add_trace(
|
||||||
|
go.Scatter(
|
||||||
|
x=times,
|
||||||
|
y=write_bytes,
|
||||||
|
mode="lines",
|
||||||
|
name="Write",
|
||||||
|
fill="tozeroy",
|
||||||
|
line=dict(color="#8844ff", width=3, shape="spline"),
|
||||||
|
fillcolor="rgba(136, 68, 255, 0.2)",
|
||||||
|
),
|
||||||
|
row=2,
|
||||||
|
col=1,
|
||||||
|
)
|
||||||
|
|
||||||
|
fig.update_xaxes(title_text="Time (samples)", row=2, col=1, color="#8899ff")
|
||||||
|
fig.update_yaxes(title_text="Bytes", row=1, col=1, color="#8899ff")
|
||||||
|
fig.update_yaxes(title_text="Bytes", row=2, col=1, color="#8899ff")
|
||||||
|
|
||||||
|
fig.update_layout(
|
||||||
|
height=400,
|
||||||
|
template="plotly_dark",
|
||||||
|
paper_bgcolor="rgba(0,0,0,0)",
|
||||||
|
plot_bgcolor="#0a0e27",
|
||||||
|
showlegend=False,
|
||||||
|
hovermode="x unified",
|
||||||
|
font=dict(family="Courier New, monospace", color="#8899ff"),
|
||||||
|
)
|
||||||
|
|
||||||
|
return fig
|
||||||
|
|
||||||
|
    def _create_timeseries_graph(self, history):
        """Create combined time series graph with futuristic styling.

        Three stacked subplots: syscall counts, network RX/TX, and file
        read/write bytes, all indexed by sample number.
        """
        if len(history) < 2:
            return self._create_empty_figure("Collecting data...")

        times = [i for i in range(len(history))]

        fig = make_subplots(
            rows=3,
            cols=1,
            subplot_titles=(
                "SYSTEM CALLS",
                "NETWORK TRAFFIC (Bytes)",
                "FILE I/O (Bytes)",
            ),
            vertical_spacing=0.1,
            # Rows 2 and 3 each carry two differently-scaled series, so they
            # get an extra y-axis on the right.
            specs=[
                [{"secondary_y": False}],
                [{"secondary_y": True}],
                [{"secondary_y": True}],
            ],
        )

        # Syscalls
        fig.add_trace(
            go.Scatter(
                x=times,
                y=[s.syscall_count for s in history],
                mode="lines",
                name="Syscalls",
                line=dict(color="#00ff88", width=3, shape="spline"),
            ),
            row=1,
            col=1,
        )

        # Network: RX on the primary axis, TX (dotted) on the secondary axis.
        fig.add_trace(
            go.Scatter(
                x=times,
                y=[s.rx_bytes for s in history],
                mode="lines",
                name="RX",
                line=dict(color="#00d4ff", width=2, shape="spline"),
            ),
            row=2,
            col=1,
            secondary_y=False,
        )

        fig.add_trace(
            go.Scatter(
                x=times,
                y=[s.tx_bytes for s in history],
                mode="lines",
                name="TX",
                line=dict(color="#00ff88", width=2, shape="spline", dash="dot"),
            ),
            row=2,
            col=1,
            secondary_y=True,
        )

        # File I/O: reads on the primary axis, writes (dotted) on the secondary.
        fig.add_trace(
            go.Scatter(
                x=times,
                y=[s.read_bytes for s in history],
                mode="lines",
                name="Read",
                line=dict(color="#ff0088", width=2, shape="spline"),
            ),
            row=3,
            col=1,
            secondary_y=False,
        )

        fig.add_trace(
            go.Scatter(
                x=times,
                y=[s.write_bytes for s in history],
                mode="lines",
                name="Write",
                line=dict(color="#8844ff", width=2, shape="spline", dash="dot"),
            ),
            row=3,
            col=1,
            secondary_y=True,
        )

        # Axis titles are colour-matched to their trace for readability.
        fig.update_xaxes(title_text="Time (samples)", row=3, col=1, color="#8899ff")
        fig.update_yaxes(title_text="Count", row=1, col=1, color="#8899ff")
        fig.update_yaxes(
            title_text="RX Bytes", row=2, col=1, secondary_y=False, color="#00d4ff"
        )
        fig.update_yaxes(
            title_text="TX Bytes", row=2, col=1, secondary_y=True, color="#00ff88"
        )
        fig.update_yaxes(
            title_text="Read Bytes", row=3, col=1, secondary_y=False, color="#ff0088"
        )
        fig.update_yaxes(
            title_text="Write Bytes", row=3, col=1, secondary_y=True, color="#8844ff"
        )

        fig.update_layout(
            height=500,
            template="plotly_dark",
            paper_bgcolor="rgba(0,0,0,0)",
            plot_bgcolor="#0a0e27",
            hovermode="x unified",
            showlegend=True,
            # Horizontal legend above the top subplot.
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=1.02,
                xanchor="right",
                x=1,
                font=dict(color="#8899ff"),
            ),
            font=dict(family="Courier New, monospace", color="#8899ff"),
        )

        return fig
|
||||||
|
|
||||||
|
def _calculate_rates(self, history):
|
||||||
|
"""Calculate rates from history."""
|
||||||
|
if len(history) < 2:
|
||||||
|
return {
|
||||||
|
"syscalls_per_sec": 0.0,
|
||||||
|
"rx_bytes_per_sec": 0.0,
|
||||||
|
"tx_bytes_per_sec": 0.0,
|
||||||
|
"read_bytes_per_sec": 0.0,
|
||||||
|
"write_bytes_per_sec": 0.0,
|
||||||
|
}
|
||||||
|
|
||||||
|
recent = history[-1]
|
||||||
|
previous = history[-2]
|
||||||
|
time_delta = recent.timestamp - previous.timestamp
|
||||||
|
|
||||||
|
if time_delta <= 0:
|
||||||
|
time_delta = 1.0
|
||||||
|
|
||||||
|
return {
|
||||||
|
"syscalls_per_sec": max(
|
||||||
|
0, (recent.syscall_count - previous.syscall_count) / time_delta
|
||||||
|
),
|
||||||
|
"rx_bytes_per_sec": max(
|
||||||
|
0, (recent.rx_bytes - previous.rx_bytes) / time_delta
|
||||||
|
),
|
||||||
|
"tx_bytes_per_sec": max(
|
||||||
|
0, (recent.tx_bytes - previous.tx_bytes) / time_delta
|
||||||
|
),
|
||||||
|
"read_bytes_per_sec": max(
|
||||||
|
0, (recent.read_bytes - previous.read_bytes) / time_delta
|
||||||
|
),
|
||||||
|
"write_bytes_per_sec": max(
|
||||||
|
0, (recent.write_bytes - previous.write_bytes) / time_delta
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
def _format_bytes(self, bytes_val: float) -> str:
|
||||||
|
"""Format bytes into human-readable string."""
|
||||||
|
if bytes_val < 0:
|
||||||
|
bytes_val = 0
|
||||||
|
for unit in ["B", "KB", "MB", "GB", "TB"]:
|
||||||
|
if bytes_val < 1024.0:
|
||||||
|
return f"{bytes_val:.2f} {unit}"
|
||||||
|
bytes_val /= 1024.0
|
||||||
|
return f"{bytes_val:.2f} PB"
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
"""Run the web dashboard."""
|
||||||
|
self._running = True
|
||||||
|
# Suppress Werkzeug logging
|
||||||
|
import logging
|
||||||
|
|
||||||
|
log = logging.getLogger("werkzeug")
|
||||||
|
log.setLevel(logging.ERROR)
|
||||||
|
|
||||||
|
self.app.run(debug=False, host=self.host, port=self.port, use_reloader=False)
|
||||||
|
|
||||||
|
    def stop(self):
        """Stop the web dashboard."""
        # NOTE(review): only clears the flag; app.run() in run() is blocking —
        # confirm how the server process itself gets shut down.
        self._running = False
|
||||||
122
BCC-Examples/disksnoop.ipynb
Normal file
122
BCC-Examples/disksnoop.ipynb
Normal file
@ -0,0 +1,122 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "c3520e58-e50f-4bc1-8f9d-a6fecbf6e9f0",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from vmlinux import struct_request, struct_pt_regs\n",
|
||||||
|
"from pythonbpf import bpf, section, bpfglobal, map, BPF\n",
|
||||||
|
"from pythonbpf.helper import ktime\n",
|
||||||
|
"from pythonbpf.maps import HashMap\n",
|
||||||
|
"from ctypes import c_int64, c_uint64, c_int32\n",
|
||||||
|
"\n",
|
||||||
|
"REQ_WRITE = 1\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"@bpf\n",
|
||||||
|
"@map\n",
|
||||||
|
"def start() -> HashMap:\n",
|
||||||
|
" return HashMap(key=c_uint64, value=c_uint64, max_entries=10240)\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"@bpf\n",
|
||||||
|
"@section(\"kprobe/blk_mq_end_request\")\n",
|
||||||
|
"def trace_completion(ctx: struct_pt_regs) -> c_int64:\n",
|
||||||
|
" # Get request pointer from first argument\n",
|
||||||
|
" req_ptr = ctx.di\n",
|
||||||
|
" req = struct_request(ctx.di)\n",
|
||||||
|
" # Print: data_len, cmd_flags, latency_us\n",
|
||||||
|
" data_len = req.__data_len\n",
|
||||||
|
" cmd_flags = req.cmd_flags\n",
|
||||||
|
" # Lookup start timestamp\n",
|
||||||
|
" req_tsp = start.lookup(req_ptr)\n",
|
||||||
|
" if req_tsp:\n",
|
||||||
|
" # Calculate delta in nanoseconds\n",
|
||||||
|
" delta = ktime() - req_tsp\n",
|
||||||
|
"\n",
|
||||||
|
" # Convert to microseconds for printing\n",
|
||||||
|
" delta_us = delta // 1000\n",
|
||||||
|
"\n",
|
||||||
|
" print(f\"{data_len} {cmd_flags:x} {delta_us}\\n\")\n",
|
||||||
|
"\n",
|
||||||
|
" # Delete the entry\n",
|
||||||
|
" start.delete(req_ptr)\n",
|
||||||
|
"\n",
|
||||||
|
" return c_int64(0)\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"@bpf\n",
|
||||||
|
"@section(\"kprobe/blk_mq_start_request\")\n",
|
||||||
|
"def trace_start(ctx1: struct_pt_regs) -> c_int32:\n",
|
||||||
|
" req = ctx1.di\n",
|
||||||
|
" ts = ktime()\n",
|
||||||
|
" start.update(req, ts)\n",
|
||||||
|
" return c_int32(0)\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"@bpf\n",
|
||||||
|
"@bpfglobal\n",
|
||||||
|
"def LICENSE() -> str:\n",
|
||||||
|
" return \"GPL\"\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"b = BPF()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "97040f73-98e0-4993-94c6-125d1b42d931",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"b.load()\n",
|
||||||
|
"b.attach_all()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "b1bd4f51-fa25-42e1-877c-e48a2605189f",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from pythonbpf import trace_pipe"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "96b4b59b-b0db-4952-9534-7a714f685089",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"trace_pipe()"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.12.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
||||||
58
BCC-Examples/disksnoop.py
Normal file
58
BCC-Examples/disksnoop.py
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
from vmlinux import struct_request, struct_pt_regs
|
||||||
|
from pythonbpf import bpf, section, bpfglobal, compile, map
|
||||||
|
from pythonbpf.helper import ktime
|
||||||
|
from pythonbpf.maps import HashMap
|
||||||
|
from ctypes import c_int64, c_uint64, c_int32
|
||||||
|
|
||||||
|
# Constants
|
||||||
|
REQ_WRITE = 1 # from include/linux/blk_types.h
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@map
def start() -> HashMap:
    # BPF hash map: request pointer (u64) -> issue timestamp from ktime()
    # (u64, ns). Written by trace_start, consumed by trace_completion.
    return HashMap(key=c_uint64, value=c_uint64, max_entries=10240)
|
||||||
|
|
||||||
|
|
||||||
|
# Kprobe on blk_mq_end_request: fires when a block-layer request completes.
# Emits "<data_len> <cmd_flags hex> <latency_us>" using the timestamp that
# trace_start recorded for the same request pointer.
@bpf
@section("kprobe/blk_mq_end_request")
def trace_completion(ctx: struct_pt_regs) -> c_int64:
    # Get request pointer from first argument
    req_ptr = ctx.di
    req = struct_request(ctx.di)
    # Print: data_len, cmd_flags, latency_us
    data_len = req.__data_len
    cmd_flags = req.cmd_flags
    # Lookup start timestamp
    req_tsp = start.lookup(req_ptr)
    if req_tsp:
        # Calculate delta in nanoseconds
        delta = ktime() - req_tsp

        # Convert to microseconds for printing
        delta_us = delta // 1000

        print(f"{data_len} {cmd_flags:x} {delta_us}\n")

        # Delete the entry
        start.delete(req_ptr)

    return c_int64(0)
|
||||||
|
|
||||||
|
|
||||||
|
# Kprobe on blk_mq_start_request: records the issue time of each request so
# trace_completion can compute its latency.
@bpf
@section("kprobe/blk_mq_start_request")
def trace_start(ctx1: struct_pt_regs) -> c_int32:
    req = ctx1.di
    ts = ktime()
    start.update(req, ts)
    return c_int32(0)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
@bpfglobal
def LICENSE() -> str:
    # eBPF program license declaration; GPL is required to use GPL-only
    # kernel helpers.
    return "GPL"
|
||||||
|
|
||||||
|
|
||||||
|
compile()
|
||||||
9
BCC-Examples/requirements.txt
Normal file
9
BCC-Examples/requirements.txt
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
# =============================================================================
|
||||||
|
# Requirements for PythonBPF BCC-Examples
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
dash
|
||||||
|
matplotlib
|
||||||
|
numpy
|
||||||
|
plotly
|
||||||
|
rich
|
||||||
46
README.md
46
README.md
@ -40,16 +40,11 @@ Python-BPF is an LLVM IR generator for eBPF programs written in Python. It uses
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Try It Out!
|
|
||||||
Run
|
|
||||||
```bash
|
|
||||||
curl -s https://raw.githubusercontent.com/pythonbpf/Python-BPF/refs/heads/master/tools/setup.sh | sudo bash
|
|
||||||
```
|
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
Dependencies:
|
Dependencies:
|
||||||
|
|
||||||
|
* `bpftool`
|
||||||
* `clang`
|
* `clang`
|
||||||
* Python ≥ 3.8
|
* Python ≥ 3.8
|
||||||
|
|
||||||
@ -61,6 +56,38 @@ pip install pythonbpf pylibbpf
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Try It Out!
|
||||||
|
|
||||||
|
#### First, generate the vmlinux.py file for your kernel:
|
||||||
|
- Install the required dependencies:
|
||||||
|
- On Ubuntu:
|
||||||
|
```bash
|
||||||
|
sudo apt-get install bpftool clang
|
||||||
|
pip install pythonbpf pylibbpf ctypeslib2
|
||||||
|
```
|
||||||
|
- Generate the `vmlinux.py` using:
|
||||||
|
```bash
|
||||||
|
sudo tools/vmlinux-gen.py
|
||||||
|
```
|
||||||
|
- Copy this file to `BCC-Examples/`
|
||||||
|
|
||||||
|
#### Next, install requirements for BCC-Examples:
|
||||||
|
- These requirements are only required for the python notebooks, vfsreadlat and container-monitor examples.
|
||||||
|
```bash
|
||||||
|
pip install -r BCC-Examples/requirements.txt
|
||||||
|
```
|
||||||
|
- Now, follow the instructions in the [BCC-Examples/README.md](https://github.com/pythonbpf/Python-BPF/blob/master/BCC-Examples/README.md) to run the examples.
|
||||||
|
|
||||||
|
|
||||||
|
#### To spin up jupyter notebook examples:
|
||||||
|
- Run and follow the instructions on screen
|
||||||
|
```bash
|
||||||
|
curl -s https://raw.githubusercontent.com/pythonbpf/Python-BPF/refs/heads/master/tools/setup.sh | sudo bash
|
||||||
|
```
|
||||||
|
- Check the jupyter server on the web browser and run the notebooks in the `BCC-Examples/` folder.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Example Usage
|
## Example Usage
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@ -88,16 +115,15 @@ def hist() -> HashMap:
|
|||||||
@section("tracepoint/syscalls/sys_enter_clone")
|
@section("tracepoint/syscalls/sys_enter_clone")
|
||||||
def hello(ctx: c_void_p) -> c_int64:
|
def hello(ctx: c_void_p) -> c_int64:
|
||||||
process_id = pid()
|
process_id = pid()
|
||||||
one = 1
|
|
||||||
prev = hist.lookup(process_id)
|
prev = hist.lookup(process_id)
|
||||||
if prev:
|
if prev:
|
||||||
previous_value = prev + 1
|
previous_value = prev + 1
|
||||||
print(f"count: {previous_value} with {process_id}")
|
print(f"count: {previous_value} with {process_id}")
|
||||||
hist.update(process_id, previous_value)
|
hist.update(process_id, previous_value)
|
||||||
return c_int64(0)
|
return 0
|
||||||
else:
|
else:
|
||||||
hist.update(process_id, one)
|
hist.update(process_id, 1)
|
||||||
return c_int64(0)
|
return 0
|
||||||
|
|
||||||
|
|
||||||
@bpf
|
@bpf
|
||||||
|
|||||||
@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|||||||
|
|
||||||
[project]
|
[project]
|
||||||
name = "pythonbpf"
|
name = "pythonbpf"
|
||||||
version = "0.1.6"
|
version = "0.1.8"
|
||||||
description = "Reduced Python frontend for eBPF"
|
description = "Reduced Python frontend for eBPF"
|
||||||
authors = [
|
authors = [
|
||||||
{ name = "r41k0u", email="pragyanshchaturvedi18@gmail.com" },
|
{ name = "r41k0u", email="pragyanshchaturvedi18@gmail.com" },
|
||||||
@ -29,7 +29,7 @@ license = {text = "Apache-2.0"}
|
|||||||
requires-python = ">=3.10"
|
requires-python = ">=3.10"
|
||||||
|
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"llvmlite",
|
"llvmlite>=0.45",
|
||||||
"astpretty",
|
"astpretty",
|
||||||
"pylibbpf"
|
"pylibbpf"
|
||||||
]
|
]
|
||||||
|
|||||||
@ -114,9 +114,34 @@ def _allocate_for_call(
|
|||||||
# Struct constructors
|
# Struct constructors
|
||||||
elif call_type in structs_sym_tab:
|
elif call_type in structs_sym_tab:
|
||||||
struct_info = structs_sym_tab[call_type]
|
struct_info = structs_sym_tab[call_type]
|
||||||
var = builder.alloca(struct_info.ir_type, name=var_name)
|
if len(rval.args) == 0:
|
||||||
local_sym_tab[var_name] = LocalSymbol(var, struct_info.ir_type, call_type)
|
# Zero-arg constructor: allocate the struct itself
|
||||||
logger.info(f"Pre-allocated {var_name} for struct {call_type}")
|
var = builder.alloca(struct_info.ir_type, name=var_name)
|
||||||
|
local_sym_tab[var_name] = LocalSymbol(
|
||||||
|
var, struct_info.ir_type, call_type
|
||||||
|
)
|
||||||
|
logger.info(f"Pre-allocated {var_name} for struct {call_type}")
|
||||||
|
else:
|
||||||
|
# Pointer cast: allocate as pointer to struct
|
||||||
|
ptr_type = ir.PointerType(struct_info.ir_type)
|
||||||
|
var = builder.alloca(ptr_type, name=var_name)
|
||||||
|
var.align = 8
|
||||||
|
local_sym_tab[var_name] = LocalSymbol(var, ptr_type, call_type)
|
||||||
|
logger.info(
|
||||||
|
f"Pre-allocated {var_name} for struct pointer cast to {call_type}"
|
||||||
|
)
|
||||||
|
|
||||||
|
elif VmlinuxHandlerRegistry.is_vmlinux_struct(call_type):
|
||||||
|
# When calling struct_name(pointer), we're doing a cast, not construction
|
||||||
|
# So we allocate as a pointer (i64) not as the actual struct
|
||||||
|
var = builder.alloca(ir.IntType(64), name=var_name)
|
||||||
|
var.align = 8
|
||||||
|
local_sym_tab[var_name] = LocalSymbol(
|
||||||
|
var, ir.IntType(64), VmlinuxHandlerRegistry.get_struct_type(call_type)
|
||||||
|
)
|
||||||
|
logger.info(
|
||||||
|
f"Pre-allocated {var_name} for vmlinux struct pointer cast to {call_type}"
|
||||||
|
)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
logger.warning(f"Unknown call type for allocation: {call_type}")
|
logger.warning(f"Unknown call type for allocation: {call_type}")
|
||||||
@ -178,7 +203,7 @@ def _allocate_for_map_method(
|
|||||||
# Main variable (pointer to pointer)
|
# Main variable (pointer to pointer)
|
||||||
ir_type = ir.PointerType(ir.IntType(64))
|
ir_type = ir.PointerType(ir.IntType(64))
|
||||||
var = builder.alloca(ir_type, name=var_name)
|
var = builder.alloca(ir_type, name=var_name)
|
||||||
local_sym_tab[var_name] = LocalSymbol(var, ir_type)
|
local_sym_tab[var_name] = LocalSymbol(var, ir_type, value_type)
|
||||||
# Temporary variable for computed values
|
# Temporary variable for computed values
|
||||||
tmp_ir_type = value_ir_type
|
tmp_ir_type = value_ir_type
|
||||||
var_tmp = builder.alloca(tmp_ir_type, name=f"{var_name}_tmp")
|
var_tmp = builder.alloca(tmp_ir_type, name=f"{var_name}_tmp")
|
||||||
@ -325,13 +350,6 @@ def _allocate_for_attribute(builder, var_name, rval, local_sym_tab, structs_sym_
|
|||||||
VmlinuxHandlerRegistry.get_field_type(vmlinux_struct_name, field_name)
|
VmlinuxHandlerRegistry.get_field_type(vmlinux_struct_name, field_name)
|
||||||
)
|
)
|
||||||
field_ir, field = field_type
|
field_ir, field = field_type
|
||||||
# TODO: For now, we only support integer type allocations.
|
|
||||||
# This always assumes first argument of function to be the context struct
|
|
||||||
base_ptr = builder.function.args[0]
|
|
||||||
local_sym_tab[
|
|
||||||
struct_var
|
|
||||||
].var = base_ptr # This is repurposing of var to store the pointer of the base type
|
|
||||||
local_sym_tab[struct_var].ir_type = field_ir
|
|
||||||
|
|
||||||
# Determine the actual IR type based on the field's type
|
# Determine the actual IR type based on the field's type
|
||||||
actual_ir_type = None
|
actual_ir_type = None
|
||||||
@ -366,6 +384,7 @@ def _allocate_for_attribute(builder, var_name, rval, local_sym_tab, structs_sym_
|
|||||||
f"Could not determine size for ctypes field {field_name}: {e}"
|
f"Could not determine size for ctypes field {field_name}: {e}"
|
||||||
)
|
)
|
||||||
actual_ir_type = ir.IntType(64)
|
actual_ir_type = ir.IntType(64)
|
||||||
|
field_size_bits = 64
|
||||||
|
|
||||||
# Check if it's a nested vmlinux struct or complex type
|
# Check if it's a nested vmlinux struct or complex type
|
||||||
elif field.type.__module__ == "vmlinux":
|
elif field.type.__module__ == "vmlinux":
|
||||||
@ -374,24 +393,37 @@ def _allocate_for_attribute(builder, var_name, rval, local_sym_tab, structs_sym_
|
|||||||
field.ctype_complex_type, ctypes._Pointer
|
field.ctype_complex_type, ctypes._Pointer
|
||||||
):
|
):
|
||||||
actual_ir_type = ir.IntType(64) # Pointer is always 64-bit
|
actual_ir_type = ir.IntType(64) # Pointer is always 64-bit
|
||||||
|
field_size_bits = 64
|
||||||
# For embedded structs, this is more complex - might need different handling
|
# For embedded structs, this is more complex - might need different handling
|
||||||
else:
|
else:
|
||||||
logger.warning(
|
logger.warning(
|
||||||
f"Field {field_name} is a nested vmlinux struct, using i64 for now"
|
f"Field {field_name} is a nested vmlinux struct, using i64 for now"
|
||||||
)
|
)
|
||||||
actual_ir_type = ir.IntType(64)
|
actual_ir_type = ir.IntType(64)
|
||||||
|
field_size_bits = 64
|
||||||
else:
|
else:
|
||||||
logger.warning(
|
logger.warning(
|
||||||
f"Unknown field type module {field.type.__module__} for {field_name}"
|
f"Unknown field type module {field.type.__module__} for {field_name}"
|
||||||
)
|
)
|
||||||
actual_ir_type = ir.IntType(64)
|
actual_ir_type = ir.IntType(64)
|
||||||
|
field_size_bits = 64
|
||||||
|
|
||||||
# Allocate with the actual IR type, not the GlobalVariable
|
# Pre-allocate the tmp storage used by load_struct_field (so we don't alloca inside handler)
|
||||||
|
tmp_name = f"{struct_var}_{field_name}_tmp"
|
||||||
|
tmp_ir_type = ir.IntType(field_size_bits)
|
||||||
|
tmp_var = builder.alloca(tmp_ir_type, name=tmp_name)
|
||||||
|
tmp_var.align = tmp_ir_type.width // 8
|
||||||
|
local_sym_tab[tmp_name] = LocalSymbol(tmp_var, tmp_ir_type)
|
||||||
|
logger.info(
|
||||||
|
f"Pre-allocated temp {tmp_name} (i{field_size_bits}) for vmlinux field read {vmlinux_struct_name}.{field_name}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Allocate with the actual IR type for the destination var
|
||||||
var = _allocate_with_type(builder, var_name, actual_ir_type)
|
var = _allocate_with_type(builder, var_name, actual_ir_type)
|
||||||
local_sym_tab[var_name] = LocalSymbol(var, actual_ir_type, field)
|
local_sym_tab[var_name] = LocalSymbol(var, actual_ir_type, field)
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Pre-allocated {var_name} from vmlinux struct {vmlinux_struct_name}.{field_name}"
|
f"Pre-allocated {var_name} as {actual_ir_type} from vmlinux struct {vmlinux_struct_name}.{field_name}"
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
|
|||||||
@ -1,5 +1,7 @@
|
|||||||
import ast
|
import ast
|
||||||
import logging
|
import logging
|
||||||
|
from inspect import isclass
|
||||||
|
|
||||||
from llvmlite import ir
|
from llvmlite import ir
|
||||||
from pythonbpf.expr import eval_expr
|
from pythonbpf.expr import eval_expr
|
||||||
from pythonbpf.helper import emit_probe_read_kernel_str_call
|
from pythonbpf.helper import emit_probe_read_kernel_str_call
|
||||||
@ -148,8 +150,47 @@ def handle_variable_assignment(
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
val, val_type = val_result
|
val, val_type = val_result
|
||||||
logger.info(f"Evaluated value for {var_name}: {val} of type {val_type}, {var_type}")
|
logger.info(
|
||||||
|
f"Evaluated value for {var_name}: {val} of type {val_type}, expected {var_type}"
|
||||||
|
)
|
||||||
|
|
||||||
if val_type != var_type:
|
if val_type != var_type:
|
||||||
|
# Handle vmlinux struct pointers - they're represented as Python classes but are i64 pointers
|
||||||
|
if isclass(val_type) and (val_type.__module__ == "vmlinux"):
|
||||||
|
logger.info("Handling vmlinux struct pointer assignment")
|
||||||
|
# vmlinux struct pointers: val is a pointer, need to convert to i64
|
||||||
|
if isinstance(var_type, ir.IntType) and var_type.width == 64:
|
||||||
|
# Convert pointer to i64 using ptrtoint
|
||||||
|
if isinstance(val.type, ir.PointerType):
|
||||||
|
val = builder.ptrtoint(val, ir.IntType(64))
|
||||||
|
logger.info(
|
||||||
|
"Converted vmlinux struct pointer to i64 using ptrtoint"
|
||||||
|
)
|
||||||
|
builder.store(val, var_ptr)
|
||||||
|
logger.info(f"Assigned vmlinux struct pointer to {var_name} (i64)")
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"Type mismatch: vmlinux struct pointer requires i64, got {var_type}"
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
# Handle user-defined struct pointer casts
|
||||||
|
# val_type is a string (struct name), var_type is a pointer to the struct
|
||||||
|
if isinstance(val_type, str) and val_type in structs_sym_tab:
|
||||||
|
struct_info = structs_sym_tab[val_type]
|
||||||
|
expected_ptr_type = ir.PointerType(struct_info.ir_type)
|
||||||
|
|
||||||
|
# Check if var_type matches the expected pointer type
|
||||||
|
if isinstance(var_type, ir.PointerType) and var_type == expected_ptr_type:
|
||||||
|
# val is already the correct pointer type from inttoptr/bitcast
|
||||||
|
builder.store(val, var_ptr)
|
||||||
|
logger.info(f"Assigned user-defined struct pointer cast to {var_name}")
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"Type mismatch: user-defined struct pointer cast requires pointer type, got {var_type}"
|
||||||
|
)
|
||||||
|
return False
|
||||||
if isinstance(val_type, Field):
|
if isinstance(val_type, Field):
|
||||||
logger.info("Handling assignment to struct field")
|
logger.info("Handling assignment to struct field")
|
||||||
# Special handling for struct_xdp_md i32 fields that are zero-extended to i64
|
# Special handling for struct_xdp_md i32 fields that are zero-extended to i64
|
||||||
|
|||||||
@ -25,7 +25,7 @@ import re
|
|||||||
|
|
||||||
logger: Logger = logging.getLogger(__name__)
|
logger: Logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
VERSION = "v0.1.6"
|
VERSION = "v0.1.8"
|
||||||
|
|
||||||
|
|
||||||
def finalize_module(original_str):
|
def finalize_module(original_str):
|
||||||
|
|||||||
@ -1,6 +1,6 @@
|
|||||||
from .expr_pass import eval_expr, handle_expr, get_operand_value
|
from .expr_pass import eval_expr, handle_expr, get_operand_value
|
||||||
from .type_normalization import convert_to_bool, get_base_type_and_depth
|
from .type_normalization import convert_to_bool, get_base_type_and_depth
|
||||||
from .ir_ops import deref_to_depth
|
from .ir_ops import deref_to_depth, access_struct_field
|
||||||
from .call_registry import CallHandlerRegistry
|
from .call_registry import CallHandlerRegistry
|
||||||
from .vmlinux_registry import VmlinuxHandlerRegistry
|
from .vmlinux_registry import VmlinuxHandlerRegistry
|
||||||
|
|
||||||
@ -10,6 +10,7 @@ __all__ = [
|
|||||||
"convert_to_bool",
|
"convert_to_bool",
|
||||||
"get_base_type_and_depth",
|
"get_base_type_and_depth",
|
||||||
"deref_to_depth",
|
"deref_to_depth",
|
||||||
|
"access_struct_field",
|
||||||
"get_operand_value",
|
"get_operand_value",
|
||||||
"CallHandlerRegistry",
|
"CallHandlerRegistry",
|
||||||
"VmlinuxHandlerRegistry",
|
"VmlinuxHandlerRegistry",
|
||||||
|
|||||||
@ -6,14 +6,14 @@ from typing import Dict
|
|||||||
|
|
||||||
from pythonbpf.type_deducer import ctypes_to_ir, is_ctypes
|
from pythonbpf.type_deducer import ctypes_to_ir, is_ctypes
|
||||||
from .call_registry import CallHandlerRegistry
|
from .call_registry import CallHandlerRegistry
|
||||||
|
from .ir_ops import deref_to_depth, access_struct_field
|
||||||
from .type_normalization import (
|
from .type_normalization import (
|
||||||
convert_to_bool,
|
convert_to_bool,
|
||||||
handle_comparator,
|
handle_comparator,
|
||||||
get_base_type_and_depth,
|
get_base_type_and_depth,
|
||||||
deref_to_depth,
|
|
||||||
)
|
)
|
||||||
from pythonbpf.vmlinux_parser.assignment_info import Field
|
|
||||||
from .vmlinux_registry import VmlinuxHandlerRegistry
|
from .vmlinux_registry import VmlinuxHandlerRegistry
|
||||||
|
from ..vmlinux_parser.dependency_node import Field
|
||||||
|
|
||||||
logger: Logger = logging.getLogger(__name__)
|
logger: Logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -61,6 +61,7 @@ def _handle_constant_expr(module, builder, expr: ast.Constant):
|
|||||||
|
|
||||||
|
|
||||||
def _handle_attribute_expr(
|
def _handle_attribute_expr(
|
||||||
|
func,
|
||||||
expr: ast.Attribute,
|
expr: ast.Attribute,
|
||||||
local_sym_tab: Dict,
|
local_sym_tab: Dict,
|
||||||
structs_sym_tab: Dict,
|
structs_sym_tab: Dict,
|
||||||
@ -89,12 +90,30 @@ def _handle_attribute_expr(
|
|||||||
return vmlinux_result
|
return vmlinux_result
|
||||||
else:
|
else:
|
||||||
raise RuntimeError("Vmlinux struct did not process successfully")
|
raise RuntimeError("Vmlinux struct did not process successfully")
|
||||||
metadata = structs_sym_tab[var_metadata]
|
|
||||||
if attr_name in metadata.fields:
|
elif isinstance(var_metadata, Field):
|
||||||
gep = metadata.gep(builder, var_ptr, attr_name)
|
logger.error(
|
||||||
val = builder.load(gep)
|
f"Cannot access field '{attr_name}' on already-loaded field value '{var_name}'"
|
||||||
field_type = metadata.field_type(attr_name)
|
)
|
||||||
return val, field_type
|
return None
|
||||||
|
|
||||||
|
if var_metadata in structs_sym_tab:
|
||||||
|
return access_struct_field(
|
||||||
|
builder,
|
||||||
|
var_ptr,
|
||||||
|
var_type,
|
||||||
|
var_metadata,
|
||||||
|
expr.attr,
|
||||||
|
structs_sym_tab,
|
||||||
|
func,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.error(f"Struct metadata for '{var_name}' not found")
|
||||||
|
else:
|
||||||
|
logger.error(f"Undefined variable '{var_name}' for attribute access")
|
||||||
|
else:
|
||||||
|
logger.error("Unsupported attribute base expression type")
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
@ -149,7 +168,11 @@ def get_operand_value(
|
|||||||
var_type = var.type
|
var_type = var.type
|
||||||
base_type, depth = get_base_type_and_depth(var_type)
|
base_type, depth = get_base_type_and_depth(var_type)
|
||||||
logger.info(f"var is {var}, base_type is {base_type}, depth is {depth}")
|
logger.info(f"var is {var}, base_type is {base_type}, depth is {depth}")
|
||||||
val = deref_to_depth(func, builder, var, depth)
|
if depth == 1:
|
||||||
|
val = builder.load(var)
|
||||||
|
return val
|
||||||
|
else:
|
||||||
|
val = deref_to_depth(func, builder, var, depth)
|
||||||
return val
|
return val
|
||||||
else:
|
else:
|
||||||
# Check if it's a vmlinux enum/constant
|
# Check if it's a vmlinux enum/constant
|
||||||
@ -525,6 +548,134 @@ def _handle_boolean_op(
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Struct casting (including vmlinux struct casting)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def _handle_vmlinux_cast(
|
||||||
|
func,
|
||||||
|
module,
|
||||||
|
builder,
|
||||||
|
expr,
|
||||||
|
local_sym_tab,
|
||||||
|
map_sym_tab,
|
||||||
|
structs_sym_tab=None,
|
||||||
|
):
|
||||||
|
# handle expressions such as struct_request(ctx.di) where struct_request is a vmlinux
|
||||||
|
# struct and ctx.di is a pointer to a struct but is actually represented as a c_uint64
|
||||||
|
# which needs to be cast to a pointer. This is also a field of another vmlinux struct
|
||||||
|
"""Handle vmlinux struct cast expressions like struct_request(ctx.di)."""
|
||||||
|
if len(expr.args) != 1:
|
||||||
|
logger.info("vmlinux struct cast takes exactly one argument")
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Get the struct name
|
||||||
|
struct_name = expr.func.id
|
||||||
|
|
||||||
|
# Evaluate the argument (e.g., ctx.di which is a c_uint64)
|
||||||
|
arg_result = eval_expr(
|
||||||
|
func,
|
||||||
|
module,
|
||||||
|
builder,
|
||||||
|
expr.args[0],
|
||||||
|
local_sym_tab,
|
||||||
|
map_sym_tab,
|
||||||
|
structs_sym_tab,
|
||||||
|
)
|
||||||
|
|
||||||
|
if arg_result is None:
|
||||||
|
logger.info("Failed to evaluate argument to vmlinux struct cast")
|
||||||
|
return None
|
||||||
|
|
||||||
|
arg_val, arg_type = arg_result
|
||||||
|
# Get the vmlinux struct type
|
||||||
|
vmlinux_struct_type = VmlinuxHandlerRegistry.get_struct_type(struct_name)
|
||||||
|
if vmlinux_struct_type is None:
|
||||||
|
logger.error(f"Failed to get vmlinux struct type for {struct_name}")
|
||||||
|
return None
|
||||||
|
# Cast the integer/value to a pointer to the struct
|
||||||
|
# If arg_val is an integer type, we need to inttoptr it
|
||||||
|
ptr_type = ir.PointerType()
|
||||||
|
# TODO: add a field value type check here
|
||||||
|
# print(arg_type)
|
||||||
|
if isinstance(arg_type, Field):
|
||||||
|
if ctypes_to_ir(arg_type.type.__name__):
|
||||||
|
# Cast integer to pointer
|
||||||
|
casted_ptr = builder.inttoptr(arg_val, ptr_type)
|
||||||
|
else:
|
||||||
|
logger.error(f"Unsupported type for vmlinux cast: {arg_type}")
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
casted_ptr = builder.inttoptr(arg_val, ptr_type)
|
||||||
|
|
||||||
|
return casted_ptr, vmlinux_struct_type
|
||||||
|
|
||||||
|
|
||||||
|
def _handle_user_defined_struct_cast(
|
||||||
|
func,
|
||||||
|
module,
|
||||||
|
builder,
|
||||||
|
expr,
|
||||||
|
local_sym_tab,
|
||||||
|
map_sym_tab,
|
||||||
|
structs_sym_tab,
|
||||||
|
):
|
||||||
|
"""Handle user-defined struct cast expressions like iphdr(nh).
|
||||||
|
|
||||||
|
This casts a pointer/integer value to a pointer to the user-defined struct,
|
||||||
|
similar to how vmlinux struct casts work but for user-defined @struct types.
|
||||||
|
"""
|
||||||
|
if len(expr.args) != 1:
|
||||||
|
logger.info("User-defined struct cast takes exactly one argument")
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Get the struct name
|
||||||
|
struct_name = expr.func.id
|
||||||
|
|
||||||
|
if struct_name not in structs_sym_tab:
|
||||||
|
logger.error(f"Struct {struct_name} not found in structs_sym_tab")
|
||||||
|
return None
|
||||||
|
|
||||||
|
struct_info = structs_sym_tab[struct_name]
|
||||||
|
|
||||||
|
# Evaluate the argument (e.g.,
|
||||||
|
# an address/pointer value)
|
||||||
|
arg_result = eval_expr(
|
||||||
|
func,
|
||||||
|
module,
|
||||||
|
builder,
|
||||||
|
expr.args[0],
|
||||||
|
local_sym_tab,
|
||||||
|
map_sym_tab,
|
||||||
|
structs_sym_tab,
|
||||||
|
)
|
||||||
|
|
||||||
|
if arg_result is None:
|
||||||
|
logger.info("Failed to evaluate argument to user-defined struct cast")
|
||||||
|
return None
|
||||||
|
|
||||||
|
arg_val, arg_type = arg_result
|
||||||
|
|
||||||
|
# Cast the integer/pointer value to a pointer to the struct type
|
||||||
|
# The struct pointer type is a pointer to the struct's IR type
|
||||||
|
struct_ptr_type = ir.PointerType(struct_info.ir_type)
|
||||||
|
|
||||||
|
# If arg_val is an integer type (like i64), convert to pointer using inttoptr
|
||||||
|
if isinstance(arg_val.type, ir.IntType):
|
||||||
|
casted_ptr = builder.inttoptr(arg_val, struct_ptr_type)
|
||||||
|
logger.info(f"Cast integer to pointer for struct {struct_name}")
|
||||||
|
elif isinstance(arg_val.type, ir.PointerType):
|
||||||
|
# If already a pointer, bitcast to the struct pointer type
|
||||||
|
casted_ptr = builder.bitcast(arg_val, struct_ptr_type)
|
||||||
|
logger.info(f"Bitcast pointer to struct pointer for {struct_name}")
|
||||||
|
else:
|
||||||
|
logger.error(f"Unsupported type for user-defined struct cast: {arg_val.type}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
return casted_ptr, struct_name
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# Expression Dispatcher
|
# Expression Dispatcher
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
@ -545,6 +696,18 @@ def eval_expr(
|
|||||||
elif isinstance(expr, ast.Constant):
|
elif isinstance(expr, ast.Constant):
|
||||||
return _handle_constant_expr(module, builder, expr)
|
return _handle_constant_expr(module, builder, expr)
|
||||||
elif isinstance(expr, ast.Call):
|
elif isinstance(expr, ast.Call):
|
||||||
|
if isinstance(expr.func, ast.Name) and VmlinuxHandlerRegistry.is_vmlinux_struct(
|
||||||
|
expr.func.id
|
||||||
|
):
|
||||||
|
return _handle_vmlinux_cast(
|
||||||
|
func,
|
||||||
|
module,
|
||||||
|
builder,
|
||||||
|
expr,
|
||||||
|
local_sym_tab,
|
||||||
|
map_sym_tab,
|
||||||
|
structs_sym_tab,
|
||||||
|
)
|
||||||
if isinstance(expr.func, ast.Name) and expr.func.id == "deref":
|
if isinstance(expr.func, ast.Name) and expr.func.id == "deref":
|
||||||
return _handle_deref_call(expr, local_sym_tab, builder)
|
return _handle_deref_call(expr, local_sym_tab, builder)
|
||||||
|
|
||||||
@ -558,6 +721,16 @@ def eval_expr(
|
|||||||
map_sym_tab,
|
map_sym_tab,
|
||||||
structs_sym_tab,
|
structs_sym_tab,
|
||||||
)
|
)
|
||||||
|
if isinstance(expr.func, ast.Name) and (expr.func.id in structs_sym_tab):
|
||||||
|
return _handle_user_defined_struct_cast(
|
||||||
|
func,
|
||||||
|
module,
|
||||||
|
builder,
|
||||||
|
expr,
|
||||||
|
local_sym_tab,
|
||||||
|
map_sym_tab,
|
||||||
|
structs_sym_tab,
|
||||||
|
)
|
||||||
|
|
||||||
result = CallHandlerRegistry.handle_call(
|
result = CallHandlerRegistry.handle_call(
|
||||||
expr, module, builder, func, local_sym_tab, map_sym_tab, structs_sym_tab
|
expr, module, builder, func, local_sym_tab, map_sym_tab, structs_sym_tab
|
||||||
@ -568,7 +741,9 @@ def eval_expr(
|
|||||||
logger.warning(f"Unknown call: {ast.dump(expr)}")
|
logger.warning(f"Unknown call: {ast.dump(expr)}")
|
||||||
return None
|
return None
|
||||||
elif isinstance(expr, ast.Attribute):
|
elif isinstance(expr, ast.Attribute):
|
||||||
return _handle_attribute_expr(expr, local_sym_tab, structs_sym_tab, builder)
|
return _handle_attribute_expr(
|
||||||
|
func, expr, local_sym_tab, structs_sym_tab, builder
|
||||||
|
)
|
||||||
elif isinstance(expr, ast.BinOp):
|
elif isinstance(expr, ast.BinOp):
|
||||||
return _handle_binary_op(
|
return _handle_binary_op(
|
||||||
func,
|
func,
|
||||||
|
|||||||
@ -17,34 +17,100 @@ def deref_to_depth(func, builder, val, target_depth):
|
|||||||
|
|
||||||
# dereference with null check
|
# dereference with null check
|
||||||
pointee_type = cur_type.pointee
|
pointee_type = cur_type.pointee
|
||||||
null_check_block = builder.block
|
|
||||||
not_null_block = func.append_basic_block(name=f"deref_not_null_{depth}")
|
|
||||||
merge_block = func.append_basic_block(name=f"deref_merge_{depth}")
|
|
||||||
|
|
||||||
null_ptr = ir.Constant(cur_type, None)
|
def load_op(builder, ptr):
|
||||||
is_not_null = builder.icmp_signed("!=", cur_val, null_ptr)
|
return builder.load(ptr)
|
||||||
logger.debug(f"Inserted null check for pointer at depth {depth}")
|
|
||||||
|
|
||||||
builder.cbranch(is_not_null, not_null_block, merge_block)
|
cur_val = _null_checked_operation(
|
||||||
|
func, builder, cur_val, load_op, pointee_type, f"deref_{depth}"
|
||||||
builder.position_at_end(not_null_block)
|
|
||||||
dereferenced_val = builder.load(cur_val)
|
|
||||||
logger.debug(f"Dereferenced to depth {depth - 1}, type: {pointee_type}")
|
|
||||||
builder.branch(merge_block)
|
|
||||||
|
|
||||||
builder.position_at_end(merge_block)
|
|
||||||
phi = builder.phi(pointee_type, name=f"deref_result_{depth}")
|
|
||||||
|
|
||||||
zero_value = (
|
|
||||||
ir.Constant(pointee_type, 0)
|
|
||||||
if isinstance(pointee_type, ir.IntType)
|
|
||||||
else ir.Constant(pointee_type, None)
|
|
||||||
)
|
)
|
||||||
phi.add_incoming(zero_value, null_check_block)
|
|
||||||
|
|
||||||
phi.add_incoming(dereferenced_val, not_null_block)
|
|
||||||
|
|
||||||
# Continue with phi result
|
|
||||||
cur_val = phi
|
|
||||||
cur_type = pointee_type
|
cur_type = pointee_type
|
||||||
|
logger.debug(f"Dereferenced to depth {depth}, type: {pointee_type}")
|
||||||
return cur_val
|
return cur_val
|
||||||
|
|
||||||
|
|
||||||
|
def _null_checked_operation(func, builder, ptr, operation, result_type, name_prefix):
|
||||||
|
"""
|
||||||
|
Generic null-checked operation on a pointer.
|
||||||
|
"""
|
||||||
|
curr_block = builder.block
|
||||||
|
not_null_block = func.append_basic_block(name=f"{name_prefix}_not_null")
|
||||||
|
merge_block = func.append_basic_block(name=f"{name_prefix}_merge")
|
||||||
|
|
||||||
|
null_ptr = ir.Constant(ptr.type, None)
|
||||||
|
is_not_null = builder.icmp_signed("!=", ptr, null_ptr)
|
||||||
|
builder.cbranch(is_not_null, not_null_block, merge_block)
|
||||||
|
|
||||||
|
builder.position_at_end(not_null_block)
|
||||||
|
result = operation(builder, ptr)
|
||||||
|
not_null_after = builder.block
|
||||||
|
builder.branch(merge_block)
|
||||||
|
|
||||||
|
builder.position_at_end(merge_block)
|
||||||
|
phi = builder.phi(result_type, name=f"{name_prefix}_result")
|
||||||
|
|
||||||
|
if isinstance(result_type, ir.IntType):
|
||||||
|
null_val = ir.Constant(result_type, 0)
|
||||||
|
elif isinstance(result_type, ir.PointerType):
|
||||||
|
null_val = ir.Constant(result_type, None)
|
||||||
|
else:
|
||||||
|
null_val = ir.Constant(result_type, ir.Undefined)
|
||||||
|
|
||||||
|
phi.add_incoming(null_val, curr_block)
|
||||||
|
phi.add_incoming(result, not_null_after)
|
||||||
|
|
||||||
|
return phi
|
||||||
|
|
||||||
|
|
||||||
|
def access_struct_field(
|
||||||
|
builder, var_ptr, var_type, var_metadata, field_name, structs_sym_tab, func=None
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Access a struct field - automatically returns value or pointer based on field type.
|
||||||
|
"""
|
||||||
|
metadata = (
|
||||||
|
structs_sym_tab.get(var_metadata)
|
||||||
|
if isinstance(var_metadata, str)
|
||||||
|
else var_metadata
|
||||||
|
)
|
||||||
|
if not metadata or field_name not in metadata.fields:
|
||||||
|
raise ValueError(f"Field '{field_name}' not found in struct")
|
||||||
|
|
||||||
|
field_type = metadata.field_type(field_name)
|
||||||
|
is_ptr_to_struct = isinstance(var_type, ir.PointerType) and isinstance(
|
||||||
|
var_metadata, str
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get struct pointer
|
||||||
|
struct_ptr = builder.load(var_ptr) if is_ptr_to_struct else var_ptr
|
||||||
|
|
||||||
|
should_load = not isinstance(field_type, ir.ArrayType)
|
||||||
|
|
||||||
|
def field_access_op(builder, ptr):
|
||||||
|
typed_ptr = builder.bitcast(ptr, metadata.ir_type.as_pointer())
|
||||||
|
field_ptr = metadata.gep(builder, typed_ptr, field_name)
|
||||||
|
return builder.load(field_ptr) if should_load else field_ptr
|
||||||
|
|
||||||
|
# Handle null check for pointer-to-struct
|
||||||
|
if is_ptr_to_struct:
|
||||||
|
if func is None:
|
||||||
|
raise ValueError("func required for null-safe struct pointer access")
|
||||||
|
|
||||||
|
if should_load:
|
||||||
|
result_type = field_type
|
||||||
|
else:
|
||||||
|
result_type = field_type.as_pointer()
|
||||||
|
|
||||||
|
result = _null_checked_operation(
|
||||||
|
func,
|
||||||
|
builder,
|
||||||
|
struct_ptr,
|
||||||
|
field_access_op,
|
||||||
|
result_type,
|
||||||
|
f"field_{field_name}",
|
||||||
|
)
|
||||||
|
return result, field_type
|
||||||
|
|
||||||
|
field_ptr = metadata.gep(builder, struct_ptr, field_name)
|
||||||
|
result = builder.load(field_ptr) if should_load else field_ptr
|
||||||
|
return result, field_type
|
||||||
|
|||||||
@ -1,6 +1,10 @@
|
|||||||
from .helper_registry import HelperHandlerRegistry
|
from .helper_registry import HelperHandlerRegistry
|
||||||
from .helper_utils import reset_scratch_pool
|
from .helper_utils import reset_scratch_pool
|
||||||
from .bpf_helper_handler import handle_helper_call, emit_probe_read_kernel_str_call
|
from .bpf_helper_handler import (
|
||||||
|
handle_helper_call,
|
||||||
|
emit_probe_read_kernel_str_call,
|
||||||
|
emit_probe_read_kernel_call,
|
||||||
|
)
|
||||||
from .helpers import (
|
from .helpers import (
|
||||||
ktime,
|
ktime,
|
||||||
pid,
|
pid,
|
||||||
@ -12,6 +16,7 @@ from .helpers import (
|
|||||||
smp_processor_id,
|
smp_processor_id,
|
||||||
uid,
|
uid,
|
||||||
skb_store_bytes,
|
skb_store_bytes,
|
||||||
|
get_current_cgroup_id,
|
||||||
get_stack,
|
get_stack,
|
||||||
XDP_DROP,
|
XDP_DROP,
|
||||||
XDP_PASS,
|
XDP_PASS,
|
||||||
@ -74,6 +79,8 @@ __all__ = [
|
|||||||
"reset_scratch_pool",
|
"reset_scratch_pool",
|
||||||
"handle_helper_call",
|
"handle_helper_call",
|
||||||
"emit_probe_read_kernel_str_call",
|
"emit_probe_read_kernel_str_call",
|
||||||
|
"emit_probe_read_kernel_call",
|
||||||
|
"get_current_cgroup_id",
|
||||||
"ktime",
|
"ktime",
|
||||||
"pid",
|
"pid",
|
||||||
"deref",
|
"deref",
|
||||||
|
|||||||
@ -30,10 +30,12 @@ class BPFHelperID(Enum):
|
|||||||
BPF_SKB_STORE_BYTES = 9
|
BPF_SKB_STORE_BYTES = 9
|
||||||
BPF_GET_CURRENT_PID_TGID = 14
|
BPF_GET_CURRENT_PID_TGID = 14
|
||||||
BPF_GET_CURRENT_UID_GID = 15
|
BPF_GET_CURRENT_UID_GID = 15
|
||||||
|
BPF_GET_CURRENT_CGROUP_ID = 80
|
||||||
BPF_GET_CURRENT_COMM = 16
|
BPF_GET_CURRENT_COMM = 16
|
||||||
BPF_PERF_EVENT_OUTPUT = 25
|
BPF_PERF_EVENT_OUTPUT = 25
|
||||||
BPF_GET_STACK = 67
|
BPF_GET_STACK = 67
|
||||||
BPF_PROBE_READ_KERNEL_STR = 115
|
BPF_PROBE_READ_KERNEL_STR = 115
|
||||||
|
BPF_PROBE_READ_KERNEL = 113
|
||||||
BPF_RINGBUF_OUTPUT = 130
|
BPF_RINGBUF_OUTPUT = 130
|
||||||
BPF_RINGBUF_RESERVE = 131
|
BPF_RINGBUF_RESERVE = 131
|
||||||
BPF_RINGBUF_SUBMIT = 132
|
BPF_RINGBUF_SUBMIT = 132
|
||||||
@ -67,6 +69,33 @@ def bpf_ktime_get_ns_emitter(
|
|||||||
return result, ir.IntType(64)
|
return result, ir.IntType(64)
|
||||||
|
|
||||||
|
|
||||||
|
@HelperHandlerRegistry.register(
|
||||||
|
"get_current_cgroup_id",
|
||||||
|
param_types=[],
|
||||||
|
return_type=ir.IntType(64),
|
||||||
|
)
|
||||||
|
def bpf_get_current_cgroup_id(
|
||||||
|
call,
|
||||||
|
map_ptr,
|
||||||
|
module,
|
||||||
|
builder,
|
||||||
|
func,
|
||||||
|
local_sym_tab=None,
|
||||||
|
struct_sym_tab=None,
|
||||||
|
map_sym_tab=None,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Emit LLVM IR for bpf_get_current_cgroup_id helper function call.
|
||||||
|
"""
|
||||||
|
# func is an arg to just have a uniform signature with other emitters
|
||||||
|
helper_id = ir.Constant(ir.IntType(64), BPFHelperID.BPF_GET_CURRENT_CGROUP_ID.value)
|
||||||
|
fn_type = ir.FunctionType(ir.IntType(64), [], var_arg=False)
|
||||||
|
fn_ptr_type = ir.PointerType(fn_type)
|
||||||
|
fn_ptr = builder.inttoptr(helper_id, fn_ptr_type)
|
||||||
|
result = builder.call(fn_ptr, [], tail=False)
|
||||||
|
return result, ir.IntType(64)
|
||||||
|
|
||||||
|
|
||||||
@HelperHandlerRegistry.register(
|
@HelperHandlerRegistry.register(
|
||||||
"lookup",
|
"lookup",
|
||||||
param_types=[ir.PointerType(ir.IntType(64))],
|
param_types=[ir.PointerType(ir.IntType(64))],
|
||||||
@ -574,6 +603,75 @@ def bpf_probe_read_kernel_str_emitter(
|
|||||||
return result, ir.IntType(64)
|
return result, ir.IntType(64)
|
||||||
|
|
||||||
|
|
||||||
|
def emit_probe_read_kernel_call(builder, dst_ptr, dst_size, src_ptr):
|
||||||
|
"""Emit LLVM IR call to bpf_probe_read_kernel"""
|
||||||
|
|
||||||
|
fn_type = ir.FunctionType(
|
||||||
|
ir.IntType(64),
|
||||||
|
[ir.PointerType(), ir.IntType(32), ir.PointerType()],
|
||||||
|
var_arg=False,
|
||||||
|
)
|
||||||
|
fn_ptr = builder.inttoptr(
|
||||||
|
ir.Constant(ir.IntType(64), BPFHelperID.BPF_PROBE_READ_KERNEL.value),
|
||||||
|
ir.PointerType(fn_type),
|
||||||
|
)
|
||||||
|
|
||||||
|
result = builder.call(
|
||||||
|
fn_ptr,
|
||||||
|
[
|
||||||
|
builder.bitcast(dst_ptr, ir.PointerType()),
|
||||||
|
ir.Constant(ir.IntType(32), dst_size),
|
||||||
|
builder.bitcast(src_ptr, ir.PointerType()),
|
||||||
|
],
|
||||||
|
tail=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
logger.info(f"Emitted bpf_probe_read_kernel (size={dst_size})")
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
@HelperHandlerRegistry.register(
|
||||||
|
"probe_read_kernel",
|
||||||
|
param_types=[
|
||||||
|
ir.PointerType(ir.IntType(8)),
|
||||||
|
ir.PointerType(ir.IntType(8)),
|
||||||
|
],
|
||||||
|
return_type=ir.IntType(64),
|
||||||
|
)
|
||||||
|
def bpf_probe_read_kernel_emitter(
|
||||||
|
call,
|
||||||
|
map_ptr,
|
||||||
|
module,
|
||||||
|
builder,
|
||||||
|
func,
|
||||||
|
local_sym_tab=None,
|
||||||
|
struct_sym_tab=None,
|
||||||
|
map_sym_tab=None,
|
||||||
|
):
|
||||||
|
"""Emit LLVM IR for bpf_probe_read_kernel helper."""
|
||||||
|
|
||||||
|
if len(call.args) != 2:
|
||||||
|
raise ValueError(
|
||||||
|
f"probe_read_kernel expects 2 args (dst, src), got {len(call.args)}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get destination buffer (char array -> i8*)
|
||||||
|
dst_ptr, dst_size = get_or_create_ptr_from_arg(
|
||||||
|
func, module, call.args[0], builder, local_sym_tab, map_sym_tab, struct_sym_tab
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get source pointer (evaluate expression)
|
||||||
|
src_ptr, src_type = get_ptr_from_arg(
|
||||||
|
call.args[1], func, module, builder, local_sym_tab, map_sym_tab, struct_sym_tab
|
||||||
|
)
|
||||||
|
|
||||||
|
# Emit the helper call
|
||||||
|
result = emit_probe_read_kernel_call(builder, dst_ptr, dst_size, src_ptr)
|
||||||
|
|
||||||
|
logger.info(f"Emitted bpf_probe_read_kernel (size={dst_size})")
|
||||||
|
return result, ir.IntType(64)
|
||||||
|
|
||||||
|
|
||||||
@HelperHandlerRegistry.register(
|
@HelperHandlerRegistry.register(
|
||||||
"random",
|
"random",
|
||||||
param_types=[],
|
param_types=[],
|
||||||
|
|||||||
@ -5,6 +5,7 @@ from llvmlite import ir
|
|||||||
from pythonbpf.expr import (
|
from pythonbpf.expr import (
|
||||||
get_operand_value,
|
get_operand_value,
|
||||||
eval_expr,
|
eval_expr,
|
||||||
|
access_struct_field,
|
||||||
)
|
)
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@ -135,7 +136,7 @@ def get_or_create_ptr_from_arg(
|
|||||||
and field_type.element.width == 8
|
and field_type.element.width == 8
|
||||||
):
|
):
|
||||||
ptr, sz = get_char_array_ptr_and_size(
|
ptr, sz = get_char_array_ptr_and_size(
|
||||||
arg, builder, local_sym_tab, struct_sym_tab
|
arg, builder, local_sym_tab, struct_sym_tab, func
|
||||||
)
|
)
|
||||||
if not ptr:
|
if not ptr:
|
||||||
raise ValueError("Failed to get char array pointer from struct field")
|
raise ValueError("Failed to get char array pointer from struct field")
|
||||||
@ -266,7 +267,9 @@ def get_buffer_ptr_and_size(buf_arg, builder, local_sym_tab, struct_sym_tab):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def get_char_array_ptr_and_size(buf_arg, builder, local_sym_tab, struct_sym_tab):
|
def get_char_array_ptr_and_size(
|
||||||
|
buf_arg, builder, local_sym_tab, struct_sym_tab, func=None
|
||||||
|
):
|
||||||
"""Get pointer to char array and its size."""
|
"""Get pointer to char array and its size."""
|
||||||
|
|
||||||
# Struct field: obj.field
|
# Struct field: obj.field
|
||||||
@ -277,11 +280,11 @@ def get_char_array_ptr_and_size(buf_arg, builder, local_sym_tab, struct_sym_tab)
|
|||||||
if not (local_sym_tab and var_name in local_sym_tab):
|
if not (local_sym_tab and var_name in local_sym_tab):
|
||||||
raise ValueError(f"Variable '{var_name}' not found")
|
raise ValueError(f"Variable '{var_name}' not found")
|
||||||
|
|
||||||
struct_type = local_sym_tab[var_name].metadata
|
struct_ptr, struct_type, struct_metadata = local_sym_tab[var_name]
|
||||||
if not (struct_sym_tab and struct_type in struct_sym_tab):
|
if not (struct_sym_tab and struct_metadata in struct_sym_tab):
|
||||||
raise ValueError(f"Struct type '{struct_type}' not found")
|
raise ValueError(f"Struct type '{struct_metadata}' not found")
|
||||||
|
|
||||||
struct_info = struct_sym_tab[struct_type]
|
struct_info = struct_sym_tab[struct_metadata]
|
||||||
if field_name not in struct_info.fields:
|
if field_name not in struct_info.fields:
|
||||||
raise ValueError(f"Field '{field_name}' not found")
|
raise ValueError(f"Field '{field_name}' not found")
|
||||||
|
|
||||||
@ -292,8 +295,24 @@ def get_char_array_ptr_and_size(buf_arg, builder, local_sym_tab, struct_sym_tab)
|
|||||||
)
|
)
|
||||||
return None, 0
|
return None, 0
|
||||||
|
|
||||||
struct_ptr = local_sym_tab[var_name].var
|
# Check if char array
|
||||||
field_ptr = struct_info.gep(builder, struct_ptr, field_name)
|
if not (
|
||||||
|
isinstance(field_type, ir.ArrayType)
|
||||||
|
and isinstance(field_type.element, ir.IntType)
|
||||||
|
and field_type.element.width == 8
|
||||||
|
):
|
||||||
|
logger.warning("Field is not a char array")
|
||||||
|
return None, 0
|
||||||
|
|
||||||
|
field_ptr, _ = access_struct_field(
|
||||||
|
builder,
|
||||||
|
struct_ptr,
|
||||||
|
struct_type,
|
||||||
|
struct_metadata,
|
||||||
|
field_name,
|
||||||
|
struct_sym_tab,
|
||||||
|
func,
|
||||||
|
)
|
||||||
|
|
||||||
# GEP to first element: [N x i8]* -> i8*
|
# GEP to first element: [N x i8]* -> i8*
|
||||||
buf_ptr = builder.gep(
|
buf_ptr = builder.gep(
|
||||||
|
|||||||
@ -57,6 +57,11 @@ def get_stack(buf, flags=0):
|
|||||||
return ctypes.c_int64(0)
|
return ctypes.c_int64(0)
|
||||||
|
|
||||||
|
|
||||||
|
def get_current_cgroup_id():
|
||||||
|
"""Get the current cgroup ID"""
|
||||||
|
return ctypes.c_int64(0)
|
||||||
|
|
||||||
|
|
||||||
XDP_ABORTED = ctypes.c_int64(0)
|
XDP_ABORTED = ctypes.c_int64(0)
|
||||||
XDP_DROP = ctypes.c_int64(1)
|
XDP_DROP = ctypes.c_int64(1)
|
||||||
XDP_PASS = ctypes.c_int64(2)
|
XDP_PASS = ctypes.c_int64(2)
|
||||||
|
|||||||
@ -222,7 +222,7 @@ def _prepare_expr_args(expr, func, module, builder, local_sym_tab, struct_sym_ta
|
|||||||
# Special case: struct field char array needs pointer to first element
|
# Special case: struct field char array needs pointer to first element
|
||||||
if isinstance(expr, ast.Attribute):
|
if isinstance(expr, ast.Attribute):
|
||||||
char_array_ptr, _ = get_char_array_ptr_and_size(
|
char_array_ptr, _ = get_char_array_ptr_and_size(
|
||||||
expr, builder, local_sym_tab, struct_sym_tab
|
expr, builder, local_sym_tab, struct_sym_tab, func
|
||||||
)
|
)
|
||||||
if char_array_ptr:
|
if char_array_ptr:
|
||||||
return char_array_ptr
|
return char_array_ptr
|
||||||
|
|||||||
@ -117,6 +117,7 @@ def _get_key_val_dbg_type(name, generator, structs_sym_tab):
|
|||||||
|
|
||||||
type_obj = structs_sym_tab.get(name)
|
type_obj = structs_sym_tab.get(name)
|
||||||
if type_obj:
|
if type_obj:
|
||||||
|
logger.info(f"Found struct named {name}, generating debug type")
|
||||||
return _get_struct_debug_type(type_obj, generator, structs_sym_tab)
|
return _get_struct_debug_type(type_obj, generator, structs_sym_tab)
|
||||||
|
|
||||||
# Fallback to basic types
|
# Fallback to basic types
|
||||||
@ -165,6 +166,6 @@ def _get_struct_debug_type(struct_obj, generator, structs_sym_tab):
|
|||||||
)
|
)
|
||||||
elements_arr.append(member)
|
elements_arr.append(member)
|
||||||
struct_type = generator.create_struct_type(
|
struct_type = generator.create_struct_type(
|
||||||
elements_arr, struct_obj.size, is_distinct=True
|
elements_arr, struct_obj.size * 8, is_distinct=True
|
||||||
)
|
)
|
||||||
return struct_type
|
return struct_type
|
||||||
|
|||||||
@ -135,7 +135,7 @@ def process_perf_event_map(map_name, rval, module, structs_sym_tab):
|
|||||||
logger.info(f"Map parameters: {map_params}")
|
logger.info(f"Map parameters: {map_params}")
|
||||||
map_global = create_bpf_map(module, map_name, map_params)
|
map_global = create_bpf_map(module, map_name, map_params)
|
||||||
# Generate debug info for BTF
|
# Generate debug info for BTF
|
||||||
create_map_debug_info(module, map_global.sym, map_name, map_params)
|
create_map_debug_info(module, map_global.sym, map_name, map_params, structs_sym_tab)
|
||||||
return map_global
|
return map_global
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@ -18,6 +18,10 @@ mapping = {
|
|||||||
"c_longlong": ir.IntType(64),
|
"c_longlong": ir.IntType(64),
|
||||||
"c_uint": ir.IntType(32),
|
"c_uint": ir.IntType(32),
|
||||||
"c_int": ir.IntType(32),
|
"c_int": ir.IntType(32),
|
||||||
|
"c_ushort": ir.IntType(16),
|
||||||
|
"c_short": ir.IntType(16),
|
||||||
|
"c_ubyte": ir.IntType(8),
|
||||||
|
"c_byte": ir.IntType(8),
|
||||||
# Not so sure about this one
|
# Not so sure about this one
|
||||||
"str": ir.PointerType(ir.IntType(8)),
|
"str": ir.PointerType(ir.IntType(8)),
|
||||||
}
|
}
|
||||||
|
|||||||
@ -16,6 +16,33 @@ def get_module_symbols(module_name: str):
|
|||||||
return [name for name in dir(imported_module)], imported_module
|
return [name for name in dir(imported_module)], imported_module
|
||||||
|
|
||||||
|
|
||||||
|
def unwrap_pointer_type(type_obj: Any) -> Any:
|
||||||
|
"""
|
||||||
|
Recursively unwrap all pointer layers to get the base type.
|
||||||
|
|
||||||
|
This handles multiply nested pointers like LP_LP_struct_attribute_group
|
||||||
|
and returns the base type (struct_attribute_group).
|
||||||
|
|
||||||
|
Stops unwrapping when reaching a non-pointer type (one without _type_ attribute).
|
||||||
|
|
||||||
|
Args:
|
||||||
|
type_obj: The type object to unwrap
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The base type after unwrapping all pointer layers
|
||||||
|
"""
|
||||||
|
current_type = type_obj
|
||||||
|
# Keep unwrapping while it's a pointer/array type (has _type_)
|
||||||
|
# But stop if _type_ is just a string or basic type marker
|
||||||
|
while hasattr(current_type, "_type_"):
|
||||||
|
next_type = current_type._type_
|
||||||
|
# Stop if _type_ is a string (like 'c' for c_char)
|
||||||
|
if isinstance(next_type, str):
|
||||||
|
break
|
||||||
|
current_type = next_type
|
||||||
|
return current_type
|
||||||
|
|
||||||
|
|
||||||
def process_vmlinux_class(
|
def process_vmlinux_class(
|
||||||
node,
|
node,
|
||||||
llvm_module,
|
llvm_module,
|
||||||
@ -158,13 +185,90 @@ def process_vmlinux_post_ast(
|
|||||||
if hasattr(elem_type, "_length_") and is_complex_type:
|
if hasattr(elem_type, "_length_") and is_complex_type:
|
||||||
type_length = elem_type._length_
|
type_length = elem_type._length_
|
||||||
|
|
||||||
if containing_type.__module__ == "vmlinux":
|
# Unwrap all pointer layers to get the base type for dependency tracking
|
||||||
new_dep_node.add_dependent(
|
base_type = unwrap_pointer_type(elem_type)
|
||||||
elem_type._type_.__name__
|
base_type_module = getattr(base_type, "__module__", None)
|
||||||
if hasattr(elem_type._type_, "__name__")
|
|
||||||
else str(elem_type._type_)
|
if base_type_module == "vmlinux":
|
||||||
|
base_type_name = (
|
||||||
|
base_type.__name__
|
||||||
|
if hasattr(base_type, "__name__")
|
||||||
|
else str(base_type)
|
||||||
|
)
|
||||||
|
# ONLY add vmlinux types as dependencies
|
||||||
|
new_dep_node.add_dependent(base_type_name)
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
f"{containing_type} containing type of parent {elem_name} with {elem_type} and ctype {ctype_complex_type} and length {type_length}"
|
||||||
|
)
|
||||||
|
new_dep_node.set_field_containing_type(
|
||||||
|
elem_name, containing_type
|
||||||
|
)
|
||||||
|
new_dep_node.set_field_type_size(elem_name, type_length)
|
||||||
|
new_dep_node.set_field_ctype_complex_type(
|
||||||
|
elem_name, ctype_complex_type
|
||||||
|
)
|
||||||
|
new_dep_node.set_field_type(elem_name, elem_type)
|
||||||
|
|
||||||
|
# Check the containing_type module to decide whether to recurse
|
||||||
|
containing_type_module = getattr(
|
||||||
|
containing_type, "__module__", None
|
||||||
|
)
|
||||||
|
if containing_type_module == "vmlinux":
|
||||||
|
# Also unwrap containing_type to get base type name
|
||||||
|
base_containing_type = unwrap_pointer_type(
|
||||||
|
containing_type
|
||||||
|
)
|
||||||
|
containing_type_name = (
|
||||||
|
base_containing_type.__name__
|
||||||
|
if hasattr(base_containing_type, "__name__")
|
||||||
|
else str(base_containing_type)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check for self-reference or already processed
|
||||||
|
if containing_type_name == current_symbol_name:
|
||||||
|
# Self-referential pointer
|
||||||
|
logger.debug(
|
||||||
|
f"Self-referential pointer in {current_symbol_name}.{elem_name}"
|
||||||
|
)
|
||||||
|
new_dep_node.set_field_ready(elem_name, True)
|
||||||
|
elif handler.has_node(containing_type_name):
|
||||||
|
# Already processed
|
||||||
|
logger.debug(
|
||||||
|
f"Reusing already processed {containing_type_name}"
|
||||||
|
)
|
||||||
|
new_dep_node.set_field_ready(elem_name, True)
|
||||||
|
else:
|
||||||
|
# Process recursively - use base containing type, not the pointer wrapper
|
||||||
|
new_dep_node.add_dependent(containing_type_name)
|
||||||
|
process_vmlinux_post_ast(
|
||||||
|
base_containing_type,
|
||||||
|
llvm_handler,
|
||||||
|
handler,
|
||||||
|
processing_stack,
|
||||||
|
)
|
||||||
|
new_dep_node.set_field_ready(elem_name, True)
|
||||||
|
elif (
|
||||||
|
containing_type_module == ctypes.__name__
|
||||||
|
or containing_type_module is None
|
||||||
|
):
|
||||||
|
logger.debug(
|
||||||
|
f"Processing ctype internal{containing_type}"
|
||||||
|
)
|
||||||
|
new_dep_node.set_field_ready(elem_name, True)
|
||||||
|
else:
|
||||||
|
raise TypeError(
|
||||||
|
f"Module not supported in recursive resolution: {containing_type_module}"
|
||||||
|
)
|
||||||
|
elif (
|
||||||
|
base_type_module == ctypes.__name__
|
||||||
|
or base_type_module is None
|
||||||
|
):
|
||||||
|
# Handle ctypes or types with no module (like some internal ctypes types)
|
||||||
|
# DO NOT add ctypes as dependencies - just set field metadata and mark ready
|
||||||
|
logger.debug(
|
||||||
|
f"Base type {base_type} is ctypes - NOT adding as dependency, just processing field"
|
||||||
)
|
)
|
||||||
elif containing_type.__module__ == ctypes.__name__:
|
|
||||||
if isinstance(elem_type, type):
|
if isinstance(elem_type, type):
|
||||||
if issubclass(elem_type, ctypes.Array):
|
if issubclass(elem_type, ctypes.Array):
|
||||||
ctype_complex_type = ctypes.Array
|
ctype_complex_type = ctypes.Array
|
||||||
@ -176,57 +280,20 @@ def process_vmlinux_post_ast(
|
|||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
raise TypeError("Unsupported ctypes subclass")
|
raise TypeError("Unsupported ctypes subclass")
|
||||||
else:
|
|
||||||
raise ImportError(
|
|
||||||
f"Unsupported module of {containing_type}"
|
|
||||||
)
|
|
||||||
logger.debug(
|
|
||||||
f"{containing_type} containing type of parent {elem_name} with {elem_type} and ctype {ctype_complex_type} and length {type_length}"
|
|
||||||
)
|
|
||||||
new_dep_node.set_field_containing_type(
|
|
||||||
elem_name, containing_type
|
|
||||||
)
|
|
||||||
new_dep_node.set_field_type_size(elem_name, type_length)
|
|
||||||
new_dep_node.set_field_ctype_complex_type(
|
|
||||||
elem_name, ctype_complex_type
|
|
||||||
)
|
|
||||||
new_dep_node.set_field_type(elem_name, elem_type)
|
|
||||||
if containing_type.__module__ == "vmlinux":
|
|
||||||
containing_type_name = (
|
|
||||||
containing_type.__name__
|
|
||||||
if hasattr(containing_type, "__name__")
|
|
||||||
else str(containing_type)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Check for self-reference or already processed
|
# Set field metadata but DO NOT add dependency or recurse
|
||||||
if containing_type_name == current_symbol_name:
|
new_dep_node.set_field_containing_type(
|
||||||
# Self-referential pointer
|
elem_name, containing_type
|
||||||
logger.debug(
|
)
|
||||||
f"Self-referential pointer in {current_symbol_name}.{elem_name}"
|
new_dep_node.set_field_type_size(elem_name, type_length)
|
||||||
)
|
new_dep_node.set_field_ctype_complex_type(
|
||||||
new_dep_node.set_field_ready(elem_name, True)
|
elem_name, ctype_complex_type
|
||||||
elif handler.has_node(containing_type_name):
|
)
|
||||||
# Already processed
|
new_dep_node.set_field_type(elem_name, elem_type)
|
||||||
logger.debug(
|
|
||||||
f"Reusing already processed {containing_type_name}"
|
|
||||||
)
|
|
||||||
new_dep_node.set_field_ready(elem_name, True)
|
|
||||||
else:
|
|
||||||
# Process recursively - THIS WAS MISSING
|
|
||||||
new_dep_node.add_dependent(containing_type_name)
|
|
||||||
process_vmlinux_post_ast(
|
|
||||||
containing_type,
|
|
||||||
llvm_handler,
|
|
||||||
handler,
|
|
||||||
processing_stack,
|
|
||||||
)
|
|
||||||
new_dep_node.set_field_ready(elem_name, True)
|
|
||||||
elif containing_type.__module__ == ctypes.__name__:
|
|
||||||
logger.debug(f"Processing ctype internal{containing_type}")
|
|
||||||
new_dep_node.set_field_ready(elem_name, True)
|
new_dep_node.set_field_ready(elem_name, True)
|
||||||
else:
|
else:
|
||||||
raise TypeError(
|
raise ImportError(
|
||||||
"Module not supported in recursive resolution"
|
f"Unsupported module of {base_type}: {base_type_module}"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
new_dep_node.add_dependent(
|
new_dep_node.add_dependent(
|
||||||
@ -245,9 +312,12 @@ def process_vmlinux_post_ast(
|
|||||||
raise ValueError(
|
raise ValueError(
|
||||||
f"{elem_name} with type {elem_type} from module {module_name} not supported in recursive resolver"
|
f"{elem_name} with type {elem_type} from module {module_name} not supported in recursive resolver"
|
||||||
)
|
)
|
||||||
|
elif module_name == ctypes.__name__ or module_name is None:
|
||||||
|
# Handle ctypes types - these don't need processing, just return
|
||||||
|
logger.debug(f"Skipping ctypes type {current_symbol_name}")
|
||||||
|
return True
|
||||||
else:
|
else:
|
||||||
raise ImportError("UNSUPPORTED Module")
|
raise ImportError(f"UNSUPPORTED Module {module_name}")
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
f"{current_symbol_name} processed and handler readiness {handler.is_ready}"
|
f"{current_symbol_name} processed and handler readiness {handler.is_ready}"
|
||||||
|
|||||||
@ -11,7 +11,9 @@ from .class_handler import process_vmlinux_class
|
|||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def detect_import_statement(tree: ast.AST) -> list[tuple[str, ast.ImportFrom]]:
|
def detect_import_statement(
|
||||||
|
tree: ast.AST,
|
||||||
|
) -> list[tuple[str, ast.ImportFrom, str, str]]:
|
||||||
"""
|
"""
|
||||||
Parse AST and detect import statements from vmlinux.
|
Parse AST and detect import statements from vmlinux.
|
||||||
|
|
||||||
@ -25,7 +27,7 @@ def detect_import_statement(tree: ast.AST) -> list[tuple[str, ast.ImportFrom]]:
|
|||||||
List of tuples containing (module_name, imported_item) for each vmlinux import
|
List of tuples containing (module_name, imported_item) for each vmlinux import
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
SyntaxError: If multiple imports from vmlinux are attempted or import * is used
|
SyntaxError: If import * is used
|
||||||
"""
|
"""
|
||||||
vmlinux_imports = []
|
vmlinux_imports = []
|
||||||
|
|
||||||
@ -40,28 +42,19 @@ def detect_import_statement(tree: ast.AST) -> list[tuple[str, ast.ImportFrom]]:
|
|||||||
"Please import specific types explicitly."
|
"Please import specific types explicitly."
|
||||||
)
|
)
|
||||||
|
|
||||||
# Check for multiple imports: from vmlinux import A, B, C
|
|
||||||
if len(node.names) > 1:
|
|
||||||
imported_names = [alias.name for alias in node.names]
|
|
||||||
raise SyntaxError(
|
|
||||||
f"Multiple imports from vmlinux are not supported. "
|
|
||||||
f"Found: {', '.join(imported_names)}. "
|
|
||||||
f"Please use separate import statements for each type."
|
|
||||||
)
|
|
||||||
|
|
||||||
# Check if no specific import is specified (should not happen with valid Python)
|
# Check if no specific import is specified (should not happen with valid Python)
|
||||||
if len(node.names) == 0:
|
if len(node.names) == 0:
|
||||||
raise SyntaxError(
|
raise SyntaxError(
|
||||||
"Import from vmlinux must specify at least one type."
|
"Import from vmlinux must specify at least one type."
|
||||||
)
|
)
|
||||||
|
|
||||||
# Valid single import
|
# Support multiple imports: from vmlinux import A, B, C
|
||||||
for alias in node.names:
|
for alias in node.names:
|
||||||
import_name = alias.name
|
import_name = alias.name
|
||||||
# Use alias if provided, otherwise use the original name (commented)
|
# Use alias if provided, otherwise use the original name
|
||||||
# as_name = alias.asname if alias.asname else alias.name
|
as_name = alias.asname if alias.asname else alias.name
|
||||||
vmlinux_imports.append(("vmlinux", node))
|
vmlinux_imports.append(("vmlinux", node, import_name, as_name))
|
||||||
logger.info(f"Found vmlinux import: {import_name}")
|
logger.info(f"Found vmlinux import: {import_name} as {as_name}")
|
||||||
|
|
||||||
# Handle "import vmlinux" statements (not typical but should be rejected)
|
# Handle "import vmlinux" statements (not typical but should be rejected)
|
||||||
elif isinstance(node, ast.Import):
|
elif isinstance(node, ast.Import):
|
||||||
@ -103,40 +96,37 @@ def vmlinux_proc(tree: ast.AST, module):
|
|||||||
with open(source_file, "r") as f:
|
with open(source_file, "r") as f:
|
||||||
mod_ast = ast.parse(f.read(), filename=source_file)
|
mod_ast = ast.parse(f.read(), filename=source_file)
|
||||||
|
|
||||||
for import_mod, import_node in import_statements:
|
for import_mod, import_node, imported_name, as_name in import_statements:
|
||||||
for alias in import_node.names:
|
found = False
|
||||||
imported_name = alias.name
|
for mod_node in mod_ast.body:
|
||||||
found = False
|
if isinstance(mod_node, ast.ClassDef) and mod_node.name == imported_name:
|
||||||
for mod_node in mod_ast.body:
|
process_vmlinux_class(mod_node, module, handler)
|
||||||
if (
|
found = True
|
||||||
isinstance(mod_node, ast.ClassDef)
|
break
|
||||||
and mod_node.name == imported_name
|
if isinstance(mod_node, ast.Assign):
|
||||||
):
|
for target in mod_node.targets:
|
||||||
process_vmlinux_class(mod_node, module, handler)
|
if isinstance(target, ast.Name) and target.id == imported_name:
|
||||||
found = True
|
process_vmlinux_assign(mod_node, module, assignments, as_name)
|
||||||
break
|
found = True
|
||||||
if isinstance(mod_node, ast.Assign):
|
break
|
||||||
for target in mod_node.targets:
|
if found:
|
||||||
if isinstance(target, ast.Name) and target.id == imported_name:
|
break
|
||||||
process_vmlinux_assign(mod_node, module, assignments)
|
if not found:
|
||||||
found = True
|
logger.info(f"{imported_name} not found as ClassDef or Assign in vmlinux")
|
||||||
break
|
|
||||||
if found:
|
|
||||||
break
|
|
||||||
if not found:
|
|
||||||
logger.info(
|
|
||||||
f"{imported_name} not found as ClassDef or Assign in vmlinux"
|
|
||||||
)
|
|
||||||
|
|
||||||
IRGenerator(module, handler, assignments)
|
IRGenerator(module, handler, assignments)
|
||||||
return assignments
|
return assignments
|
||||||
|
|
||||||
|
|
||||||
def process_vmlinux_assign(node, module, assignments: dict[str, AssignmentInfo]):
|
def process_vmlinux_assign(
|
||||||
|
node, module, assignments: dict[str, AssignmentInfo], target_name=None
|
||||||
|
):
|
||||||
"""Process assignments from vmlinux module."""
|
"""Process assignments from vmlinux module."""
|
||||||
# Only handle single-target assignments
|
# Only handle single-target assignments
|
||||||
if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
|
if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
|
||||||
target_name = node.targets[0].id
|
# Use provided target_name (for aliased imports) or fall back to original name
|
||||||
|
if target_name is None:
|
||||||
|
target_name = node.targets[0].id
|
||||||
|
|
||||||
# Handle constant value assignments
|
# Handle constant value assignments
|
||||||
if isinstance(node.value, ast.Constant):
|
if isinstance(node.value, ast.Constant):
|
||||||
|
|||||||
@ -21,7 +21,7 @@ def debug_info_generation(
|
|||||||
generated_debug_info: List of tuples (struct, debug_info) to track generated debug info
|
generated_debug_info: List of tuples (struct, debug_info) to track generated debug info
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
The generated global variable debug info
|
The generated global variable debug info, or None for unsupported types
|
||||||
"""
|
"""
|
||||||
# Set up debug info generator
|
# Set up debug info generator
|
||||||
generator = DebugInfoGenerator(llvm_module)
|
generator = DebugInfoGenerator(llvm_module)
|
||||||
@ -31,23 +31,42 @@ def debug_info_generation(
|
|||||||
if existing_struct.name == struct.name:
|
if existing_struct.name == struct.name:
|
||||||
return debug_info
|
return debug_info
|
||||||
|
|
||||||
|
# Check if this is a union (not supported yet)
|
||||||
|
if not struct.name.startswith("struct_"):
|
||||||
|
logger.warning(f"Skipping debug info generation for union: {struct.name}")
|
||||||
|
# Create a minimal forward declaration for unions
|
||||||
|
union_type = generator.create_struct_type(
|
||||||
|
[], struct.__sizeof__() * 8, is_distinct=True
|
||||||
|
)
|
||||||
|
return union_type
|
||||||
|
|
||||||
# Process all fields and create members for the struct
|
# Process all fields and create members for the struct
|
||||||
members = []
|
members = []
|
||||||
for field_name, field in struct.fields.items():
|
|
||||||
# Get appropriate debug type for this field
|
|
||||||
field_type = _get_field_debug_type(
|
|
||||||
field_name, field, generator, struct, generated_debug_info
|
|
||||||
)
|
|
||||||
# Create struct member with proper offset
|
|
||||||
member = generator.create_struct_member_vmlinux(
|
|
||||||
field_name, field_type, field.offset * 8
|
|
||||||
)
|
|
||||||
members.append(member)
|
|
||||||
|
|
||||||
if struct.name.startswith("struct_"):
|
sorted_fields = sorted(struct.fields.items(), key=lambda item: item[1].offset)
|
||||||
struct_name = struct.name.removeprefix("struct_")
|
|
||||||
else:
|
for field_name, field in sorted_fields:
|
||||||
raise ValueError("Unions are not supported in the current version")
|
try:
|
||||||
|
# Get appropriate debug type for this field
|
||||||
|
field_type = _get_field_debug_type(
|
||||||
|
field_name, field, generator, struct, generated_debug_info
|
||||||
|
)
|
||||||
|
|
||||||
|
# Ensure field_type is a tuple
|
||||||
|
if not isinstance(field_type, tuple) or len(field_type) != 2:
|
||||||
|
logger.error(f"Invalid field_type for {field_name}: {field_type}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Create struct member with proper offset
|
||||||
|
member = generator.create_struct_member_vmlinux(
|
||||||
|
field_name, field_type, field.offset * 8
|
||||||
|
)
|
||||||
|
members.append(member)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to process field {field_name} in {struct.name}: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
struct_name = struct.name.removeprefix("struct_")
|
||||||
# Create struct type with all members
|
# Create struct type with all members
|
||||||
struct_type = generator.create_struct_type_with_name(
|
struct_type = generator.create_struct_type_with_name(
|
||||||
struct_name, members, struct.__sizeof__() * 8, is_distinct=True
|
struct_name, members, struct.__sizeof__() * 8, is_distinct=True
|
||||||
@ -74,11 +93,19 @@ def _get_field_debug_type(
|
|||||||
generated_debug_info: List of already generated debug info
|
generated_debug_info: List of already generated debug info
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
The debug info type for this field
|
A tuple of (debug_type, size_in_bits)
|
||||||
"""
|
"""
|
||||||
# Handle complex types (arrays, pointers)
|
# Handle complex types (arrays, pointers, function pointers)
|
||||||
if field.ctype_complex_type is not None:
|
if field.ctype_complex_type is not None:
|
||||||
if issubclass(field.ctype_complex_type, ctypes.Array):
|
# Handle function pointer types (CFUNCTYPE)
|
||||||
|
if callable(field.ctype_complex_type):
|
||||||
|
# Function pointers are represented as void pointers
|
||||||
|
logger.warning(
|
||||||
|
f"Field {field_name} is a function pointer, using void pointer"
|
||||||
|
)
|
||||||
|
void_ptr = generator.create_pointer_type(None, 64)
|
||||||
|
return void_ptr, 64
|
||||||
|
elif issubclass(field.ctype_complex_type, ctypes.Array):
|
||||||
# Handle array types
|
# Handle array types
|
||||||
element_type, base_type_size = _get_basic_debug_type(
|
element_type, base_type_size = _get_basic_debug_type(
|
||||||
field.containing_type, generator
|
field.containing_type, generator
|
||||||
@ -100,11 +127,13 @@ def _get_field_debug_type(
|
|||||||
for existing_struct, debug_info in generated_debug_info:
|
for existing_struct, debug_info in generated_debug_info:
|
||||||
if existing_struct.name == struct_name:
|
if existing_struct.name == struct_name:
|
||||||
# Use existing debug info
|
# Use existing debug info
|
||||||
return debug_info, existing_struct.__sizeof__()
|
return debug_info, existing_struct.__sizeof__() * 8
|
||||||
|
|
||||||
# If not found, create a forward declaration
|
# If not found, create a forward declaration
|
||||||
# This will be completed when the actual struct is processed
|
# This will be completed when the actual struct is processed
|
||||||
logger.warning("Forward declaration in struct created")
|
logger.info(
|
||||||
|
f"Forward declaration created for {struct_name} in {parent_struct.name}"
|
||||||
|
)
|
||||||
forward_type = generator.create_struct_type([], 0, is_distinct=True)
|
forward_type = generator.create_struct_type([], 0, is_distinct=True)
|
||||||
return forward_type, 0
|
return forward_type, 0
|
||||||
|
|
||||||
|
|||||||
@ -11,6 +11,10 @@ logger = logging.getLogger(__name__)
|
|||||||
|
|
||||||
|
|
||||||
class IRGenerator:
|
class IRGenerator:
|
||||||
|
# This field keeps track of the non_struct names to avoid duplicate name errors.
|
||||||
|
type_number = 0
|
||||||
|
unprocessed_store: list[str] = []
|
||||||
|
|
||||||
# get the assignments dict and add this stuff to it.
|
# get the assignments dict and add this stuff to it.
|
||||||
def __init__(self, llvm_module, handler: DependencyHandler, assignments):
|
def __init__(self, llvm_module, handler: DependencyHandler, assignments):
|
||||||
self.llvm_module = llvm_module
|
self.llvm_module = llvm_module
|
||||||
@ -129,7 +133,19 @@ class IRGenerator:
|
|||||||
|
|
||||||
for field_name, field in struct.fields.items():
|
for field_name, field in struct.fields.items():
|
||||||
# does not take arrays and similar types into consideration yet.
|
# does not take arrays and similar types into consideration yet.
|
||||||
if field.ctype_complex_type is not None and issubclass(
|
if callable(field.ctype_complex_type):
|
||||||
|
# Function pointer case - generate a simple field accessor
|
||||||
|
field_co_re_name, returned = self._struct_name_generator(
|
||||||
|
struct, field, field_index
|
||||||
|
)
|
||||||
|
field_index += 1
|
||||||
|
globvar = ir.GlobalVariable(
|
||||||
|
self.llvm_module, ir.IntType(64), name=field_co_re_name
|
||||||
|
)
|
||||||
|
globvar.linkage = "external"
|
||||||
|
globvar.set_metadata("llvm.preserve.access.index", debug_info)
|
||||||
|
self.generated_field_names[struct.name][field_name] = globvar
|
||||||
|
elif field.ctype_complex_type is not None and issubclass(
|
||||||
field.ctype_complex_type, ctypes.Array
|
field.ctype_complex_type, ctypes.Array
|
||||||
):
|
):
|
||||||
array_size = field.type_size
|
array_size = field.type_size
|
||||||
@ -137,7 +153,7 @@ class IRGenerator:
|
|||||||
if containing_type.__module__ == ctypes.__name__:
|
if containing_type.__module__ == ctypes.__name__:
|
||||||
containing_type_size = ctypes.sizeof(containing_type)
|
containing_type_size = ctypes.sizeof(containing_type)
|
||||||
if array_size == 0:
|
if array_size == 0:
|
||||||
field_co_re_name = self._struct_name_generator(
|
field_co_re_name, returned = self._struct_name_generator(
|
||||||
struct, field, field_index, True, 0, containing_type_size
|
struct, field, field_index, True, 0, containing_type_size
|
||||||
)
|
)
|
||||||
globvar = ir.GlobalVariable(
|
globvar = ir.GlobalVariable(
|
||||||
@ -149,7 +165,7 @@ class IRGenerator:
|
|||||||
field_index += 1
|
field_index += 1
|
||||||
continue
|
continue
|
||||||
for i in range(0, array_size):
|
for i in range(0, array_size):
|
||||||
field_co_re_name = self._struct_name_generator(
|
field_co_re_name, returned = self._struct_name_generator(
|
||||||
struct, field, field_index, True, i, containing_type_size
|
struct, field, field_index, True, i, containing_type_size
|
||||||
)
|
)
|
||||||
globvar = ir.GlobalVariable(
|
globvar = ir.GlobalVariable(
|
||||||
@ -163,12 +179,28 @@ class IRGenerator:
|
|||||||
array_size = field.type_size
|
array_size = field.type_size
|
||||||
containing_type = field.containing_type
|
containing_type = field.containing_type
|
||||||
if containing_type.__module__ == "vmlinux":
|
if containing_type.__module__ == "vmlinux":
|
||||||
containing_type_size = self.handler[
|
# Unwrap all pointer layers to get the base struct type
|
||||||
containing_type.__name__
|
base_containing_type = containing_type
|
||||||
].current_offset
|
while hasattr(base_containing_type, "_type_"):
|
||||||
for i in range(0, array_size):
|
next_type = base_containing_type._type_
|
||||||
field_co_re_name = self._struct_name_generator(
|
# Stop if _type_ is a string (like 'c' for c_char)
|
||||||
struct, field, field_index, True, i, containing_type_size
|
# TODO: stacked pointers not handl;ing ctypes check here as well
|
||||||
|
if isinstance(next_type, str):
|
||||||
|
break
|
||||||
|
base_containing_type = next_type
|
||||||
|
|
||||||
|
# Get the base struct name
|
||||||
|
base_struct_name = (
|
||||||
|
base_containing_type.__name__
|
||||||
|
if hasattr(base_containing_type, "__name__")
|
||||||
|
else str(base_containing_type)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Look up the size using the base struct name
|
||||||
|
containing_type_size = self.handler[base_struct_name].current_offset
|
||||||
|
if array_size == 0:
|
||||||
|
field_co_re_name, returned = self._struct_name_generator(
|
||||||
|
struct, field, field_index, True, 0, containing_type_size
|
||||||
)
|
)
|
||||||
globvar = ir.GlobalVariable(
|
globvar = ir.GlobalVariable(
|
||||||
self.llvm_module, ir.IntType(64), name=field_co_re_name
|
self.llvm_module, ir.IntType(64), name=field_co_re_name
|
||||||
@ -176,9 +208,30 @@ class IRGenerator:
|
|||||||
globvar.linkage = "external"
|
globvar.linkage = "external"
|
||||||
globvar.set_metadata("llvm.preserve.access.index", debug_info)
|
globvar.set_metadata("llvm.preserve.access.index", debug_info)
|
||||||
self.generated_field_names[struct.name][field_name] = globvar
|
self.generated_field_names[struct.name][field_name] = globvar
|
||||||
field_index += 1
|
field_index += 1
|
||||||
|
else:
|
||||||
|
for i in range(0, array_size):
|
||||||
|
field_co_re_name, returned = self._struct_name_generator(
|
||||||
|
struct,
|
||||||
|
field,
|
||||||
|
field_index,
|
||||||
|
True,
|
||||||
|
i,
|
||||||
|
containing_type_size,
|
||||||
|
)
|
||||||
|
globvar = ir.GlobalVariable(
|
||||||
|
self.llvm_module, ir.IntType(64), name=field_co_re_name
|
||||||
|
)
|
||||||
|
globvar.linkage = "external"
|
||||||
|
globvar.set_metadata(
|
||||||
|
"llvm.preserve.access.index", debug_info
|
||||||
|
)
|
||||||
|
self.generated_field_names[struct.name][field_name] = (
|
||||||
|
globvar
|
||||||
|
)
|
||||||
|
field_index += 1
|
||||||
else:
|
else:
|
||||||
field_co_re_name = self._struct_name_generator(
|
field_co_re_name, returned = self._struct_name_generator(
|
||||||
struct, field, field_index
|
struct, field, field_index
|
||||||
)
|
)
|
||||||
field_index += 1
|
field_index += 1
|
||||||
@ -198,7 +251,7 @@ class IRGenerator:
|
|||||||
is_indexed: bool = False,
|
is_indexed: bool = False,
|
||||||
index: int = 0,
|
index: int = 0,
|
||||||
containing_type_size: int = 0,
|
containing_type_size: int = 0,
|
||||||
) -> str:
|
) -> tuple[str, bool]:
|
||||||
# TODO: Does not support Unions as well as recursive pointer and array type naming
|
# TODO: Does not support Unions as well as recursive pointer and array type naming
|
||||||
if is_indexed:
|
if is_indexed:
|
||||||
name = (
|
name = (
|
||||||
@ -208,7 +261,7 @@ class IRGenerator:
|
|||||||
+ "$"
|
+ "$"
|
||||||
+ f"0:{field_index}:{index}"
|
+ f"0:{field_index}:{index}"
|
||||||
)
|
)
|
||||||
return name
|
return name, True
|
||||||
elif struct.name.startswith("struct_"):
|
elif struct.name.startswith("struct_"):
|
||||||
name = (
|
name = (
|
||||||
"llvm."
|
"llvm."
|
||||||
@ -217,9 +270,18 @@ class IRGenerator:
|
|||||||
+ "$"
|
+ "$"
|
||||||
+ f"0:{field_index}"
|
+ f"0:{field_index}"
|
||||||
)
|
)
|
||||||
return name
|
return name, True
|
||||||
else:
|
else:
|
||||||
print(self.handler[struct.name])
|
logger.warning(
|
||||||
raise TypeError(
|
"Blindly handling non-struct type to avoid type errors in vmlinux IR generation. Possibly a union."
|
||||||
"Name generation cannot occur due to type name not starting with struct"
|
|
||||||
)
|
)
|
||||||
|
self.type_number += 1
|
||||||
|
unprocessed_type = "unprocessed_type_" + str(self.handler[struct.name].name)
|
||||||
|
if self.unprocessed_store.__contains__(unprocessed_type):
|
||||||
|
return unprocessed_type + "_" + str(self.type_number), False
|
||||||
|
else:
|
||||||
|
self.unprocessed_store.append(unprocessed_type)
|
||||||
|
return unprocessed_type, False
|
||||||
|
# raise TypeError(
|
||||||
|
# "Name generation cannot occur due to type name not starting with struct"
|
||||||
|
# )
|
||||||
|
|||||||
@ -77,7 +77,7 @@ class VmlinuxHandler:
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
def get_vmlinux_enum_value(self, name):
|
def get_vmlinux_enum_value(self, name):
|
||||||
"""Handle vmlinux enum constants by returning LLVM IR constants"""
|
"""Handle vmlinux.enum constants by returning LLVM IR constants"""
|
||||||
if self.is_vmlinux_enum(name):
|
if self.is_vmlinux_enum(name):
|
||||||
value = self.vmlinux_symtab[name].value
|
value = self.vmlinux_symtab[name].value
|
||||||
logger.info(f"The value of vmlinux enum {name} = {value}")
|
logger.info(f"The value of vmlinux enum {name} = {value}")
|
||||||
@ -94,17 +94,168 @@ class VmlinuxHandler:
|
|||||||
f"Attempting to access field {field_name} of possible vmlinux struct {struct_var_name}"
|
f"Attempting to access field {field_name} of possible vmlinux struct {struct_var_name}"
|
||||||
)
|
)
|
||||||
python_type: type = var_info.metadata
|
python_type: type = var_info.metadata
|
||||||
struct_name = python_type.__name__
|
# Check if this is a context field (ctx) or a cast struct
|
||||||
globvar_ir, field_data = self.get_field_type(struct_name, field_name)
|
is_context_field = var_info.var is None
|
||||||
builder.function.args[0].type = ir.PointerType(ir.IntType(8))
|
|
||||||
field_ptr = self.load_ctx_field(
|
if is_context_field:
|
||||||
builder, builder.function.args[0], globvar_ir, field_data, struct_name
|
# Handle context field access (original behavior)
|
||||||
)
|
struct_name = python_type.__name__
|
||||||
# Return pointer to field and field type
|
globvar_ir, field_data = self.get_field_type(struct_name, field_name)
|
||||||
return field_ptr, field_data
|
builder.function.args[0].type = ir.PointerType(ir.IntType(8))
|
||||||
|
field_ptr = self.load_ctx_field(
|
||||||
|
builder,
|
||||||
|
builder.function.args[0],
|
||||||
|
globvar_ir,
|
||||||
|
field_data,
|
||||||
|
struct_name,
|
||||||
|
)
|
||||||
|
return field_ptr, field_data
|
||||||
|
else:
|
||||||
|
# Handle cast struct field access
|
||||||
|
struct_name = python_type.__name__
|
||||||
|
globvar_ir, field_data = self.get_field_type(struct_name, field_name)
|
||||||
|
|
||||||
|
# Handle cast struct field access (use bpf_probe_read_kernel)
|
||||||
|
# Load the struct pointer from the local variable
|
||||||
|
struct_ptr = builder.load(var_info.var)
|
||||||
|
|
||||||
|
# Determine the preallocated tmp name that assignment pass should have created
|
||||||
|
tmp_name = f"{struct_var_name}_{field_name}_tmp"
|
||||||
|
|
||||||
|
# Use bpf_probe_read_kernel for non-context struct field access
|
||||||
|
field_value = self.load_struct_field(
|
||||||
|
builder,
|
||||||
|
struct_ptr,
|
||||||
|
globvar_ir,
|
||||||
|
field_data,
|
||||||
|
struct_name,
|
||||||
|
local_sym_tab,
|
||||||
|
tmp_name,
|
||||||
|
)
|
||||||
|
# Return field value and field type
|
||||||
|
return field_value, field_data
|
||||||
else:
|
else:
|
||||||
raise RuntimeError("Variable accessed not found in symbol table")
|
raise RuntimeError("Variable accessed not found in symbol table")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def load_struct_field(
|
||||||
|
builder,
|
||||||
|
struct_ptr_int,
|
||||||
|
offset_global,
|
||||||
|
field_data,
|
||||||
|
struct_name=None,
|
||||||
|
local_sym_tab=None,
|
||||||
|
tmp_name: str | None = None,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Generate LLVM IR to load a field from a regular (non-context) struct using bpf_probe_read_kernel.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
builder: llvmlite IRBuilder instance
|
||||||
|
struct_ptr_int: The struct pointer as an i64 value (already loaded from alloca)
|
||||||
|
offset_global: Global variable containing the field offset (i64)
|
||||||
|
field_data: contains data about the field
|
||||||
|
struct_name: Name of the struct being accessed (optional)
|
||||||
|
local_sym_tab: symbol table (optional) - used to locate preallocated tmp storage
|
||||||
|
tmp_name: name of the preallocated temporary storage to use (preferred)
|
||||||
|
Returns:
|
||||||
|
The loaded value
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Load the offset value
|
||||||
|
offset = builder.load(offset_global)
|
||||||
|
|
||||||
|
# Convert i64 to pointer type (BPF stores pointers as i64)
|
||||||
|
i8_ptr_type = ir.PointerType(ir.IntType(8))
|
||||||
|
struct_ptr = builder.inttoptr(struct_ptr_int, i8_ptr_type)
|
||||||
|
|
||||||
|
# GEP with offset to get field pointer
|
||||||
|
field_ptr = builder.gep(
|
||||||
|
struct_ptr,
|
||||||
|
[offset],
|
||||||
|
inbounds=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Determine the appropriate field size based on field information
|
||||||
|
field_size_bytes = 8 # Default to 8 bytes (64-bit)
|
||||||
|
int_width = 64 # Default to 64-bit
|
||||||
|
needs_zext = False
|
||||||
|
|
||||||
|
if field_data is not None:
|
||||||
|
# Try to determine the size from field metadata
|
||||||
|
if field_data.type.__module__ == ctypes.__name__:
|
||||||
|
try:
|
||||||
|
field_size_bytes = ctypes.sizeof(field_data.type)
|
||||||
|
field_size_bits = field_size_bytes * 8
|
||||||
|
|
||||||
|
if field_size_bits in [8, 16, 32, 64]:
|
||||||
|
int_width = field_size_bits
|
||||||
|
logger.info(
|
||||||
|
f"Determined field size: {int_width} bits ({field_size_bytes} bytes)"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Special handling for struct_xdp_md i32 fields
|
||||||
|
if struct_name == "struct_xdp_md" and int_width == 32:
|
||||||
|
needs_zext = True
|
||||||
|
logger.info(
|
||||||
|
"struct_xdp_md i32 field detected, will zero-extend to i64"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.warning(
|
||||||
|
f"Unusual field size {field_size_bits} bits, using default 64"
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(
|
||||||
|
f"Could not determine field size: {e}, using default 64"
|
||||||
|
)
|
||||||
|
|
||||||
|
elif field_data.type.__module__ == "vmlinux":
|
||||||
|
# For pointers to structs or complex vmlinux types
|
||||||
|
if field_data.ctype_complex_type is not None and issubclass(
|
||||||
|
field_data.ctype_complex_type, ctypes._Pointer
|
||||||
|
):
|
||||||
|
int_width = 64 # Pointers are always 64-bit
|
||||||
|
field_size_bytes = 8
|
||||||
|
logger.info("Field is a pointer type, using 64 bits")
|
||||||
|
else:
|
||||||
|
logger.warning("Complex vmlinux field type, using default 64 bits")
|
||||||
|
|
||||||
|
# Use preallocated temporary storage if provided by allocation pass
|
||||||
|
|
||||||
|
local_storage_i8_ptr = None
|
||||||
|
if tmp_name and local_sym_tab and tmp_name in local_sym_tab:
|
||||||
|
# Expect the tmp to be an alloca created during allocation pass
|
||||||
|
tmp_alloca = local_sym_tab[tmp_name].var
|
||||||
|
local_storage_i8_ptr = builder.bitcast(tmp_alloca, i8_ptr_type)
|
||||||
|
else:
|
||||||
|
# Fallback: allocate inline (not ideal, but preserves behavior)
|
||||||
|
local_storage = builder.alloca(ir.IntType(int_width))
|
||||||
|
local_storage_i8_ptr = builder.bitcast(local_storage, i8_ptr_type)
|
||||||
|
logger.warning(f"Temp storage '{tmp_name}' not found. Allocating inline")
|
||||||
|
|
||||||
|
# Use bpf_probe_read_kernel to safely read the field
|
||||||
|
# This generates:
|
||||||
|
# %gep = getelementptr i8, ptr %struct_ptr, i64 %offset (already done above as field_ptr)
|
||||||
|
# %passed = tail call ptr @llvm.bpf.passthrough.p0.p0(i32 2, ptr %gep)
|
||||||
|
# %result = call i64 inttoptr (i64 113 to ptr)(ptr %local_storage, i32 %size, ptr %passed)
|
||||||
|
from pythonbpf.helper import emit_probe_read_kernel_call
|
||||||
|
|
||||||
|
emit_probe_read_kernel_call(
|
||||||
|
builder, local_storage_i8_ptr, field_size_bytes, field_ptr
|
||||||
|
)
|
||||||
|
|
||||||
|
# Load the value from local storage
|
||||||
|
value = builder.load(
|
||||||
|
builder.bitcast(local_storage_i8_ptr, ir.PointerType(ir.IntType(int_width)))
|
||||||
|
)
|
||||||
|
|
||||||
|
# Zero-extend i32 to i64 if needed
|
||||||
|
if needs_zext:
|
||||||
|
value = builder.zext(value, ir.IntType(64))
|
||||||
|
logger.info("Zero-extended i32 value to i64")
|
||||||
|
|
||||||
|
return value
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def load_ctx_field(builder, ctx_arg, offset_global, field_data, struct_name=None):
|
def load_ctx_field(builder, ctx_arg, offset_global, field_data, struct_name=None):
|
||||||
"""
|
"""
|
||||||
|
|||||||
@ -1,23 +1,22 @@
|
|||||||
BPF_CLANG := clang
|
BPF_CLANG := clang
|
||||||
CFLAGS := -emit-llvm -target bpf -c
|
CFLAGS := -emit-llvm -target bpf -c -D__TARGET_ARCH_x86
|
||||||
|
|
||||||
SRC := $(wildcard *.bpf.c)
|
SRC := $(wildcard *.bpf.c)
|
||||||
LL := $(SRC:.bpf.c=.bpf.ll)
|
LL := $(SRC:.bpf.c=.bpf.ll)
|
||||||
LL2 := $(SRC:.bpf.c=.bpf.o2.ll)
|
|
||||||
OBJ := $(SRC:.bpf.c=.bpf.o)
|
OBJ := $(SRC:.bpf.c=.bpf.o)
|
||||||
|
LL0 := $(SRC:.bpf.c=.bpf.o0.ll)
|
||||||
.PHONY: all clean
|
.PHONY: all clean
|
||||||
|
|
||||||
all: $(LL) $(OBJ) $(LL2)
|
all: $(LL) $(OBJ) $(LL0)
|
||||||
|
|
||||||
%.bpf.o: %.bpf.c
|
%.bpf.o: %.bpf.c
|
||||||
$(BPF_CLANG) -O2 -g -target bpf -c $< -o $@
|
$(BPF_CLANG) -O2 -D__TARGET_ARCH_x86 -g -target bpf -c $< -o $@
|
||||||
|
|
||||||
%.bpf.ll: %.bpf.c
|
%.bpf.ll: %.bpf.c
|
||||||
$(BPF_CLANG) -O0 $(CFLAGS) -g -S $< -o $@
|
$(BPF_CLANG) $(CFLAGS) -O2 -g -S $< -o $@
|
||||||
|
|
||||||
%.bpf.o2.ll: %.bpf.c
|
%.bpf.o0.ll: %.bpf.c
|
||||||
$(BPF_CLANG) -O2 $(CFLAGS) -g -S $< -o $@
|
$(BPF_CLANG) $(CFLAGS) -O0 -g -S $< -o $@
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -f $(LL) $(OBJ) $(LL2)
|
rm -f $(LL) $(OBJ) $(LL0)
|
||||||
|
|||||||
66
tests/c-form/disksnoop.bpf.c
Normal file
66
tests/c-form/disksnoop.bpf.c
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
// disksnoop.bpf.c
|
||||||
|
// eBPF program (compile with: clang -O2 -g -target bpf -c disksnoop.bpf.c -o disksnoop.bpf.o)
|
||||||
|
|
||||||
|
#include "vmlinux.h"
|
||||||
|
#include <bpf/bpf_helpers.h>
|
||||||
|
#include <bpf/bpf_core_read.h>
|
||||||
|
|
||||||
|
char LICENSE[] SEC("license") = "GPL";
|
||||||
|
|
||||||
|
struct {
|
||||||
|
__uint(type, BPF_MAP_TYPE_HASH);
|
||||||
|
__type(key, __u64);
|
||||||
|
__type(value, __u64);
|
||||||
|
__uint(max_entries, 10240);
|
||||||
|
} start_map SEC(".maps");
|
||||||
|
|
||||||
|
/* kprobe: record start timestamp keyed by request pointer */
|
||||||
|
SEC("kprobe/blk_mq_start_request")
|
||||||
|
int trace_start(struct pt_regs *ctx)
|
||||||
|
{
|
||||||
|
/* request * is first arg */
|
||||||
|
__u64 reqp = (__u64)(ctx->di);
|
||||||
|
__u64 ts = bpf_ktime_get_ns();
|
||||||
|
|
||||||
|
bpf_map_update_elem(&start_map, &reqp, &ts, BPF_ANY);
|
||||||
|
|
||||||
|
// /* optional debug:
|
||||||
|
bpf_printk("start: req=%llu ts=%llu\n", reqp, ts);
|
||||||
|
// */
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* completion: compute latency and print data_len, cmd_flags, latency_us */
|
||||||
|
SEC("kprobe/blk_mq_end_request")
|
||||||
|
int trace_completion(struct pt_regs *ctx)
|
||||||
|
{
|
||||||
|
__u64 reqp = (__u64)(ctx->di);
|
||||||
|
__u64 *tsp;
|
||||||
|
__u64 now_ns;
|
||||||
|
__u64 delta_ns;
|
||||||
|
__u64 delta_us = 0;
|
||||||
|
bpf_printk("%lld", reqp);
|
||||||
|
tsp = bpf_map_lookup_elem(&start_map, &reqp);
|
||||||
|
if (!tsp)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
now_ns = bpf_ktime_get_ns();
|
||||||
|
delta_ns = now_ns - *tsp;
|
||||||
|
delta_us = delta_ns / 1000;
|
||||||
|
|
||||||
|
/* read request fields using CO-RE; needs vmlinux.h/BTF */
|
||||||
|
__u32 data_len = 0;
|
||||||
|
__u32 cmd_flags = 0;
|
||||||
|
|
||||||
|
/* __data_len is usually a 32/64-bit; use CORE read to be safe */
|
||||||
|
data_len = ( __u32 ) BPF_CORE_READ((struct request *)reqp, __data_len);
|
||||||
|
cmd_flags = ( __u32 ) BPF_CORE_READ((struct request *)reqp, cmd_flags);
|
||||||
|
|
||||||
|
/* print: "<bytes> <flags_hex> <latency_us>" */
|
||||||
|
bpf_printk("%u %x %llu\n", data_len, cmd_flags, delta_us);
|
||||||
|
|
||||||
|
/* remove from map */
|
||||||
|
bpf_map_delete_elem(&start_map, &reqp);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
18
tests/c-form/requests.bpf.c
Normal file
18
tests/c-form/requests.bpf.c
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
#include "vmlinux.h"
|
||||||
|
#include <bpf/bpf_helpers.h>
|
||||||
|
#include <bpf/bpf_tracing.h>
|
||||||
|
#include <bpf/bpf_core_read.h>
|
||||||
|
|
||||||
|
char LICENSE[] SEC("license") = "GPL";
|
||||||
|
|
||||||
|
SEC("kprobe/blk_mq_start_request")
|
||||||
|
int example(struct pt_regs *ctx)
|
||||||
|
{
|
||||||
|
u64 a = ctx->r15;
|
||||||
|
struct request *req = (struct request *)(ctx->di);
|
||||||
|
unsigned int something_ns = BPF_CORE_READ(req, timeout);
|
||||||
|
unsigned int data_len = BPF_CORE_READ(req, __data_len);
|
||||||
|
bpf_printk("data length %lld %ld %ld\n", data_len, something_ns, a);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
18
tests/c-form/requests2.bpf.c
Normal file
18
tests/c-form/requests2.bpf.c
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
#include "vmlinux.h"
|
||||||
|
#include <bpf/bpf_helpers.h>
|
||||||
|
#include <bpf/bpf_tracing.h>
|
||||||
|
#include <bpf/bpf_core_read.h>
|
||||||
|
|
||||||
|
char LICENSE[] SEC("license") = "GPL";
|
||||||
|
|
||||||
|
SEC("kprobe/blk_mq_start_request")
|
||||||
|
int example(struct pt_regs *ctx)
|
||||||
|
{
|
||||||
|
u64 a = ctx->r15;
|
||||||
|
struct request *req = (struct request *)(ctx->di);
|
||||||
|
unsigned int something_ns = req->timeout;
|
||||||
|
unsigned int data_len = req->__data_len;
|
||||||
|
bpf_printk("data length %lld %ld %ld\n", data_len, something_ns, a);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
31
tests/c-form/xdp_test.bpf.c
Normal file
31
tests/c-form/xdp_test.bpf.c
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
#include "vmlinux.h"
|
||||||
|
#include <bpf/bpf_helpers.h>
|
||||||
|
|
||||||
|
struct fake_iphdr {
|
||||||
|
unsigned short useless;
|
||||||
|
unsigned short tot_len;
|
||||||
|
unsigned short id;
|
||||||
|
unsigned short frag_off;
|
||||||
|
unsigned char ttl;
|
||||||
|
unsigned char protocol;
|
||||||
|
unsigned short check;
|
||||||
|
unsigned int saddr;
|
||||||
|
unsigned int daddr;
|
||||||
|
};
|
||||||
|
|
||||||
|
SEC("xdp")
|
||||||
|
int xdp_prog(struct xdp_md *ctx) {
|
||||||
|
unsigned long data = ctx->data;
|
||||||
|
unsigned long data_end = ctx->data_end;
|
||||||
|
|
||||||
|
if (data + sizeof(struct ethhdr) + sizeof(struct fake_iphdr) > data_end) {
|
||||||
|
return XDP_ABORTED;
|
||||||
|
}
|
||||||
|
struct fake_iphdr *iph = (void *)data + sizeof(struct ethhdr);
|
||||||
|
|
||||||
|
bpf_printk("%d", iph->saddr);
|
||||||
|
|
||||||
|
return XDP_PASS;
|
||||||
|
}
|
||||||
|
|
||||||
|
char _license[] SEC("license") = "GPL";
|
||||||
22
tests/failing_tests/vmlinux/assignment_handling.py
Normal file
22
tests/failing_tests/vmlinux/assignment_handling.py
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
from vmlinux import XDP_PASS
|
||||||
|
from pythonbpf import bpf, section, bpfglobal, compile_to_ir
|
||||||
|
import logging
|
||||||
|
from ctypes import c_int64, c_void_p
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@section("kprobe/blk_mq_start_request")
|
||||||
|
def example(ctx: c_void_p) -> c_int64:
|
||||||
|
d = XDP_PASS # This gives an error, but
|
||||||
|
e = XDP_PASS + 0 # this does not
|
||||||
|
print(f"test1 {e} test2 {d}")
|
||||||
|
return c_int64(0)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@bpfglobal
|
||||||
|
def LICENSE() -> str:
|
||||||
|
return "GPL"
|
||||||
|
|
||||||
|
|
||||||
|
compile_to_ir("assignment_handling.py", "assignment_handling.ll", loglevel=logging.INFO)
|
||||||
46
tests/failing_tests/xdp/xdp_test_1.py
Normal file
46
tests/failing_tests/xdp/xdp_test_1.py
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
from vmlinux import XDP_PASS, XDP_ABORTED
|
||||||
|
from vmlinux import (
|
||||||
|
struct_xdp_md,
|
||||||
|
)
|
||||||
|
from pythonbpf import bpf, section, bpfglobal, compile, compile_to_ir, struct
|
||||||
|
from ctypes import c_int64, c_ubyte, c_ushort, c_uint32, c_void_p
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@struct
|
||||||
|
class iphdr:
|
||||||
|
useless: c_ushort
|
||||||
|
tot_len: c_ushort
|
||||||
|
id: c_ushort
|
||||||
|
frag_off: c_ushort
|
||||||
|
ttl: c_ubyte
|
||||||
|
protocol: c_ubyte
|
||||||
|
check: c_ushort
|
||||||
|
saddr: c_uint32
|
||||||
|
daddr: c_uint32
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@section("xdp")
|
||||||
|
def ip_detector(ctx: struct_xdp_md) -> c_int64:
|
||||||
|
data = c_void_p(ctx.data)
|
||||||
|
data_end = c_void_p(ctx.data_end)
|
||||||
|
if data + 34 < data_end:
|
||||||
|
hdr = data + 14
|
||||||
|
iph = iphdr(hdr)
|
||||||
|
addr = iph.saddr
|
||||||
|
print(f"ipaddress: {addr}")
|
||||||
|
else:
|
||||||
|
return c_int64(XDP_ABORTED)
|
||||||
|
|
||||||
|
return c_int64(XDP_PASS)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@bpfglobal
|
||||||
|
def LICENSE() -> str:
|
||||||
|
return "GPL"
|
||||||
|
|
||||||
|
|
||||||
|
compile_to_ir("xdp_test_1.py", "xdp_test_1.ll")
|
||||||
|
compile()
|
||||||
@ -1,4 +1,4 @@
|
|||||||
from pythonbpf import bpf, struct, section, bpfglobal
|
from pythonbpf import bpf, struct, section, bpfglobal, compile
|
||||||
from pythonbpf.helper import comm
|
from pythonbpf.helper import comm
|
||||||
|
|
||||||
from ctypes import c_void_p, c_int64
|
from ctypes import c_void_p, c_int64
|
||||||
@ -26,3 +26,6 @@ def hello(ctx: c_void_p) -> c_int64:
|
|||||||
@bpfglobal
|
@bpfglobal
|
||||||
def LICENSE() -> str:
|
def LICENSE() -> str:
|
||||||
return "GPL"
|
return "GPL"
|
||||||
|
|
||||||
|
|
||||||
|
compile()
|
||||||
|
|||||||
@ -1,6 +1,6 @@
|
|||||||
from pythonbpf import bpf, section, struct, bpfglobal, compile, map
|
from pythonbpf import bpf, section, struct, bpfglobal, compile, map
|
||||||
from pythonbpf.maps import HashMap
|
from pythonbpf.maps import HashMap
|
||||||
from pythonbpf.helper import pid
|
from pythonbpf.helper import pid, comm
|
||||||
from ctypes import c_void_p, c_int64
|
from ctypes import c_void_p, c_int64
|
||||||
|
|
||||||
|
|
||||||
@ -9,6 +9,7 @@ from ctypes import c_void_p, c_int64
|
|||||||
class val_type:
|
class val_type:
|
||||||
counter: c_int64
|
counter: c_int64
|
||||||
shizzle: c_int64
|
shizzle: c_int64
|
||||||
|
comm: str(16)
|
||||||
|
|
||||||
|
|
||||||
@bpf
|
@bpf
|
||||||
@ -22,6 +23,7 @@ def last() -> HashMap:
|
|||||||
def hello_world(ctx: c_void_p) -> c_int64:
|
def hello_world(ctx: c_void_p) -> c_int64:
|
||||||
obj = val_type()
|
obj = val_type()
|
||||||
obj.counter, obj.shizzle = 42, 96
|
obj.counter, obj.shizzle = 42, 96
|
||||||
|
comm(obj.comm)
|
||||||
t = last.lookup(obj)
|
t = last.lookup(obj)
|
||||||
if t:
|
if t:
|
||||||
print(f"Found existing entry: counter={obj.counter}, pid={t}")
|
print(f"Found existing entry: counter={obj.counter}, pid={t}")
|
||||||
|
|||||||
93
tests/passing_tests/struct_pylib.py
Normal file
93
tests/passing_tests/struct_pylib.py
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
"""
|
||||||
|
Test struct values in HashMap.
|
||||||
|
|
||||||
|
This example stores a struct in a HashMap and reads it back,
|
||||||
|
testing the new set_value_struct() functionality in pylibbpf.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from pythonbpf import bpf, map, struct, section, bpfglobal, BPF
|
||||||
|
from pythonbpf.helper import ktime, smp_processor_id, pid, comm
|
||||||
|
from pythonbpf.maps import HashMap
|
||||||
|
from ctypes import c_void_p, c_int64, c_uint32, c_uint64
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@struct
|
||||||
|
class task_info:
|
||||||
|
pid: c_uint64
|
||||||
|
timestamp: c_uint64
|
||||||
|
comm: str(16)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@map
|
||||||
|
def cpu_tasks() -> HashMap:
|
||||||
|
return HashMap(key=c_uint32, value=task_info, max_entries=256)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@section("tracepoint/sched/sched_switch")
|
||||||
|
def trace_sched_switch(ctx: c_void_p) -> c_int64:
|
||||||
|
cpu = smp_processor_id()
|
||||||
|
|
||||||
|
# Create task info struct
|
||||||
|
info = task_info()
|
||||||
|
info.pid = pid()
|
||||||
|
info.timestamp = ktime()
|
||||||
|
comm(info.comm)
|
||||||
|
|
||||||
|
# Store in map
|
||||||
|
cpu_tasks.update(cpu, info)
|
||||||
|
|
||||||
|
return 0 # type: ignore
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@bpfglobal
|
||||||
|
def LICENSE() -> str:
|
||||||
|
return "GPL"
|
||||||
|
|
||||||
|
|
||||||
|
# Compile and load
|
||||||
|
b = BPF()
|
||||||
|
b.load()
|
||||||
|
b.attach_all()
|
||||||
|
|
||||||
|
print("Testing HashMap with Struct Values")
|
||||||
|
|
||||||
|
cpu_map = b["cpu_tasks"]
|
||||||
|
cpu_map.set_value_struct("task_info") # Enable struct deserialization
|
||||||
|
|
||||||
|
print("Listening for context switches.. .\n")
|
||||||
|
|
||||||
|
num_cpus = os.cpu_count() or 16
|
||||||
|
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
print(f"--- Snapshot at {time.strftime('%H:%M:%S')} ---")
|
||||||
|
|
||||||
|
for cpu in range(num_cpus):
|
||||||
|
try:
|
||||||
|
info = cpu_map.lookup(cpu)
|
||||||
|
|
||||||
|
if info:
|
||||||
|
comm_str = (
|
||||||
|
bytes(info.comm).decode("utf-8", errors="ignore").rstrip("\x00")
|
||||||
|
)
|
||||||
|
ts_sec = info.timestamp / 1e9
|
||||||
|
|
||||||
|
print(
|
||||||
|
f" CPU {cpu}: PID={info.pid}, comm={comm_str}, ts={ts_sec:.3f}s"
|
||||||
|
)
|
||||||
|
except KeyError:
|
||||||
|
# No data for this CPU yet
|
||||||
|
pass
|
||||||
|
|
||||||
|
print()
|
||||||
|
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print("\nStopped")
|
||||||
27
tests/passing_tests/vmlinux/requests.py
Normal file
27
tests/passing_tests/vmlinux/requests.py
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
from vmlinux import struct_request, struct_pt_regs
|
||||||
|
from pythonbpf import bpf, section, bpfglobal, compile_to_ir, compile
|
||||||
|
import logging
|
||||||
|
from ctypes import c_int64
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@section("kprobe/blk_mq_start_request")
|
||||||
|
def example(ctx: struct_pt_regs) -> c_int64:
|
||||||
|
a = ctx.r15
|
||||||
|
req = struct_request(ctx.di)
|
||||||
|
d = req.__data_len
|
||||||
|
b = ctx.r12
|
||||||
|
c = req.timeout
|
||||||
|
print(f"data length {d} and {c} and {a}")
|
||||||
|
print(f"ctx arg {b}")
|
||||||
|
return c_int64(0)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@bpfglobal
|
||||||
|
def LICENSE() -> str:
|
||||||
|
return "GPL"
|
||||||
|
|
||||||
|
|
||||||
|
compile_to_ir("requests.py", "requests.ll", loglevel=logging.INFO)
|
||||||
|
compile()
|
||||||
21
tests/passing_tests/vmlinux/requests2.py
Normal file
21
tests/passing_tests/vmlinux/requests2.py
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
from vmlinux import struct_pt_regs
|
||||||
|
from pythonbpf import bpf, section, bpfglobal, compile_to_ir
|
||||||
|
import logging
|
||||||
|
from ctypes import c_int64
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@section("kprobe/blk_mq_start_request")
|
||||||
|
def example(ctx: struct_pt_regs) -> c_int64:
|
||||||
|
req = ctx.di
|
||||||
|
print(f"data length {req}")
|
||||||
|
return c_int64(0)
|
||||||
|
|
||||||
|
|
||||||
|
@bpf
|
||||||
|
@bpfglobal
|
||||||
|
def LICENSE() -> str:
|
||||||
|
return "GPL"
|
||||||
|
|
||||||
|
|
||||||
|
compile_to_ir("requests2.py", "requests2.ll", loglevel=logging.INFO)
|
||||||
Reference in New Issue
Block a user