Mirror of https://github.com/varun-r-mallya/Python-BPF.git (synced 2025-12-31 21:06:25 +00:00)

Compare commits: 13 commits, 24e5829b80 ... c97efb2570
Commits:

- c97efb2570
- 76c982e15e
- 650744f843
- d73c793989
- bbe4990878
- 600993f626
- 6c55d56ef0
- 704b0d8cd3
- 0e50079d88
- d457f87410
- 4ea02745b3
- 84edddb685
- 6f017a9176
.github/workflows/format.yml (vendored), 2 lines changed
```diff
@@ -12,7 +12,7 @@ jobs:
     name: Format
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
       - uses: actions/setup-python@v6
         with:
           python-version: "3.x"
```
.github/workflows/python-publish.yml (vendored), 2 lines changed
```diff
@@ -20,7 +20,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6

       - uses: actions/setup-python@v6
         with:
```
BCC-Examples/container-monitor/README.md (new file, 49 lines)
@@ -0,0 +1,49 @@

# Container Monitor TUI

A terminal-based container monitoring tool that combines syscall tracking, file I/O monitoring, and network traffic analysis using eBPF.

## Features

- 🎯 **Interactive Cgroup Selection** - Navigate and select cgroups with arrow keys
- 📊 **Real-time Monitoring** - Live graphs and statistics
- 🔥 **Syscall Tracking** - Total syscall count per cgroup
- 💾 **File I/O Monitoring** - Read/write operations and bytes with graphs
- 🌐 **Network Traffic** - RX/TX packets and bytes with live graphs
- ⚡ **Efficient Caching** - Reduced /proc lookups for better performance
- 🎨 **Beautiful TUI** - Clean, colorful terminal interface

## Requirements

- Python 3.7+
- pythonbpf
- Root privileges (for eBPF)

## Installation

```bash
# Ensure you have pythonbpf installed
pip install pythonbpf

# Run the monitor
sudo $(which python) container_monitor.py
```

## Usage

1. **Selection Screen**: Use the ↑/↓ arrow keys to navigate through cgroups; press ENTER to select
2. **Monitoring Screen**: View real-time graphs and statistics; press ESC or 'b' to go back
3. **Exit**: Press 'q' at any time to quit

## Architecture

- `container_monitor.py` - Main BPF program combining all three tracers
- `data_collection.py` - Data collection, caching, and history management
- `tui.py` - Terminal user interface with selection and monitoring screens

## BPF Programs

- **vfs_read/vfs_write** - Track file I/O operations
- **__netif_receive_skb/__dev_queue_xmit** - Track network traffic
- **raw_syscalls/sys_enter** - Count all syscalls

All programs filter by cgroup ID for per-container monitoring.
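For orientation before the full sources below, the shared per-cgroup pattern looks roughly like this minimal sketch. It reuses the pythonbpf decorators and helpers that appear later in this commit; the map name `example_count` and the probe name are invented for illustration only.

```python
# Minimal sketch of the shared pattern: key every BPF map by the current
# cgroup ID so userspace can read per-container counters.
# `example_count` is a hypothetical map name used only for this example.
from ctypes import c_int32, c_uint64
from pythonbpf import bpf, map, section, bpfglobal
from pythonbpf.maps import HashMap
from pythonbpf.helper import get_current_cgroup_id
from vmlinux import struct_pt_regs


@bpf
@map
def example_count() -> HashMap:
    return HashMap(key=c_uint64, value=c_uint64, max_entries=1024)


@bpf
@section("kprobe/vfs_read")
def count_reads(ctx: struct_pt_regs) -> c_int32:
    cg = get_current_cgroup_id()     # cgroup of the task issuing the read
    prev = example_count.lookup(cg)  # per-cgroup bucket
    if prev:
        example_count.update(cg, prev + c_uint64(1))
    else:
        example_count.update(cg, c_uint64(1))
    return c_int32(0)


@bpf
@bpfglobal
def LICENSE() -> str:
    return "GPL"
```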
BCC-Examples/container-monitor/container_monitor.py (new file, 220 lines)
@@ -0,0 +1,220 @@

```python
"""Container Monitor - TUI-based cgroup monitoring combining syscall, file I/O, and network tracking."""

from pythonbpf import bpf, map, section, bpfglobal, struct, BPF
from pythonbpf.maps import HashMap
from pythonbpf.helper import get_current_cgroup_id
from ctypes import c_int32, c_uint64, c_void_p
from vmlinux import struct_pt_regs, struct_sk_buff

from data_collection import ContainerDataCollector
from tui import ContainerMonitorTUI


# ==================== BPF Structs ====================


@bpf
@struct
class read_stats:
    bytes: c_uint64
    ops: c_uint64


@bpf
@struct
class write_stats:
    bytes: c_uint64
    ops: c_uint64


@bpf
@struct
class net_stats:
    rx_packets: c_uint64
    tx_packets: c_uint64
    rx_bytes: c_uint64
    tx_bytes: c_uint64


# ==================== BPF Maps ====================


@bpf
@map
def read_map() -> HashMap:
    return HashMap(key=c_uint64, value=read_stats, max_entries=1024)


@bpf
@map
def write_map() -> HashMap:
    return HashMap(key=c_uint64, value=write_stats, max_entries=1024)


@bpf
@map
def net_stats_map() -> HashMap:
    return HashMap(key=c_uint64, value=net_stats, max_entries=1024)


@bpf
@map
def syscall_count() -> HashMap:
    return HashMap(key=c_uint64, value=c_uint64, max_entries=1024)


# ==================== File I/O Tracing ====================


@bpf
@section("kprobe/vfs_read")
def trace_read(ctx: struct_pt_regs) -> c_int32:
    cg = get_current_cgroup_id()
    count = c_uint64(ctx.dx)
    ptr = read_map.lookup(cg)
    if ptr:
        s = read_stats()
        s.bytes = ptr.bytes + count
        s.ops = ptr.ops + 1
        read_map.update(cg, s)
    else:
        s = read_stats()
        s.bytes = count
        s.ops = c_uint64(1)
        read_map.update(cg, s)

    return c_int32(0)


@bpf
@section("kprobe/vfs_write")
def trace_write(ctx1: struct_pt_regs) -> c_int32:
    cg = get_current_cgroup_id()
    count = c_uint64(ctx1.dx)
    ptr = write_map.lookup(cg)

    if ptr:
        s = write_stats()
        s.bytes = ptr.bytes + count
        s.ops = ptr.ops + 1
        write_map.update(cg, s)
    else:
        s = write_stats()
        s.bytes = count
        s.ops = c_uint64(1)
        write_map.update(cg, s)

    return c_int32(0)


# ==================== Network I/O Tracing ====================


@bpf
@section("kprobe/__netif_receive_skb")
def trace_netif_rx(ctx2: struct_pt_regs) -> c_int32:
    cgroup_id = get_current_cgroup_id()
    skb = struct_sk_buff(ctx2.di)
    pkt_len = c_uint64(skb.len)

    stats_ptr = net_stats_map.lookup(cgroup_id)

    if stats_ptr:
        stats = net_stats()
        stats.rx_packets = stats_ptr.rx_packets + 1
        stats.tx_packets = stats_ptr.tx_packets
        stats.rx_bytes = stats_ptr.rx_bytes + pkt_len
        stats.tx_bytes = stats_ptr.tx_bytes
        net_stats_map.update(cgroup_id, stats)
    else:
        stats = net_stats()
        stats.rx_packets = c_uint64(1)
        stats.tx_packets = c_uint64(0)
        stats.rx_bytes = pkt_len
        stats.tx_bytes = c_uint64(0)
        net_stats_map.update(cgroup_id, stats)

    return c_int32(0)


@bpf
@section("kprobe/__dev_queue_xmit")
def trace_dev_xmit(ctx3: struct_pt_regs) -> c_int32:
    cgroup_id = get_current_cgroup_id()
    skb = struct_sk_buff(ctx3.di)
    pkt_len = c_uint64(skb.len)

    stats_ptr = net_stats_map.lookup(cgroup_id)

    if stats_ptr:
        stats = net_stats()
        stats.rx_packets = stats_ptr.rx_packets
        stats.tx_packets = stats_ptr.tx_packets + 1
        stats.rx_bytes = stats_ptr.rx_bytes
        stats.tx_bytes = stats_ptr.tx_bytes + pkt_len
        net_stats_map.update(cgroup_id, stats)
    else:
        stats = net_stats()
        stats.rx_packets = c_uint64(0)
        stats.tx_packets = c_uint64(1)
        stats.rx_bytes = c_uint64(0)
        stats.tx_bytes = pkt_len
        net_stats_map.update(cgroup_id, stats)

    return c_int32(0)


# ==================== Syscall Tracing ====================


@bpf
@section("tracepoint/raw_syscalls/sys_enter")
def count_syscalls(ctx: c_void_p) -> c_int32:
    cgroup_id = get_current_cgroup_id()
    count_ptr = syscall_count.lookup(cgroup_id)

    if count_ptr:
        new_count = count_ptr + c_uint64(1)
        syscall_count.update(cgroup_id, new_count)
    else:
        syscall_count.update(cgroup_id, c_uint64(1))

    return c_int32(0)


@bpf
@bpfglobal
def LICENSE() -> str:
    return "GPL"


# ==================== Main ====================

if __name__ == "__main__":
    print("🔥 Loading BPF programs...")

    # Load and attach BPF program
    b = BPF()
    b.load()
    b.attach_all()

    # Get map references and enable struct deserialization
    read_map_ref = b["read_map"]
    write_map_ref = b["write_map"]
    net_stats_map_ref = b["net_stats_map"]
    syscall_count_ref = b["syscall_count"]

    read_map_ref.set_value_struct("read_stats")
    write_map_ref.set_value_struct("write_stats")
    net_stats_map_ref.set_value_struct("net_stats")

    print("✅ BPF programs loaded and attached")

    # Setup data collector
    collector = ContainerDataCollector(
        read_map_ref, write_map_ref, net_stats_map_ref, syscall_count_ref
    )

    # Create and run TUI
    tui = ContainerMonitorTUI(collector)
    tui.run()
```
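For a quick sanity check without the TUI, the last two lines of the `__main__` block above could be swapped for a plain polling loop; this is a hedged sketch, not part of the commit, and `some_cgroup_id` is a placeholder.

```python
# Hypothetical replacement for the TUI startup at the end of __main__:
# poll one per-cgroup counter directly instead of launching the interface.
# `some_cgroup_id` is illustrative; real IDs come from os.stat() on a
# /sys/fs/cgroup directory, as data_collection.py does.
import time

some_cgroup_id = 1234  # placeholder cgroup inode number

while True:
    value = syscall_count_ref.lookup(some_cgroup_id)
    total = int(value) if value is not None else 0
    print(f"cgroup {some_cgroup_id}: {total} syscalls so far")
    time.sleep(1)
```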
BCC-Examples/container-monitor/data_collection.py (new file, 208 lines)
@@ -0,0 +1,208 @@

```python
"""Data collection and management for container monitoring."""

import os
import time
from pathlib import Path
from typing import Dict, List, Set, Optional
from dataclasses import dataclass
from collections import deque, defaultdict


@dataclass
class CgroupInfo:
    """Information about a cgroup."""

    id: int
    name: str
    path: str


@dataclass
class ContainerStats:
    """Statistics for a container/cgroup."""

    cgroup_id: int
    cgroup_name: str

    # File I/O
    read_ops: int = 0
    read_bytes: int = 0
    write_ops: int = 0
    write_bytes: int = 0

    # Network I/O
    rx_packets: int = 0
    rx_bytes: int = 0
    tx_packets: int = 0
    tx_bytes: int = 0

    # Syscalls
    syscall_count: int = 0

    # Timestamp
    timestamp: float = 0.0


class ContainerDataCollector:
    """Collects and manages container monitoring data from BPF."""

    def __init__(
        self, read_map, write_map, net_stats_map, syscall_map, history_size: int = 100
    ):
        self.read_map = read_map
        self.write_map = write_map
        self.net_stats_map = net_stats_map
        self.syscall_map = syscall_map

        # Caching
        self._cgroup_cache: Dict[int, CgroupInfo] = {}
        self._cgroup_cache_time = 0
        self._cache_ttl = 5.0  # Refresh cache every 5 seconds

        # Historical data for graphing
        self._history_size = history_size
        self._history: Dict[int, deque] = defaultdict(
            lambda: deque(maxlen=history_size)
        )

    def get_all_cgroups(self) -> List[CgroupInfo]:
        """Get all cgroups with caching."""
        current_time = time.time()

        # Use cached data if still valid
        if current_time - self._cgroup_cache_time < self._cache_ttl:
            return list(self._cgroup_cache.values())

        # Refresh cache
        self._refresh_cgroup_cache()
        return list(self._cgroup_cache.values())

    def _refresh_cgroup_cache(self):
        """Refresh the cgroup cache from /proc."""
        cgroup_map: Dict[int, Set[str]] = defaultdict(set)

        # Scan /proc to find all cgroups
        for proc_dir in Path("/proc").glob("[0-9]*"):
            try:
                cgroup_file = proc_dir / "cgroup"
                if not cgroup_file.exists():
                    continue

                with open(cgroup_file) as f:
                    for line in f:
                        parts = line.strip().split(":")
                        if len(parts) >= 3:
                            cgroup_path = parts[2]
                            cgroup_mount = f"/sys/fs/cgroup{cgroup_path}"

                            if os.path.exists(cgroup_mount):
                                stat_info = os.stat(cgroup_mount)
                                cgroup_id = stat_info.st_ino
                                cgroup_map[cgroup_id].add(cgroup_path)

            except (PermissionError, FileNotFoundError, OSError):
                continue

        # Update cache with best names
        new_cache = {}
        for cgroup_id, paths in cgroup_map.items():
            # Pick the most descriptive path
            best_path = self._get_best_cgroup_path(paths)
            name = self._get_cgroup_name(best_path)

            new_cache[cgroup_id] = CgroupInfo(id=cgroup_id, name=name, path=best_path)

        self._cgroup_cache = new_cache
        self._cgroup_cache_time = time.time()

    def _get_best_cgroup_path(self, paths: Set[str]) -> str:
        """Select the most descriptive cgroup path."""
        path_list = list(paths)

        # Prefer paths with more components (more specific)
        # Prefer paths containing docker, podman, etc.
        for keyword in ["docker", "podman", "kubernetes", "k8s", "systemd"]:
            for path in path_list:
                if keyword in path.lower():
                    return path

        # Return longest path (most specific)
        return max(path_list, key=lambda p: (len(p.split("/")), len(p)))

    def _get_cgroup_name(self, path: str) -> str:
        """Extract a friendly name from cgroup path."""
        if not path or path == "/":
            return "root"

        # Remove leading/trailing slashes
        path = path.strip("/")

        # Try to extract container ID or service name
        parts = path.split("/")

        # For Docker: /docker/<container_id>
        if "docker" in path.lower():
            for i, part in enumerate(parts):
                if part.lower() == "docker" and i + 1 < len(parts):
                    container_id = parts[i + 1][:12]  # Short ID
                    return f"docker:{container_id}"

        # For systemd services
        if "system.slice" in path:
            for part in parts:
                if part.endswith(".service"):
                    return part.replace(".service", "")

        # For user slices
        if "user.slice" in path:
            return f"user:{parts[-1]}" if parts else "user"

        # Default: use last component
        return parts[-1] if parts else path

    def get_stats_for_cgroup(self, cgroup_id: int) -> ContainerStats:
        """Get current statistics for a specific cgroup."""
        cgroup_info = self._cgroup_cache.get(cgroup_id)
        cgroup_name = cgroup_info.name if cgroup_info else f"cgroup-{cgroup_id}"

        stats = ContainerStats(
            cgroup_id=cgroup_id, cgroup_name=cgroup_name, timestamp=time.time()
        )

        # Get file I/O stats
        read_stat = self.read_map.lookup(cgroup_id)
        if read_stat:
            stats.read_ops = int(read_stat.ops)
            stats.read_bytes = int(read_stat.bytes)

        write_stat = self.write_map.lookup(cgroup_id)
        if write_stat:
            stats.write_ops = int(write_stat.ops)
            stats.write_bytes = int(write_stat.bytes)

        # Get network stats
        net_stat = self.net_stats_map.lookup(cgroup_id)
        if net_stat:
            stats.rx_packets = int(net_stat.rx_packets)
            stats.rx_bytes = int(net_stat.rx_bytes)
            stats.tx_packets = int(net_stat.tx_packets)
            stats.tx_bytes = int(net_stat.tx_bytes)

        # Get syscall count
        syscall_cnt = self.syscall_map.lookup(cgroup_id)
        if syscall_cnt is not None:
            stats.syscall_count = int(syscall_cnt)

        # Add to history
        self._history[cgroup_id].append(stats)

        return stats

    def get_history(self, cgroup_id: int) -> List[ContainerStats]:
        """Get historical statistics for graphing."""
        return list(self._history[cgroup_id])

    def get_cgroup_info(self, cgroup_id: int) -> Optional[CgroupInfo]:
        """Get cached cgroup information."""
        return self._cgroup_cache.get(cgroup_id)
```
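A note on why keying by `st_ino` works: on cgroup v2, the ID that `get_current_cgroup_id()` reports inside the BPF programs is the kernfs inode number of the task's cgroup directory, so the collector and the BPF maps agree on the key. A quick manual check, with an example directory path:

```python
# Illustrative check that the userspace key matches what the BPF side reports.
# The cgroup directory below is an example; pick any directory that exists
# under /sys/fs/cgroup on your machine.
import os

cgroup_dir = "/sys/fs/cgroup/system.slice"
print("cgroup id (inode):", os.stat(cgroup_dir).st_ino)

# The same number should appear as a key in read_map/write_map/net_stats_map
# once a process in that cgroup does I/O.
```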
Deleted file (92 lines removed):

@@ -1,92 +0,0 @@

```python
import logging

from pythonbpf import bpf, map, section, bpfglobal, struct, compile
from pythonbpf.maps import HashMap
from pythonbpf.helper import get_current_cgroup_id
from ctypes import c_int32, c_uint64
from vmlinux import struct_pt_regs


@bpf
@struct
class read_stats:
    bytes: c_uint64
    ops: c_uint64


@bpf
@struct
class write_stats:
    bytes: c_uint64
    ops: c_uint64


@bpf
@map
def read_map() -> HashMap:
    return HashMap(key=c_uint64, value=read_stats, max_entries=1024)


@bpf
@map
def write_map() -> HashMap:
    return HashMap(key=c_uint64, value=write_stats, max_entries=1024)


#
# READ PROBE
#
@bpf
@section("kprobe/vfs_read")
def trace_read(ctx: struct_pt_regs) -> c_int32:
    cg = get_current_cgroup_id()
    count = c_uint64(ctx.dx)
    ptr = read_map.lookup(cg)

    if ptr:
        s = read_stats()
        s.bytes = ptr.bytes + count
        s.ops = ptr.ops + 1
        read_map.update(cg, ptr)
    else:
        print("read init")
        s = read_stats()
        s.bytes = count
        s.ops = c_uint64(1)
        read_map.update(cg, s)

    return c_int32(0)


#
# WRITE PROBE
#
@bpf
@section("kprobe/vfs_write")
def trace_write(ctx1: struct_pt_regs) -> c_int32:
    cg = get_current_cgroup_id()
    count = c_uint64(ctx1.dx)
    ptr = write_map.lookup(cg)

    if ptr:
        s = write_stats()
        s.bytes = ptr.bytes + count
        s.ops = ptr.ops + 1
        write_map.update(cg, s)
    else:
        print("write init")
        s = write_stats()
        s.bytes = count
        s.ops = c_uint64(1)
        write_map.update(cg, s)

    return c_int32(0)


@bpf
@bpfglobal
def LICENSE() -> str:
    return "GPL"


compile(loglevel=logging.INFO)
```
BCC-Examples/container-monitor/tui.py (new file, 752 lines)
@@ -0,0 +1,752 @@

```python
"""Terminal User Interface for container monitoring."""

import time
import curses
import threading
from typing import Optional, List
from data_collection import ContainerDataCollector
from web_dashboard import WebDashboard


def _safe_addstr(stdscr, y: int, x: int, text: str, *args):
    """Safely add string to screen with bounds checking."""
    try:
        height, width = stdscr.getmaxyx()
        if 0 <= y < height and 0 <= x < width:
            # Truncate text to fit
            max_len = width - x - 1
            if max_len > 0:
                stdscr.addstr(y, x, text[:max_len], *args)
    except curses.error:
        pass


def _draw_fancy_header(stdscr, title: str, subtitle: str):
    """Draw a fancy header with title and subtitle."""
    height, width = stdscr.getmaxyx()

    # Top border
    _safe_addstr(stdscr, 0, 0, "═" * width, curses.color_pair(6) | curses.A_BOLD)

    # Title
    _safe_addstr(
        stdscr,
        0,
        max(0, (width - len(title)) // 2),
        f" {title} ",
        curses.color_pair(6) | curses.A_BOLD,
    )

    # Subtitle
    _safe_addstr(
        stdscr,
        1,
        max(0, (width - len(subtitle)) // 2),
        subtitle,
        curses.color_pair(1),
    )

    # Bottom border
    _safe_addstr(stdscr, 2, 0, "═" * width, curses.color_pair(6))


def _draw_metric_box(
    stdscr,
    y: int,
    x: int,
    width: int,
    label: str,
    value: str,
    detail: str,
    color_pair: int,
):
    """Draw a fancy box for displaying a metric."""
    height, _ = stdscr.getmaxyx()

    if y + 4 >= height:
        return

    # Top border
    _safe_addstr(
        stdscr, y, x, "┌" + "─" * (width - 2) + "┐", color_pair | curses.A_BOLD
    )

    # Label
    _safe_addstr(stdscr, y + 1, x, "│", color_pair | curses.A_BOLD)
    _safe_addstr(stdscr, y + 1, x + 2, label, color_pair | curses.A_BOLD)
    _safe_addstr(stdscr, y + 1, x + width - 1, "│", color_pair | curses.A_BOLD)

    # Value
    _safe_addstr(stdscr, y + 2, x, "│", color_pair | curses.A_BOLD)
    _safe_addstr(stdscr, y + 2, x + 4, value, curses.color_pair(2) | curses.A_BOLD)
    _safe_addstr(
        stdscr,
        y + 2,
        min(x + width - len(detail) - 3, x + width - 2),
        detail,
        color_pair | curses.A_BOLD,
    )
    _safe_addstr(stdscr, y + 2, x + width - 1, "│", color_pair | curses.A_BOLD)

    # Bottom border
    _safe_addstr(
        stdscr, y + 3, x, "└" + "─" * (width - 2) + "┘", color_pair | curses.A_BOLD
    )


def _draw_section_header(stdscr, y: int, title: str, color_pair: int):
    """Draw a section header."""
    height, width = stdscr.getmaxyx()

    if y >= height:
        return

    _safe_addstr(stdscr, y, 2, title, curses.color_pair(color_pair) | curses.A_BOLD)
    _safe_addstr(
        stdscr,
        y,
        len(title) + 3,
        "─" * (width - len(title) - 5),
        curses.color_pair(color_pair) | curses.A_BOLD,
    )


def _calculate_rates(history: List) -> dict:
    """Calculate per-second rates from history."""
    if len(history) < 2:
        return {
            "syscalls_per_sec": 0.0,
            "rx_bytes_per_sec": 0.0,
            "tx_bytes_per_sec": 0.0,
            "rx_pkts_per_sec": 0.0,
            "tx_pkts_per_sec": 0.0,
            "read_bytes_per_sec": 0.0,
            "write_bytes_per_sec": 0.0,
            "read_ops_per_sec": 0.0,
            "write_ops_per_sec": 0.0,
        }

    # Calculate delta between last two samples
    recent = history[-1]
    previous = history[-2]
    time_delta = recent.timestamp - previous.timestamp

    if time_delta <= 0:
        time_delta = 1.0

    return {
        "syscalls_per_sec": (recent.syscall_count - previous.syscall_count)
        / time_delta,
        "rx_bytes_per_sec": (recent.rx_bytes - previous.rx_bytes) / time_delta,
        "tx_bytes_per_sec": (recent.tx_bytes - previous.tx_bytes) / time_delta,
        "rx_pkts_per_sec": (recent.rx_packets - previous.rx_packets) / time_delta,
        "tx_pkts_per_sec": (recent.tx_packets - previous.tx_packets) / time_delta,
        "read_bytes_per_sec": (recent.read_bytes - previous.read_bytes) / time_delta,
        "write_bytes_per_sec": (recent.write_bytes - previous.write_bytes) / time_delta,
        "read_ops_per_sec": (recent.read_ops - previous.read_ops) / time_delta,
        "write_ops_per_sec": (recent.write_ops - previous.write_ops) / time_delta,
    }


def _format_bytes(bytes_val: float) -> str:
    """Format bytes into human-readable string."""
    if bytes_val < 0:
        bytes_val = 0
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if bytes_val < 1024.0:
            return f"{bytes_val:.1f}{unit}"
        bytes_val /= 1024.0
    return f"{bytes_val:.1f}PB"


def _draw_bar_graph_enhanced(
    stdscr,
    y: int,
    x: int,
    width: int,
    height: int,
    data: List[float],
    color_pair: int,
):
    """Draw an enhanced bar graph with axis and scale."""
    screen_height, screen_width = stdscr.getmaxyx()

    if not data or width < 2 or y + height >= screen_height:
        return

    # Calculate statistics
    max_val = max(data) if max(data) > 0 else 1
    min_val = min(data)
    avg_val = sum(data) / len(data)

    # Take last 'width - 12' data points (leave room for Y-axis)
    graph_width = max(1, width - 12)
    recent_data = data[-graph_width:] if len(data) > graph_width else data

    # Draw Y-axis labels (with bounds checking)
    if y < screen_height:
        _safe_addstr(
            stdscr, y, x, f"│{_format_bytes(max_val):>9}", curses.color_pair(7)
        )
    if y + height // 2 < screen_height:
        _safe_addstr(
            stdscr,
            y + height // 2,
            x,
            f"│{_format_bytes(avg_val):>9}",
            curses.color_pair(7),
        )
    if y + height - 1 < screen_height:
        _safe_addstr(
            stdscr,
            y + height - 1,
            x,
            f"│{_format_bytes(min_val):>9}",
            curses.color_pair(7),
        )

    # Draw bars
    for row in range(height):
        if y + row >= screen_height:
            break

        threshold = (height - row) / height
        bar_line = ""

        for val in recent_data:
            normalized = val / max_val if max_val > 0 else 0
            if normalized >= threshold:
                bar_line += "█"
            elif normalized >= threshold - 0.15:
                bar_line += "▓"
            elif normalized >= threshold - 0.35:
                bar_line += "▒"
            elif normalized >= threshold - 0.5:
                bar_line += "░"
            else:
                bar_line += " "

        _safe_addstr(stdscr, y + row, x + 11, bar_line, color_pair)

    # Draw X-axis
    if y + height < screen_height:
        _safe_addstr(
            stdscr,
            y + height,
            x + 10,
            "├" + "─" * len(recent_data),
            curses.color_pair(7),
        )
        _safe_addstr(
            stdscr,
            y + height,
            x + 10 + len(recent_data),
            "→ time",
            curses.color_pair(7),
        )


def _draw_labeled_graph(
    stdscr,
    y: int,
    x: int,
    width: int,
    height: int,
    label: str,
    rate: str,
    detail: str,
    data: List[float],
    color_pair: int,
    description: str,
):
    """Draw a graph with labels and legend."""
    screen_height, screen_width = stdscr.getmaxyx()

    if y >= screen_height or y + height + 2 >= screen_height:
        return

    # Header with metrics
    _safe_addstr(stdscr, y, x, label, curses.color_pair(1) | curses.A_BOLD)
    _safe_addstr(stdscr, y, x + len(label) + 2, rate, curses.color_pair(2))
    _safe_addstr(
        stdscr, y, x + len(label) + len(rate) + 4, detail, curses.color_pair(7)
    )

    # Draw the graph
    if len(data) > 1:
        _draw_bar_graph_enhanced(stdscr, y + 1, x, width, height, data, color_pair)
    else:
        _safe_addstr(stdscr, y + 2, x + 2, "Collecting data...", curses.color_pair(7))

    # Graph legend
    if y + height + 1 < screen_height:
        _safe_addstr(
            stdscr, y + height + 1, x, f"└─ {description}", curses.color_pair(7)
        )


class ContainerMonitorTUI:
    """TUI for container monitoring with cgroup selection and live graphs."""

    def __init__(self, collector: ContainerDataCollector):
        self.collector = collector
        self.selected_cgroup: Optional[int] = None
        self.current_screen = "selection"  # "selection" or "monitoring"
        self.selected_index = 0
        self.scroll_offset = 0
        self.web_dashboard = None
        self.web_thread = None

    def run(self):
        """Run the TUI application."""
        curses.wrapper(self._main_loop)

    def _main_loop(self, stdscr):
        """Main curses loop."""
        # Configure curses
        curses.curs_set(0)  # Hide cursor
        stdscr.nodelay(True)  # Non-blocking input
        stdscr.timeout(100)  # Refresh every 100ms

        # Initialize colors
        curses.start_color()
        curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
        curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
        curses.init_pair(4, curses.COLOR_RED, curses.COLOR_BLACK)
        curses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
        curses.init_pair(6, curses.COLOR_WHITE, curses.COLOR_BLUE)
        curses.init_pair(7, curses.COLOR_BLUE, curses.COLOR_BLACK)
        curses.init_pair(8, curses.COLOR_WHITE, curses.COLOR_CYAN)

        while True:
            stdscr.clear()

            try:
                height, width = stdscr.getmaxyx()

                # Check minimum terminal size
                if height < 25 or width < 80:
                    msg = "Terminal too small! Minimum: 80x25"
                    stdscr.attron(curses.color_pair(4) | curses.A_BOLD)
                    stdscr.addstr(
                        height // 2, max(0, (width - len(msg)) // 2), msg[: width - 1]
                    )
                    stdscr.attroff(curses.color_pair(4) | curses.A_BOLD)
                    stdscr.refresh()
                    key = stdscr.getch()
                    if key == ord("q") or key == ord("Q"):
                        break
                    continue

                if self.current_screen == "selection":
                    self._draw_selection_screen(stdscr)
                elif self.current_screen == "monitoring":
                    self._draw_monitoring_screen(stdscr)

                stdscr.refresh()

                # Handle input
                key = stdscr.getch()
                if key != -1:
                    if not self._handle_input(key, stdscr):
                        break  # Exit requested

            except KeyboardInterrupt:
                break
            except curses.error:
                # Curses error - likely terminal too small, just continue
                pass
            except Exception as e:
                # Show error briefly
                height, width = stdscr.getmaxyx()
                error_msg = f"Error: {str(e)[: width - 10]}"
                stdscr.addstr(0, 0, error_msg[: width - 1])
                stdscr.refresh()
                time.sleep(1)

    def _draw_selection_screen(self, stdscr):
        """Draw the cgroup selection screen."""
        height, width = stdscr.getmaxyx()

        # Draw fancy header box
        _draw_fancy_header(stdscr, "🐳 CONTAINER MONITOR", "Select a Cgroup to Monitor")

        # Instructions
        instructions = (
            "↑↓: Navigate | ENTER: Select | w: Web Mode | q: Quit | r: Refresh"
        )
        _safe_addstr(
            stdscr,
            3,
            max(0, (width - len(instructions)) // 2),
            instructions,
            curses.color_pair(3),
        )

        # Get cgroups
        cgroups = self.collector.get_all_cgroups()

        if not cgroups:
            msg = "No cgroups found. Waiting for activity..."
            _safe_addstr(
                stdscr,
                height // 2,
                max(0, (width - len(msg)) // 2),
                msg,
                curses.color_pair(4),
            )
            return

        # Sort cgroups by name
        cgroups.sort(key=lambda c: c.name)

        # Adjust selection bounds
        if self.selected_index >= len(cgroups):
            self.selected_index = len(cgroups) - 1
        if self.selected_index < 0:
            self.selected_index = 0

        # Calculate visible range
        list_height = max(1, height - 8)
        if self.selected_index < self.scroll_offset:
            self.scroll_offset = self.selected_index
        elif self.selected_index >= self.scroll_offset + list_height:
            self.scroll_offset = self.selected_index - list_height + 1

        # Calculate max name length and ID width for alignment
        max_name_len = min(50, max(len(cg.name) for cg in cgroups))
        max_id_len = max(len(str(cg.id)) for cg in cgroups)

        # Draw cgroup list with fancy borders
        start_y = 5
        _safe_addstr(
            stdscr, start_y, 2, "╔" + "═" * (width - 6) + "╗", curses.color_pair(1)
        )

        # Header row
        header = f" {'CGROUP NAME':<{max_name_len}} │ {'ID':>{max_id_len}} "
        _safe_addstr(stdscr, start_y + 1, 2, "║", curses.color_pair(1))
        _safe_addstr(
            stdscr, start_y + 1, 3, header, curses.color_pair(1) | curses.A_BOLD
        )
        _safe_addstr(stdscr, start_y + 1, width - 3, "║", curses.color_pair(1))

        # Separator
        _safe_addstr(
            stdscr, start_y + 2, 2, "╟" + "─" * (width - 6) + "╢", curses.color_pair(1)
        )

        for i in range(list_height):
            idx = self.scroll_offset + i
            y = start_y + 3 + i

            if y >= height - 2:
                break

            _safe_addstr(stdscr, y, 2, "║", curses.color_pair(1))
            _safe_addstr(stdscr, y, width - 3, "║", curses.color_pair(1))

            if idx >= len(cgroups):
                continue

            cgroup = cgroups[idx]

            # Truncate name if too long
            display_name = (
                cgroup.name
                if len(cgroup.name) <= max_name_len
                else cgroup.name[: max_name_len - 3] + "..."
            )

            if idx == self.selected_index:
                # Highlight selected with proper alignment
                line = f" ► {display_name:<{max_name_len}} │ {cgroup.id:>{max_id_len}} "
                _safe_addstr(stdscr, y, 3, line, curses.color_pair(8) | curses.A_BOLD)
            else:
                line = f" {display_name:<{max_name_len}} │ {cgroup.id:>{max_id_len}} "
                _safe_addstr(stdscr, y, 3, line, curses.color_pair(7))

        # Bottom border
        bottom_y = min(start_y + 3 + list_height, height - 3)
        _safe_addstr(
            stdscr, bottom_y, 2, "╚" + "═" * (width - 6) + "╝", curses.color_pair(1)
        )

        # Footer
        footer = f"Total: {len(cgroups)} cgroups"
        if len(cgroups) > list_height:
            footer += f" │ Showing {self.scroll_offset + 1}-{min(self.scroll_offset + list_height, len(cgroups))}"
        _safe_addstr(
            stdscr,
            height - 2,
            max(0, (width - len(footer)) // 2),
            footer,
            curses.color_pair(1),
        )

    def _draw_monitoring_screen(self, stdscr):
        """Draw the monitoring screen for selected cgroup."""
        height, width = stdscr.getmaxyx()

        if self.selected_cgroup is None:
            return

        # Get current stats
        stats = self.collector.get_stats_for_cgroup(self.selected_cgroup)
        history = self.collector.get_history(self.selected_cgroup)

        # Draw fancy header
        _draw_fancy_header(
            stdscr, f"📊 {stats.cgroup_name[:40]}", "Live Performance Metrics"
        )

        # Instructions
        instructions = "ESC/b: Back to List | w: Web Mode | q: Quit"
        _safe_addstr(
            stdscr,
            3,
            max(0, (width - len(instructions)) // 2),
            instructions,
            curses.color_pair(3),
        )

        # Calculate metrics for rate display
        rates = _calculate_rates(history)

        y = 5

        # Syscall count in a fancy box
        if y + 4 < height:
            _draw_metric_box(
                stdscr,
                y,
                2,
                min(width - 4, 80),
                "⚡ SYSTEM CALLS",
                f"{stats.syscall_count:,}",
                f"Rate: {rates['syscalls_per_sec']:.1f}/sec",
                curses.color_pair(5),
            )
            y += 4

        # Network I/O Section
        if y + 8 < height:
            _draw_section_header(stdscr, y, "🌐 NETWORK I/O", 1)
            y += 1

            # RX graph
            rx_label = f"RX: {_format_bytes(stats.rx_bytes)}"
            rx_rate = f"{_format_bytes(rates['rx_bytes_per_sec'])}/s"
            rx_pkts = f"{stats.rx_packets:,} pkts ({rates['rx_pkts_per_sec']:.1f}/s)"

            _draw_labeled_graph(
                stdscr,
                y,
                2,
                width - 4,
                4,
                rx_label,
                rx_rate,
                rx_pkts,
                [s.rx_bytes for s in history],
                curses.color_pair(2),
                "Received Traffic (last 100 samples)",
            )
            y += 6

            # TX graph
            if y + 8 < height:
                tx_label = f"TX: {_format_bytes(stats.tx_bytes)}"
                tx_rate = f"{_format_bytes(rates['tx_bytes_per_sec'])}/s"
                tx_pkts = f"{stats.tx_packets:,} pkts ({rates['tx_pkts_per_sec']:.1f}/s)"

                _draw_labeled_graph(
                    stdscr,
                    y,
                    2,
                    width - 4,
                    4,
                    tx_label,
                    tx_rate,
                    tx_pkts,
                    [s.tx_bytes for s in history],
                    curses.color_pair(3),
                    "Transmitted Traffic (last 100 samples)",
                )
                y += 6

        # File I/O Section
        if y + 8 < height:
            _draw_section_header(stdscr, y, "💾 FILE I/O", 1)
            y += 1

            # Read graph
            read_label = f"READ: {_format_bytes(stats.read_bytes)}"
            read_rate = f"{_format_bytes(rates['read_bytes_per_sec'])}/s"
            read_ops = f"{stats.read_ops:,} ops ({rates['read_ops_per_sec']:.1f}/s)"

            _draw_labeled_graph(
                stdscr,
                y,
                2,
                width - 4,
                4,
                read_label,
                read_rate,
                read_ops,
                [s.read_bytes for s in history],
                curses.color_pair(4),
                "Read Operations (last 100 samples)",
            )
            y += 6

            # Write graph
            if y + 8 < height:
                write_label = f"WRITE: {_format_bytes(stats.write_bytes)}"
                write_rate = f"{_format_bytes(rates['write_bytes_per_sec'])}/s"
                write_ops = f"{stats.write_ops:,} ops ({rates['write_ops_per_sec']:.1f}/s)"

                _draw_labeled_graph(
                    stdscr,
                    y,
                    2,
                    width - 4,
                    4,
                    write_label,
                    write_rate,
                    write_ops,
                    [s.write_bytes for s in history],
                    curses.color_pair(5),
                    "Write Operations (last 100 samples)",
                )

    def _launch_web_mode(self, stdscr):
        """Launch web dashboard mode."""
        height, width = stdscr.getmaxyx()

        # Show transition message
        stdscr.clear()

        msg1 = "🌐 LAUNCHING WEB DASHBOARD"
        _safe_addstr(
            stdscr,
            height // 2 - 2,
            max(0, (width - len(msg1)) // 2),
            msg1,
            curses.color_pair(6) | curses.A_BOLD,
        )

        msg2 = "Server starting at http://localhost:8050"
        _safe_addstr(
            stdscr,
            height // 2,
            max(0, (width - len(msg2)) // 2),
            msg2,
            curses.color_pair(2),
        )

        msg3 = "Press 'q' to stop web server and return to TUI"
        _safe_addstr(
            stdscr,
            height // 2 + 2,
            max(0, (width - len(msg3)) // 2),
            msg3,
            curses.color_pair(3),
        )

        stdscr.refresh()
        time.sleep(1)

        try:
            # Create and start web dashboard
            self.web_dashboard = WebDashboard(
                self.collector, selected_cgroup=self.selected_cgroup
            )

            # Start in background thread
            self.web_thread = threading.Thread(
                target=self.web_dashboard.run, daemon=True
            )
            self.web_thread.start()

            time.sleep(2)  # Give server time to start

            # Wait for user to press 'q' to return
            msg4 = "Web dashboard running at http://localhost:8050"
            msg5 = "Press 'q' to return to TUI"
            _safe_addstr(
                stdscr,
                height // 2 + 4,
                max(0, (width - len(msg4)) // 2),
                msg4,
                curses.color_pair(1) | curses.A_BOLD,
            )
            _safe_addstr(
                stdscr,
                height // 2 + 5,
                max(0, (width - len(msg5)) // 2),
                msg5,
                curses.color_pair(3) | curses.A_BOLD,
            )
            stdscr.refresh()

            stdscr.nodelay(False)  # Blocking mode
            while True:
                key = stdscr.getch()
                if key == ord("q") or key == ord("Q"):
                    break

            # Stop web server
            if self.web_dashboard:
                self.web_dashboard.stop()

        except Exception as e:
            error_msg = f"Error starting web dashboard: {str(e)}"
            _safe_addstr(
                stdscr,
                height // 2 + 4,
                max(0, (width - len(error_msg)) // 2),
                error_msg,
                curses.color_pair(4),
            )
            stdscr.refresh()
            time.sleep(3)

        # Restore TUI settings
        stdscr.nodelay(True)
        stdscr.timeout(100)

    def _handle_input(self, key: int, stdscr) -> bool:
        """Handle keyboard input. Returns False to exit."""
        if key == ord("q") or key == ord("Q"):
            return False  # Exit

        if key == ord("w") or key == ord("W"):
            # Launch web mode
            self._launch_web_mode(stdscr)
            return True

        if self.current_screen == "selection":
            if key == curses.KEY_UP:
                self.selected_index = max(0, self.selected_index - 1)
            elif key == curses.KEY_DOWN:
                cgroups = self.collector.get_all_cgroups()
                self.selected_index = min(len(cgroups) - 1, self.selected_index + 1)
            elif key == ord("\n") or key == curses.KEY_ENTER or key == 10:
                # Select cgroup
                cgroups = self.collector.get_all_cgroups()
                if cgroups and 0 <= self.selected_index < len(cgroups):
                    cgroups.sort(key=lambda c: c.name)
                    self.selected_cgroup = cgroups[self.selected_index].id
                    self.current_screen = "monitoring"
            elif key == ord("r") or key == ord("R"):
                # Force refresh cache
                self.collector._cgroup_cache_time = 0

        elif self.current_screen == "monitoring":
            if key == 27 or key == ord("b") or key == ord("B"):  # ESC or 'b'
                self.current_screen = "selection"
                self.selected_cgroup = None

        return True  # Continue running
```
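To see what `_calculate_rates` produces, here is a tiny standalone check with two fabricated samples one second apart (importing `tui` assumes its dependencies, including `dash` for `web_dashboard`, are installed; all numbers are invented for illustration):

```python
# Tiny worked example for _calculate_rates: two fabricated history samples.
from data_collection import ContainerStats
from tui import _calculate_rates

a = ContainerStats(cgroup_id=1, cgroup_name="demo",
                   rx_bytes=1_000, syscall_count=50, timestamp=100.0)
b = ContainerStats(cgroup_id=1, cgroup_name="demo",
                   rx_bytes=3_000, syscall_count=170, timestamp=101.0)

rates = _calculate_rates([a, b])
print(rates["rx_bytes_per_sec"])   # (3000 - 1000) / 1.0 = 2000.0
print(rates["syscalls_per_sec"])   # (170 - 50) / 1.0 = 120.0
```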
BCC-Examples/container-monitor/web_dashboard.py (new file, 826 lines)
@@ -0,0 +1,826 @@

```python
"""Beautiful web dashboard for container monitoring using Plotly Dash."""

import dash
from dash import dcc, html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from typing import Optional
from data_collection import ContainerDataCollector


class WebDashboard:
    """Beautiful web dashboard for container monitoring."""

    def __init__(
        self,
        collector: ContainerDataCollector,
        selected_cgroup: Optional[int] = None,
        host: str = "0.0.0.0",
        port: int = 8050,
    ):
        self.collector = collector
        self.selected_cgroup = selected_cgroup
        self.host = host
        self.port = port

        # Suppress Dash dev tools and debug output
        self.app = dash.Dash(
            __name__,
            title="pythonBPF Container Monitor",
            suppress_callback_exceptions=True,
        )

        self._setup_layout()
        self._setup_callbacks()
        self._running = False

    def _setup_layout(self):
        """Create the dashboard layout."""
        self.app.layout = html.Div(
            [
                # Futuristic Header with pythonBPF branding
                html.Div(
                    [
                        html.Div(
                            [
                                html.Div(
                                    [
                                        html.Span(
                                            "python",
                                            style={
                                                "fontSize": "52px",
                                                "fontWeight": "300",
                                                "color": "#00ff88",
                                                "fontFamily": "'Courier New', monospace",
                                                "textShadow": "0 0 20px rgba(0,255,136,0.5)",
                                            },
                                        ),
                                        html.Span(
                                            "BPF",
                                            style={
                                                "fontSize": "52px",
                                                "fontWeight": "900",
                                                "color": "#00d4ff",
                                                "fontFamily": "'Courier New', monospace",
                                                "textShadow": "0 0 20px rgba(0,212,255,0.5)",
                                            },
                                        ),
                                    ],
                                    style={"marginBottom": "5px"},
                                ),
                                html.Div(
                                    "CONTAINER PERFORMANCE MONITOR",
                                    style={
                                        "fontSize": "16px",
                                        "letterSpacing": "8px",
                                        "color": "#8899ff",
                                        "fontWeight": "300",
                                        "fontFamily": "'Courier New', monospace",
                                    },
                                ),
                            ],
                            style={
                                "textAlign": "center",
                            },
                        ),
                        html.Div(
                            id="cgroup-name",
                            style={
                                "textAlign": "center",
                                "color": "#00ff88",
                                "fontSize": "20px",
                                "marginTop": "15px",
                                "fontFamily": "'Courier New', monospace",
                                "fontWeight": "bold",
                                "textShadow": "0 0 10px rgba(0,255,136,0.3)",
                            },
                        ),
                    ],
                    style={
                        "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 50%, #0a0e27 100%)",
                        "padding": "40px 20px",
                        "borderRadius": "0",
                        "marginBottom": "0",
                        "boxShadow": "0 10px 40px rgba(0,212,255,0.2)",
                        "border": "1px solid rgba(0,212,255,0.3)",
                        "borderTop": "3px solid #00d4ff",
                        "borderBottom": "3px solid #00ff88",
                        "position": "relative",
                        "overflow": "hidden",
                    },
                ),
                # Cgroup selector (if no cgroup selected)
                html.Div(
                    [
                        html.Label(
                            "SELECT CGROUP:",
                            style={
                                "fontSize": "14px",
                                "fontWeight": "bold",
                                "color": "#00d4ff",
                                "marginRight": "15px",
                                "fontFamily": "'Courier New', monospace",
                                "letterSpacing": "2px",
                            },
                        ),
                        dcc.Dropdown(
                            id="cgroup-selector",
                            style={
                                "width": "600px",
                                "display": "inline-block",
                                "background": "#1a1f3a",
                                "border": "1px solid #00d4ff",
                            },
                        ),
                    ],
                    id="selector-container",
                    style={
                        "textAlign": "center",
                        "marginTop": "30px",
                        "marginBottom": "30px",
                        "padding": "20px",
                        "background": "rgba(26,31,58,0.5)",
                        "borderRadius": "10px",
                        "border": "1px solid rgba(0,212,255,0.2)",
                        "display": "block" if self.selected_cgroup is None else "none",
                    },
                ),
                # Stats cards row
                html.Div(
                    [
                        self._create_stat_card(
                            "syscall-card", "⚡ SYSCALLS", "#00ff88"
                        ),
                        self._create_stat_card("network-card", "🌐 NETWORK", "#00d4ff"),
                        self._create_stat_card("file-card", "💾 FILE I/O", "#ff0088"),
                    ],
                    style={
                        "display": "flex",
                        "justifyContent": "space-around",
                        "marginBottom": "30px",
                        "marginTop": "30px",
                        "gap": "25px",
                        "flexWrap": "wrap",
                        "padding": "0 20px",
                    },
                ),
                # Graphs container
                html.Div(
                    [
                        # Network graphs
                        html.Div(
                            [
                                html.Div(
                                    [
                                        html.Span("🌐 ", style={"fontSize": "24px"}),
                                        html.Span(
                                            "NETWORK",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "fontWeight": "bold",
                                            },
                                        ),
                                        html.Span(
                                            " I/O",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "color": "#00d4ff",
                                            },
                                        ),
                                    ],
                                    style={
                                        "color": "#ffffff",
                                        "fontSize": "20px",
                                        "borderBottom": "2px solid #00d4ff",
                                        "paddingBottom": "15px",
                                        "marginBottom": "25px",
                                        "textShadow": "0 0 10px rgba(0,212,255,0.3)",
                                    },
                                ),
                                dcc.Graph(
                                    id="network-graph", style={"height": "400px"}
                                ),
                            ],
                            style={
                                "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
                                "padding": "30px",
                                "borderRadius": "15px",
                                "boxShadow": "0 8px 32px rgba(0,212,255,0.15)",
                                "marginBottom": "30px",
                                "border": "1px solid rgba(0,212,255,0.2)",
                            },
                        ),
                        # File I/O graphs
                        html.Div(
                            [
                                html.Div(
                                    [
                                        html.Span("💾 ", style={"fontSize": "24px"}),
                                        html.Span(
                                            "FILE",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "fontWeight": "bold",
                                            },
                                        ),
                                        html.Span(
                                            " I/O",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "color": "#ff0088",
                                            },
                                        ),
                                    ],
                                    style={
                                        "color": "#ffffff",
                                        "fontSize": "20px",
                                        "borderBottom": "2px solid #ff0088",
                                        "paddingBottom": "15px",
                                        "marginBottom": "25px",
                                        "textShadow": "0 0 10px rgba(255,0,136,0.3)",
                                    },
                                ),
                                dcc.Graph(
                                    id="file-io-graph", style={"height": "400px"}
                                ),
                            ],
                            style={
                                "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
                                "padding": "30px",
                                "borderRadius": "15px",
                                "boxShadow": "0 8px 32px rgba(255,0,136,0.15)",
                                "marginBottom": "30px",
                                "border": "1px solid rgba(255,0,136,0.2)",
                            },
                        ),
                        # Combined time series
                        html.Div(
                            [
                                html.Div(
                                    [
                                        html.Span("📈 ", style={"fontSize": "24px"}),
                                        html.Span(
                                            "REAL-TIME",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "fontWeight": "bold",
                                            },
                                        ),
                                        html.Span(
                                            " METRICS",
                                            style={
                                                "fontFamily": "'Courier New', monospace",
                                                "letterSpacing": "3px",
                                                "color": "#00ff88",
                                            },
                                        ),
                                    ],
                                    style={
                                        "color": "#ffffff",
                                        "fontSize": "20px",
                                        "borderBottom": "2px solid #00ff88",
                                        "paddingBottom": "15px",
                                        "marginBottom": "25px",
                                        "textShadow": "0 0 10px rgba(0,255,136,0.3)",
                                    },
                                ),
                                dcc.Graph(
                                    id="timeseries-graph", style={"height": "500px"}
                                ),
                            ],
                            style={
                                "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
                                "padding": "30px",
                                "borderRadius": "15px",
                                "boxShadow": "0 8px 32px rgba(0,255,136,0.15)",
                                "border": "1px solid rgba(0,255,136,0.2)",
                            },
                        ),
                    ],
                    style={"padding": "0 20px"},
                ),
                # Footer with pythonBPF branding
                html.Div(
                    [
                        html.Div(
                            [
                                html.Span(
                                    "Powered by ",
                                    style={"color": "#8899ff", "fontSize": "12px"},
                                ),
                                html.Span(
                                    "pythonBPF",
                                    style={
                                        "color": "#00d4ff",
                                        "fontSize": "14px",
                                        "fontWeight": "bold",
                                        "fontFamily": "'Courier New', monospace",
                                    },
                                ),
                                html.Span(
                                    " | eBPF Container Monitoring",
                                    style={
                                        "color": "#8899ff",
                                        "fontSize": "12px",
                                        "marginLeft": "10px",
                                    },
                                ),
                            ]
                        )
                    ],
                    style={
                        "textAlign": "center",
                        "padding": "20px",
                        "marginTop": "40px",
                        "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
                        "borderTop": "1px solid rgba(0,212,255,0.2)",
                    },
                ),
                # Auto-update interval
                dcc.Interval(id="interval-component", interval=1000, n_intervals=0),
            ],
            style={
                "padding": "0",
                "fontFamily": "'Segoe UI', 'Courier New', monospace",
                "background": "linear-gradient(to bottom, #050813 0%, #0a0e27 100%)",
                "minHeight": "100vh",
                "margin": "0",
            },
        )

    def _create_stat_card(self, card_id: str, title: str, color: str):
        """Create a statistics card with futuristic styling."""
        return html.Div(
            [
                html.H3(
                    title,
                    style={
                        "color": color,
                        "fontSize": "16px",
                        "marginBottom": "20px",
                        "fontWeight": "bold",
                        "fontFamily": "'Courier New', monospace",
                        "letterSpacing": "2px",
                        "textShadow": f"0 0 10px {color}50",
                    },
                ),
                html.Div(
                    [
                        html.Div(
                            id=f"{card_id}-value",
                            style={
                                "fontSize": "42px",
                                "fontWeight": "bold",
                                "color": "#ffffff",
                                "marginBottom": "10px",
                                "fontFamily": "'Courier New', monospace",
                                "textShadow": f"0 0 20px {color}40",
                            },
                        ),
                        html.Div(
                            id=f"{card_id}-rate",
                            style={
                                "fontSize": "14px",
                                "color": "#8899ff",
                                "fontFamily": "'Courier New', monospace",
                            },
                        ),
                    ]
                ),
            ],
            style={
                "flex": "1",
                "minWidth": "280px",
                "background": "linear-gradient(135deg, #0a0e27 0%, #1a1f3a 100%)",
                "padding": "30px",
                "borderRadius": "15px",
                "boxShadow": f"0 8px 32px {color}20",
                "border": f"1px solid {color}40",
                "borderLeft": f"4px solid {color}",
                "transition": "transform 0.3s, box-shadow 0.3s",
                "position": "relative",
                "overflow": "hidden",
            },
        )

    def _setup_callbacks(self):
        """Setup dashboard callbacks."""

        @self.app.callback(
            [Output("cgroup-selector", "options"), Output("cgroup-selector", "value")],
            [Input("interval-component", "n_intervals")],
        )
        def update_cgroup_selector(n):
            if self.selected_cgroup is not None:
                return [], self.selected_cgroup

            cgroups = self.collector.get_all_cgroups()
            options = [
                {"label": f"{cg.name} (ID: {cg.id})", "value": cg.id}
                for cg in sorted(cgroups, key=lambda c: c.name)
            ]
            value = options[0]["value"] if options else None

            if value and self.selected_cgroup is None:
                self.selected_cgroup = value

            return options, self.selected_cgroup

        @self.app.callback(
            Output("cgroup-selector", "value", allow_duplicate=True),
            [Input("cgroup-selector", "value")],
            prevent_initial_call=True,
        )
        def select_cgroup(value):
            if value:
                self.selected_cgroup = value
            return value

        @self.app.callback(
            [
                Output("cgroup-name", "children"),
                Output("syscall-card-value", "children"),
                Output("syscall-card-rate", "children"),
                Output("network-card-value", "children"),
                Output("network-card-rate", "children"),
                Output("file-card-value", "children"),
                Output("file-card-rate", "children"),
                Output("network-graph", "figure"),
                Output("file-io-graph", "figure"),
                Output("timeseries-graph", "figure"),
            ],
            [Input("interval-component", "n_intervals")],
        )
        def update_dashboard(n):
            if self.selected_cgroup is None:
                empty_fig = self._create_empty_figure(
                    "Select a cgroup to begin monitoring"
                )
                return (
                    "SELECT A CGROUP TO START",
                    "0",
                    "",
                    "0 B",
                    "",
                    "0 B",
                    "",
                    empty_fig,
                    empty_fig,
                    empty_fig,
                )

            try:
                stats = self.collector.get_stats_for_cgroup(self.selected_cgroup)
                history = self.collector.get_history(self.selected_cgroup)
                rates = self._calculate_rates(history)

                return (
                    f"► {stats.cgroup_name}",
                    f"{stats.syscall_count:,}",
                    f"{rates['syscalls_per_sec']:.1f} calls/sec",
                    f"{self._format_bytes(stats.rx_bytes + stats.tx_bytes)}",
                    f"↓ {self._format_bytes(rates['rx_bytes_per_sec'])}/s ↑ {self._format_bytes(rates['tx_bytes_per_sec'])}/s",
                    f"{self._format_bytes(stats.read_bytes + stats.write_bytes)}",
                    f"R: {self._format_bytes(rates['read_bytes_per_sec'])}/s W: {self._format_bytes(rates['write_bytes_per_sec'])}/s",
                    self._create_network_graph(history),
                    self._create_file_io_graph(history),
                    self._create_timeseries_graph(history),
                )
            except Exception as e:
                empty_fig = self._create_empty_figure(f"Error: {str(e)}")
                return (
                    "ERROR",
                    "0",
                    str(e),
                    "0 B",
                    "",
                    "0 B",
                    "",
                    empty_fig,
                    empty_fig,
                    empty_fig,
                )

    def _create_empty_figure(self, message: str):
        """Create an empty figure with a message."""
        fig = go.Figure()
        fig.update_layout(
            title=message,
            template="plotly_dark",
            paper_bgcolor="#0a0e27",
            plot_bgcolor="#0a0e27",
            font=dict(color="#8899ff", family="Courier New, monospace"),
        )
        return fig

    def _create_network_graph(self, history):
        """Create network I/O graph with futuristic styling."""
        if len(history) < 2:
            return self._create_empty_figure("Collecting data...")

        times = [i for i in range(len(history))]
        rx_bytes = [s.rx_bytes for s in history]
        tx_bytes = [s.tx_bytes for s in history]

        fig = make_subplots(
            rows=2,
            cols=1,
            subplot_titles=("RECEIVED (RX)", "TRANSMITTED (TX)"),
            vertical_spacing=0.15,
        )

        fig.add_trace(
            go.Scatter(
                x=times,
                y=rx_bytes,
                mode="lines",
                name="RX",
                fill="tozeroy",
                line=dict(color="#00d4ff", width=3, shape="spline"),
                fillcolor="rgba(0, 212, 255, 0.2)",
            ),
            row=1,
            col=1,
        )

        fig.add_trace(
            go.Scatter(
                x=times,
                y=tx_bytes,
                mode="lines",
                name="TX",
                fill="tozeroy",
                line=dict(color="#00ff88", width=3, shape="spline"),
                fillcolor="rgba(0, 255, 136, 0.2)",
            ),
            row=2,
            col=1,
        )

        fig.update_xaxes(title_text="Time (samples)", row=2, col=1, color="#8899ff")
        fig.update_yaxes(title_text="Bytes", row=1, col=1, color="#8899ff")
        fig.update_yaxes(title_text="Bytes", row=2, col=1, color="#8899ff")

        fig.update_layout(
            height=400,
            template="plotly_dark",
            paper_bgcolor="rgba(0,0,0,0)",
            plot_bgcolor="#0a0e27",
            showlegend=False,
            hovermode="x unified",
            font=dict(family="Courier New, monospace", color="#8899ff"),
        )

        return fig

    def _create_file_io_graph(self, history):
        """Create file I/O graph with futuristic styling."""
        if len(history) < 2:
            return self._create_empty_figure("Collecting data...")

        times = [i for i in range(len(history))]
        read_bytes = [s.read_bytes for s in history]
        write_bytes = [s.write_bytes for s in history]

        fig = make_subplots(
            rows=2,
            cols=1,
            subplot_titles=("READ OPERATIONS", "WRITE OPERATIONS"),
            vertical_spacing=0.15,
        )

        fig.add_trace(
            go.Scatter(
                x=times,
                y=read_bytes,
                mode="lines",
                name="Read",
                fill="tozeroy",
                line=dict(color="#ff0088", width=3, shape="spline"),
                fillcolor="rgba(255, 0, 136, 0.2)",
            ),
            row=1,
            col=1,
        )

        fig.add_trace(
            go.Scatter(
                x=times,
                y=write_bytes,
                mode="lines",
                name="Write",
                fill="tozeroy",
                line=dict(color="#8844ff", width=3, shape="spline"),
                fillcolor="rgba(136, 68, 255, 0.2)",
            ),
            row=2,
            col=1,
        )

        fig.update_xaxes(title_text="Time (samples)", row=2, col=1, color="#8899ff")
        fig.update_yaxes(title_text="Bytes", row=1, col=1, color="#8899ff")
        fig.update_yaxes(title_text="Bytes", row=2, col=1, color="#8899ff")

        fig.update_layout(
            height=400,
            template="plotly_dark",
            paper_bgcolor="rgba(0,0,0,0)",
            plot_bgcolor="#0a0e27",
            showlegend=False,
            hovermode="x unified",
            font=dict(family="Courier New, monospace", color="#8899ff"),
        )

        return fig

    def _create_timeseries_graph(self, history):
        """Create combined time series graph with futuristic styling."""
        if len(history) < 2:
            return self._create_empty_figure("Collecting data...")

        times = [i for i in range(len(history))]

        fig = make_subplots(
            rows=3,
            cols=1,
            subplot_titles=(
                "SYSTEM CALLS",
                "NETWORK TRAFFIC (Bytes)",
                "FILE I/O (Bytes)",
            ),
            vertical_spacing=0.1,
            specs=[
                [{"secondary_y": False}],
                [{"secondary_y": True}],
                [{"secondary_y": True}],
            ],
        )

        # Syscalls
        fig.add_trace(
            go.Scatter(
                x=times,
                y=[s.syscall_count for s in history],
                mode="lines",
                name="Syscalls",
                line=dict(color="#00ff88", width=3, shape="spline"),
            ),
            row=1,
            col=1,
        )

        # Network
        fig.add_trace(
            go.Scatter(
                x=times,
                y=[s.rx_bytes for s in history],
                mode="lines",
                name="RX",
                line=dict(color="#00d4ff", width=2, shape="spline"),
            ),
            row=2,
            col=1,
            secondary_y=False,
        )

        fig.add_trace(
            go.Scatter(
                x=times,
                y=[s.tx_bytes for s in history],
                mode="lines",
                name="TX",
                line=dict(color="#00ff88", width=2, shape="spline", dash="dot"),
            ),
            row=2,
            col=1,
            secondary_y=True,
        )

        # File I/O
        fig.add_trace(
            go.Scatter(
                x=times,
                y=[s.read_bytes for s in history],
```
|
||||
mode="lines",
|
||||
name="Read",
|
||||
line=dict(color="#ff0088", width=2, shape="spline"),
|
||||
),
|
||||
row=3,
|
||||
col=1,
|
||||
secondary_y=False,
|
||||
)
|
||||
|
||||
fig.add_trace(
|
||||
go.Scatter(
|
||||
x=times,
|
||||
y=[s.write_bytes for s in history],
|
||||
mode="lines",
|
||||
name="Write",
|
||||
line=dict(color="#8844ff", width=2, shape="spline", dash="dot"),
|
||||
),
|
||||
row=3,
|
||||
col=1,
|
||||
secondary_y=True,
|
||||
)
|
||||
|
||||
fig.update_xaxes(title_text="Time (samples)", row=3, col=1, color="#8899ff")
|
||||
fig.update_yaxes(title_text="Count", row=1, col=1, color="#8899ff")
|
||||
fig.update_yaxes(
|
||||
title_text="RX Bytes", row=2, col=1, secondary_y=False, color="#00d4ff"
|
||||
)
|
||||
fig.update_yaxes(
|
||||
title_text="TX Bytes", row=2, col=1, secondary_y=True, color="#00ff88"
|
||||
)
|
||||
fig.update_yaxes(
|
||||
title_text="Read Bytes", row=3, col=1, secondary_y=False, color="#ff0088"
|
||||
)
|
||||
fig.update_yaxes(
|
||||
title_text="Write Bytes", row=3, col=1, secondary_y=True, color="#8844ff"
|
||||
)
|
||||
|
||||
fig.update_layout(
|
||||
height=500,
|
||||
template="plotly_dark",
|
||||
paper_bgcolor="rgba(0,0,0,0)",
|
||||
plot_bgcolor="#0a0e27",
|
||||
hovermode="x unified",
|
||||
showlegend=True,
|
||||
legend=dict(
|
||||
orientation="h",
|
||||
yanchor="bottom",
|
||||
y=1.02,
|
||||
xanchor="right",
|
||||
x=1,
|
||||
font=dict(color="#8899ff"),
|
||||
),
|
||||
font=dict(family="Courier New, monospace", color="#8899ff"),
|
||||
)
|
||||
|
||||
return fig
|
||||
|
||||
def _calculate_rates(self, history):
|
||||
"""Calculate rates from history."""
|
||||
if len(history) < 2:
|
||||
return {
|
||||
"syscalls_per_sec": 0.0,
|
||||
"rx_bytes_per_sec": 0.0,
|
||||
"tx_bytes_per_sec": 0.0,
|
||||
"read_bytes_per_sec": 0.0,
|
||||
"write_bytes_per_sec": 0.0,
|
||||
}
|
||||
|
||||
recent = history[-1]
|
||||
previous = history[-2]
|
||||
time_delta = recent.timestamp - previous.timestamp
|
||||
|
||||
if time_delta <= 0:
|
||||
time_delta = 1.0
|
||||
|
||||
return {
|
||||
"syscalls_per_sec": max(
|
||||
0, (recent.syscall_count - previous.syscall_count) / time_delta
|
||||
),
|
||||
"rx_bytes_per_sec": max(
|
||||
0, (recent.rx_bytes - previous.rx_bytes) / time_delta
|
||||
),
|
||||
"tx_bytes_per_sec": max(
|
||||
0, (recent.tx_bytes - previous.tx_bytes) / time_delta
|
||||
),
|
||||
"read_bytes_per_sec": max(
|
||||
0, (recent.read_bytes - previous.read_bytes) / time_delta
|
||||
),
|
||||
"write_bytes_per_sec": max(
|
||||
0, (recent.write_bytes - previous.write_bytes) / time_delta
|
||||
),
|
||||
}
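A quick worked example of the delta-based rate computation above, with hypothetical sample values (the field names match the stats objects used by this dashboard):

```python
# Two consecutive samples taken 2.0 seconds apart:
#   previous.syscall_count = 1000, recent.syscall_count = 1200
#   time_delta = 2.0
# syscalls_per_sec = max(0, (1200 - 1000) / 2.0) = 100.0
# A counter reset would produce a negative delta, which max(0, ...) clamps to zero.
```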

    def _format_bytes(self, bytes_val: float) -> str:
        """Format bytes into human-readable string."""
        if bytes_val < 0:
            bytes_val = 0
        for unit in ["B", "KB", "MB", "GB", "TB"]:
            if bytes_val < 1024.0:
                return f"{bytes_val:.2f} {unit}"
            bytes_val /= 1024.0
        return f"{bytes_val:.2f} PB"

    def run(self):
        """Run the web dashboard."""
        self._running = True
        # Suppress Werkzeug logging
        import logging

        log = logging.getLogger("werkzeug")
        log.setLevel(logging.ERROR)

        self.app.run(debug=False, host=self.host, port=self.port, use_reloader=False)

    def stop(self):
        """Stop the web dashboard."""
        self._running = False

@ -1,6 +1,6 @@
from .expr_pass import eval_expr, handle_expr, get_operand_value
from .type_normalization import convert_to_bool, get_base_type_and_depth
from .ir_ops import deref_to_depth
from .ir_ops import deref_to_depth, access_struct_field
from .call_registry import CallHandlerRegistry
from .vmlinux_registry import VmlinuxHandlerRegistry

@ -10,6 +10,7 @@ __all__ = [
    "convert_to_bool",
    "get_base_type_and_depth",
    "deref_to_depth",
    "access_struct_field",
    "get_operand_value",
    "CallHandlerRegistry",
    "VmlinuxHandlerRegistry",

@ -6,11 +6,11 @@ from typing import Dict

from pythonbpf.type_deducer import ctypes_to_ir, is_ctypes
from .call_registry import CallHandlerRegistry
from .ir_ops import deref_to_depth, access_struct_field
from .type_normalization import (
    convert_to_bool,
    handle_comparator,
    get_base_type_and_depth,
    deref_to_depth,
)
from .vmlinux_registry import VmlinuxHandlerRegistry
from ..vmlinux_parser.dependency_node import Field
@ -77,89 +77,6 @@ def _handle_attribute_expr(
            logger.info(
                f"Variable type: {var_type}, Variable ptr: {var_ptr}, Variable Metadata: {var_metadata}"
            )
            # Check if this is a pointer to a struct (from map lookup)
            if (
                isinstance(var_type, ir.PointerType)
                and var_metadata
                and isinstance(var_metadata, str)
            ):
                if var_metadata in structs_sym_tab:
                    logger.info(
                        f"Handling pointer to struct {var_metadata} from map lookup"
                    )

                    if func is None:
                        raise ValueError(
                            f"func parameter required for null-safe pointer access to {var_name}.{attr_name}"
                        )

                    # Load the pointer value (ptr<struct>)
                    struct_ptr = builder.load(var_ptr)

                    # Create blocks for null check
                    null_check_block = builder.block
                    not_null_block = func.append_basic_block(
                        name=f"{var_name}_not_null"
                    )
                    merge_block = func.append_basic_block(name=f"{var_name}_merge")

                    # Check if pointer is null
                    null_ptr = ir.Constant(struct_ptr.type, None)
                    is_not_null = builder.icmp_signed("!=", struct_ptr, null_ptr)
                    logger.info(f"Inserted null check for pointer {var_name}")

                    builder.cbranch(is_not_null, not_null_block, merge_block)

                    # Not-null block: Access the field
                    builder.position_at_end(not_null_block)

                    # Get struct metadata
                    metadata = structs_sym_tab[var_metadata]
                    struct_ptr = builder.bitcast(
                        struct_ptr, metadata.ir_type.as_pointer()
                    )

                    if attr_name not in metadata.fields:
                        raise ValueError(
                            f"Field '{attr_name}' not found in struct '{var_metadata}'"
                        )

                    # GEP to field
                    field_gep = metadata.gep(builder, struct_ptr, attr_name)

                    # Load field value
                    field_val = builder.load(field_gep)
                    field_type = metadata.field_type(attr_name)

                    logger.info(
                        f"Loaded field {attr_name} from struct pointer, type: {field_type}"
                    )

                    # Branch to merge
                    not_null_after_load = builder.block
                    builder.branch(merge_block)

                    # Merge block: PHI node for the result
                    builder.position_at_end(merge_block)
                    phi = builder.phi(field_type, name=f"{var_name}_{attr_name}")

                    # If null, return zero/default value
                    if isinstance(field_type, ir.IntType):
                        zero_value = ir.Constant(field_type, 0)
                    elif isinstance(field_type, ir.PointerType):
                        zero_value = ir.Constant(field_type, None)
                    elif isinstance(field_type, ir.ArrayType):
                        # For arrays, we can't easily create a zero constant
                        # This case is tricky - for now, just use undef
                        zero_value = ir.Constant(field_type, ir.Undefined)
                    else:
                        zero_value = ir.Constant(field_type, ir.Undefined)

                    phi.add_incoming(zero_value, null_check_block)
                    phi.add_incoming(field_val, not_null_after_load)

                    logger.info(f"Created PHI node for {var_name}.{attr_name}")
                    return phi, field_type
            if (
                hasattr(var_metadata, "__module__")
                and var_metadata.__module__ == "vmlinux"
@ -180,13 +97,23 @@ def _handle_attribute_expr(
                )
                return None

            # Regular user-defined struct
            metadata = structs_sym_tab.get(var_metadata)
            if metadata and attr_name in metadata.fields:
                gep = metadata.gep(builder, var_ptr, attr_name)
                val = builder.load(gep)
                field_type = metadata.field_type(attr_name)
                return val, field_type
            if var_metadata in structs_sym_tab:
                return access_struct_field(
                    builder,
                    var_ptr,
                    var_type,
                    var_metadata,
                    expr.attr,
                    structs_sym_tab,
                    func,
                )
            else:
                logger.error(f"Struct metadata for '{var_name}' not found")
        else:
            logger.error(f"Undefined variable '{var_name}' for attribute access")
    else:
        logger.error("Unsupported attribute base expression type")

    return None

@ -17,41 +17,100 @@ def deref_to_depth(func, builder, val, target_depth):

        # dereference with null check
        pointee_type = cur_type.pointee
        null_check_block = builder.block
        not_null_block = func.append_basic_block(name=f"deref_not_null_{depth}")
        merge_block = func.append_basic_block(name=f"deref_merge_{depth}")

        null_ptr = ir.Constant(cur_type, None)
        is_not_null = builder.icmp_signed("!=", cur_val, null_ptr)
        logger.debug(f"Inserted null check for pointer at depth {depth}")
        def load_op(builder, ptr):
            return builder.load(ptr)

        builder.cbranch(is_not_null, not_null_block, merge_block)

        builder.position_at_end(not_null_block)
        dereferenced_val = builder.load(cur_val)
        logger.debug(f"Dereferenced to depth {depth - 1}, type: {pointee_type}")
        builder.branch(merge_block)

        builder.position_at_end(merge_block)
        phi = builder.phi(pointee_type, name=f"deref_result_{depth}")

        zero_value = (
            ir.Constant(pointee_type, 0)
            if isinstance(pointee_type, ir.IntType)
            else ir.Constant(pointee_type, None)
        cur_val = _null_checked_operation(
            func, builder, cur_val, load_op, pointee_type, f"deref_{depth}"
        )
        phi.add_incoming(zero_value, null_check_block)

        phi.add_incoming(dereferenced_val, not_null_block)

        # Continue with phi result
        cur_val = phi
        cur_type = pointee_type
        logger.debug(f"Dereferenced to depth {depth}, type: {pointee_type}")
    return cur_val


def deref_struct_ptr(
    func, builder, struct_ptr, struct_metadata, field_name, structs_sym_tab
def _null_checked_operation(func, builder, ptr, operation, result_type, name_prefix):
    """
    Generic null-checked operation on a pointer.
    """
    curr_block = builder.block
    not_null_block = func.append_basic_block(name=f"{name_prefix}_not_null")
    merge_block = func.append_basic_block(name=f"{name_prefix}_merge")

    null_ptr = ir.Constant(ptr.type, None)
    is_not_null = builder.icmp_signed("!=", ptr, null_ptr)
    builder.cbranch(is_not_null, not_null_block, merge_block)

    builder.position_at_end(not_null_block)
    result = operation(builder, ptr)
    not_null_after = builder.block
    builder.branch(merge_block)

    builder.position_at_end(merge_block)
    phi = builder.phi(result_type, name=f"{name_prefix}_result")

    if isinstance(result_type, ir.IntType):
        null_val = ir.Constant(result_type, 0)
    elif isinstance(result_type, ir.PointerType):
        null_val = ir.Constant(result_type, None)
    else:
        null_val = ir.Constant(result_type, ir.Undefined)

    phi.add_incoming(null_val, curr_block)
    phi.add_incoming(result, not_null_after)

    return phi
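To make the control flow that `_null_checked_operation` emits easier to see, here is a minimal, self-contained llvmlite sketch of the same pattern: compare a pointer against null, branch, and merge the loaded value with a zero default through a PHI node. It is independent of pythonbpf's symbol tables, and the function and block names are illustrative only.

```python
from llvmlite import ir

i64 = ir.IntType(64)
module = ir.Module(name="null_check_demo")
func = ir.Function(module, ir.FunctionType(i64, [i64.as_pointer()]), name="load_or_zero")

entry = func.append_basic_block("entry")
not_null = func.append_basic_block("not_null")
merge = func.append_basic_block("merge")
builder = ir.IRBuilder(entry)

ptr = func.args[0]
is_not_null = builder.icmp_signed("!=", ptr, ir.Constant(ptr.type, None))
builder.cbranch(is_not_null, not_null, merge)

builder.position_at_end(not_null)
loaded = builder.load(ptr)          # only reached when the pointer is non-null
builder.branch(merge)

builder.position_at_end(merge)
phi = builder.phi(i64, name="result")
phi.add_incoming(ir.Constant(i64, 0), entry)   # null path: default to 0
phi.add_incoming(loaded, not_null)             # non-null path: the loaded value
builder.ret(phi)

print(module)  # dump the generated LLVM IR
```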


def access_struct_field(
    builder, var_ptr, var_type, var_metadata, field_name, structs_sym_tab, func=None
):
    """Dereference a pointer to a struct type."""
    return deref_to_depth(func, builder, struct_ptr, 1)
    """
    Access a struct field - automatically returns value or pointer based on field type.
    """
    metadata = (
        structs_sym_tab.get(var_metadata)
        if isinstance(var_metadata, str)
        else var_metadata
    )
    if not metadata or field_name not in metadata.fields:
        raise ValueError(f"Field '{field_name}' not found in struct")

    field_type = metadata.field_type(field_name)
    is_ptr_to_struct = isinstance(var_type, ir.PointerType) and isinstance(
        var_metadata, str
    )

    # Get struct pointer
    struct_ptr = builder.load(var_ptr) if is_ptr_to_struct else var_ptr

    should_load = not isinstance(field_type, ir.ArrayType)

    def field_access_op(builder, ptr):
        typed_ptr = builder.bitcast(ptr, metadata.ir_type.as_pointer())
        field_ptr = metadata.gep(builder, typed_ptr, field_name)
        return builder.load(field_ptr) if should_load else field_ptr

    # Handle null check for pointer-to-struct
    if is_ptr_to_struct:
        if func is None:
            raise ValueError("func required for null-safe struct pointer access")

        if should_load:
            result_type = field_type
        else:
            result_type = field_type.as_pointer()

        result = _null_checked_operation(
            func,
            builder,
            struct_ptr,
            field_access_op,
            result_type,
            f"field_{field_name}",
        )
        return result, field_type

    field_ptr = metadata.gep(builder, struct_ptr, field_name)
    result = builder.load(field_ptr) if should_load else field_ptr
    return result, field_type
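For context, this is roughly the kind of pythonbpf program that exercises the path above: a HashMap lookup returns a pointer to a struct, and reading a field off that pointer is what gets lowered through `access_struct_field` with the null-checked load. The struct, map, and section names below are made up for illustration and are not part of this change.

```python
from pythonbpf import bpf, map, struct, section, bpfglobal
from pythonbpf.helper import pid
from pythonbpf.maps import HashMap
from ctypes import c_void_p, c_int64, c_uint64


@bpf
@struct
class exec_stat:
    count: c_uint64


@bpf
@map
def stats() -> HashMap:
    return HashMap(key=c_uint64, value=exec_stat, max_entries=64)


@bpf
@section("tracepoint/syscalls/sys_enter_execve")
def on_execve(ctx: c_void_p) -> c_int64:
    key = pid()
    entry = stats.lookup(key)  # returns a pointer that may be NULL
    if entry:
        # Attribute access on a map-lookup pointer: compiled via
        # access_struct_field with the null-checked load shown above.
        print(f"count={entry.count}")
    return 0  # type: ignore


@bpf
@bpfglobal
def LICENSE() -> str:
    return "GPL"
```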

@ -5,6 +5,7 @@ from llvmlite import ir
from pythonbpf.expr import (
    get_operand_value,
    eval_expr,
    access_struct_field,
)

logger = logging.getLogger(__name__)
@ -135,7 +136,7 @@ def get_or_create_ptr_from_arg(
            and field_type.element.width == 8
        ):
            ptr, sz = get_char_array_ptr_and_size(
                arg, builder, local_sym_tab, struct_sym_tab
                arg, builder, local_sym_tab, struct_sym_tab, func
            )
            if not ptr:
                raise ValueError("Failed to get char array pointer from struct field")
@ -266,7 +267,9 @@ def get_buffer_ptr_and_size(buf_arg, builder, local_sym_tab, struct_sym_tab):
    )


def get_char_array_ptr_and_size(buf_arg, builder, local_sym_tab, struct_sym_tab):
def get_char_array_ptr_and_size(
    buf_arg, builder, local_sym_tab, struct_sym_tab, func=None
):
    """Get pointer to char array and its size."""

    # Struct field: obj.field
@ -277,11 +280,11 @@ def get_char_array_ptr_and_size(buf_arg, builder, local_sym_tab, struct_sym_tab)
        if not (local_sym_tab and var_name in local_sym_tab):
            raise ValueError(f"Variable '{var_name}' not found")

        struct_type = local_sym_tab[var_name].metadata
        if not (struct_sym_tab and struct_type in struct_sym_tab):
            raise ValueError(f"Struct type '{struct_type}' not found")
        struct_ptr, struct_type, struct_metadata = local_sym_tab[var_name]
        if not (struct_sym_tab and struct_metadata in struct_sym_tab):
            raise ValueError(f"Struct type '{struct_metadata}' not found")

        struct_info = struct_sym_tab[struct_type]
        struct_info = struct_sym_tab[struct_metadata]
        if field_name not in struct_info.fields:
            raise ValueError(f"Field '{field_name}' not found")

@ -292,8 +295,24 @@ def get_char_array_ptr_and_size(buf_arg, builder, local_sym_tab, struct_sym_tab)
            )
            return None, 0

        struct_ptr = local_sym_tab[var_name].var
        field_ptr = struct_info.gep(builder, struct_ptr, field_name)
        # Check if char array
        if not (
            isinstance(field_type, ir.ArrayType)
            and isinstance(field_type.element, ir.IntType)
            and field_type.element.width == 8
        ):
            logger.warning("Field is not a char array")
            return None, 0

        field_ptr, _ = access_struct_field(
            builder,
            struct_ptr,
            struct_type,
            struct_metadata,
            field_name,
            struct_sym_tab,
            func,
        )

        # GEP to first element: [N x i8]* -> i8*
        buf_ptr = builder.gep(

@ -222,7 +222,7 @@ def _prepare_expr_args(expr, func, module, builder, local_sym_tab, struct_sym_ta
    # Special case: struct field char array needs pointer to first element
    if isinstance(expr, ast.Attribute):
        char_array_ptr, _ = get_char_array_ptr_and_size(
            expr, builder, local_sym_tab, struct_sym_tab
            expr, builder, local_sym_tab, struct_sym_tab, func
        )
        if char_array_ptr:
            return char_array_ptr

@ -135,7 +135,7 @@ def process_perf_event_map(map_name, rval, module, structs_sym_tab):
    logger.info(f"Map parameters: {map_params}")
    map_global = create_bpf_map(module, map_name, map_params)
    # Generate debug info for BTF
    create_map_debug_info(module, map_global.sym, map_name, map_params)
    create_map_debug_info(module, map_global.sym, map_name, map_params, structs_sym_tab)
    return map_global

@ -1,6 +1,6 @@
from pythonbpf import bpf, section, struct, bpfglobal, compile, map
from pythonbpf.maps import HashMap
from pythonbpf.helper import pid
from pythonbpf.helper import pid, comm
from ctypes import c_void_p, c_int64

@ -9,6 +9,7 @@ from ctypes import c_void_p, c_int64
class val_type:
    counter: c_int64
    shizzle: c_int64
    comm: str(16)

@bpf
@ -22,6 +23,7 @@ def last() -> HashMap:
def hello_world(ctx: c_void_p) -> c_int64:
    obj = val_type()
    obj.counter, obj.shizzle = 42, 96
    comm(obj.comm)
    t = last.lookup(obj)
    if t:
        print(f"Found existing entry: counter={obj.counter}, pid={t}")

93
tests/passing_tests/struct_pylib.py
Normal file
@ -0,0 +1,93 @@
"""
Test struct values in HashMap.

This example stores a struct in a HashMap and reads it back,
testing the new set_value_struct() functionality in pylibbpf.
"""

from pythonbpf import bpf, map, struct, section, bpfglobal, BPF
from pythonbpf.helper import ktime, smp_processor_id, pid, comm
from pythonbpf.maps import HashMap
from ctypes import c_void_p, c_int64, c_uint32, c_uint64
import time
import os


@bpf
@struct
class task_info:
    pid: c_uint64
    timestamp: c_uint64
    comm: str(16)


@bpf
@map
def cpu_tasks() -> HashMap:
    return HashMap(key=c_uint32, value=task_info, max_entries=256)


@bpf
@section("tracepoint/sched/sched_switch")
def trace_sched_switch(ctx: c_void_p) -> c_int64:
    cpu = smp_processor_id()

    # Create task info struct
    info = task_info()
    info.pid = pid()
    info.timestamp = ktime()
    comm(info.comm)

    # Store in map
    cpu_tasks.update(cpu, info)

    return 0  # type: ignore


@bpf
@bpfglobal
def LICENSE() -> str:
    return "GPL"


# Compile and load
b = BPF()
b.load()
b.attach_all()

print("Testing HashMap with Struct Values")

cpu_map = b["cpu_tasks"]
cpu_map.set_value_struct("task_info")  # Enable struct deserialization

print("Listening for context switches...\n")

num_cpus = os.cpu_count() or 16

try:
    while True:
        time.sleep(1)

        print(f"--- Snapshot at {time.strftime('%H:%M:%S')} ---")

        for cpu in range(num_cpus):
            try:
                info = cpu_map.lookup(cpu)

                if info:
                    comm_str = (
                        bytes(info.comm).decode("utf-8", errors="ignore").rstrip("\x00")
                    )
                    ts_sec = info.timestamp / 1e9

                    print(
                        f"  CPU {cpu}: PID={info.pid}, comm={comm_str}, ts={ts_sec:.3f}s"
                    )
            except KeyError:
                # No data for this CPU yet
                pass

        print()

except KeyboardInterrupt:
    print("\nStopped")
Block a user