mirror of https://github.com/varun-r-mallya/py-libp2p.git (synced 2025-12-31 20:36:24 +00:00)
Merge branch 'main' into fix_expose_timeout_muxer_multistream
@@ -37,3 +37,4 @@ SyncValidatorFn = Callable[[ID, rpc_pb2.Message], bool]
 AsyncValidatorFn = Callable[[ID, rpc_pb2.Message], Awaitable[bool]]
 ValidatorFn = Union[SyncValidatorFn, AsyncValidatorFn]
 UnsubscribeFn = Callable[[], Awaitable[None]]
+MessageID = NewType("MessageID", str)
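
The new MessageID alias is a typing.NewType: at runtime it is still a plain str, so existing string-based IDs keep working, while static checkers now flag places that pass an untagged str where a MessageID is expected. A minimal sketch of those semantics (drop_message is a hypothetical function, not part of this commit):

    from typing import NewType

    MessageID = NewType("MessageID", str)

    def drop_message(msg_id: MessageID) -> None:
        print(f"dropping {msg_id!r}")

    raw = "(b'\x01', b'peer')"     # wire-format ID is still just a str
    drop_message(MessageID(raw))   # OK: explicitly tagged
    drop_message(raw)              # runs, but mypy reports: str is not MessageID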
@@ -2,15 +2,20 @@ import logging

 from multiaddr import Multiaddr
+from multiaddr.resolvers import DNSResolver
 import trio

 from libp2p.abc import ID, INetworkService, PeerInfo
 from libp2p.discovery.bootstrap.utils import validate_bootstrap_addresses
 from libp2p.discovery.events.peerDiscovery import peerDiscovery
+from libp2p.network.exceptions import SwarmException
 from libp2p.peer.peerinfo import info_from_p2p_addr
+from libp2p.peer.peerstore import PERMANENT_ADDR_TTL

 logger = logging.getLogger("libp2p.discovery.bootstrap")
+resolver = DNSResolver()
+
+DEFAULT_CONNECTION_TIMEOUT = 10


 class BootstrapDiscovery:
     """
@@ -19,68 +24,147 @@ class BootstrapDiscovery:
     """

     def __init__(self, swarm: INetworkService, bootstrap_addrs: list[str]):
         """
         Initialize BootstrapDiscovery.

         Args:
             swarm: The network service (swarm) instance
             bootstrap_addrs: List of bootstrap peer multiaddresses

         """
         self.swarm = swarm
         self.peerstore = swarm.peerstore
         self.bootstrap_addrs = bootstrap_addrs or []
         self.discovered_peers: set[str] = set()
+        self.connection_timeout: int = DEFAULT_CONNECTION_TIMEOUT

     async def start(self) -> None:
-        """Process bootstrap addresses and emit peer discovery events."""
-        logger.debug(
+        """Process bootstrap addresses and emit peer discovery events in parallel."""
+        logger.info(
             f"Starting bootstrap discovery with "
             f"{len(self.bootstrap_addrs)} bootstrap addresses"
         )

+        # Show all bootstrap addresses being processed
+        for i, addr in enumerate(self.bootstrap_addrs):
+            logger.debug(f"{i + 1}. {addr}")
+
         # Validate and filter bootstrap addresses
         self.bootstrap_addrs = validate_bootstrap_addresses(self.bootstrap_addrs)
+        logger.info(f"Valid addresses after validation: {len(self.bootstrap_addrs)}")

-        for addr_str in self.bootstrap_addrs:
-            try:
-                await self._process_bootstrap_addr(addr_str)
-            except Exception as e:
-                logger.debug(f"Failed to process bootstrap address {addr_str}: {e}")
+        # Use Trio nursery for PARALLEL address processing
+        try:
+            async with trio.open_nursery() as nursery:
+                logger.debug(
+                    f"Starting {len(self.bootstrap_addrs)} parallel address "
+                    f"processing tasks"
+                )
+
+                # Start all bootstrap address processing tasks in parallel
+                for addr_str in self.bootstrap_addrs:
+                    logger.debug(f"Starting parallel task for: {addr_str}")
+                    nursery.start_soon(self._process_bootstrap_addr, addr_str)
+
+                # The nursery will wait for all address processing tasks to complete
+                logger.debug(
+                    "Nursery active - waiting for address processing tasks to complete"
+                )
+
+        except trio.Cancelled:
+            logger.debug("Bootstrap address processing cancelled - cleaning up tasks")
+            raise
+        except Exception as e:
+            logger.error(f"Bootstrap address processing failed: {e}")
+            raise
+
+        logger.info("Bootstrap discovery startup complete - all tasks finished")

     def stop(self) -> None:
         """Clean up bootstrap discovery resources."""
-        logger.debug("Stopping bootstrap discovery")
+        logger.info("Stopping bootstrap discovery and cleaning up tasks")

         # Clear discovered peers
         self.discovered_peers.clear()

         logger.debug("Bootstrap discovery cleanup completed")

     async def _process_bootstrap_addr(self, addr_str: str) -> None:
         """Convert string address to PeerInfo and add to peerstore."""
         try:
-            multiaddr = Multiaddr(addr_str)
+            try:
+                multiaddr = Multiaddr(addr_str)
+            except Exception as e:
+                logger.debug(f"Invalid multiaddr format '{addr_str}': {e}")
+                return
+
+            if self.is_dns_addr(multiaddr):
+                resolved_addrs = await resolver.resolve(multiaddr)
+                if resolved_addrs is None:
+                    logger.warning(f"DNS resolution returned None for: {addr_str}")
+                    return
+
+                peer_id_str = multiaddr.get_peer_id()
+                if peer_id_str is None:
+                    logger.warning(f"Missing peer ID in DNS address: {addr_str}")
+                    return
+                peer_id = ID.from_base58(peer_id_str)
+                addrs = [addr for addr in resolved_addrs]
+                if not addrs:
+                    logger.warning(f"No addresses resolved for DNS address: {addr_str}")
+                    return
+                peer_info = PeerInfo(peer_id, addrs)
+                await self.add_addr(peer_info)
+            else:
+                peer_info = info_from_p2p_addr(multiaddr)
+                await self.add_addr(peer_info)
         except Exception as e:
-            logger.debug(f"Invalid multiaddr format '{addr_str}': {e}")
-            return
-        if self.is_dns_addr(multiaddr):
-            resolved_addrs = await resolver.resolve(multiaddr)
-            peer_id_str = multiaddr.get_peer_id()
-            if peer_id_str is None:
-                logger.warning(f"Missing peer ID in DNS address: {addr_str}")
-                return
-            peer_id = ID.from_base58(peer_id_str)
-            addrs = [addr for addr in resolved_addrs]
-            if not addrs:
-                logger.warning(f"No addresses resolved for DNS address: {addr_str}")
-                return
-            peer_info = PeerInfo(peer_id, addrs)
-            self.add_addr(peer_info)
-        else:
-            self.add_addr(info_from_p2p_addr(multiaddr))
+            logger.warning(f"Failed to process bootstrap address {addr_str}: {e}")

     def is_dns_addr(self, addr: Multiaddr) -> bool:
         """Check if the address is a DNS address."""
         return any(protocol.name == "dnsaddr" for protocol in addr.protocols())

-    def add_addr(self, peer_info: PeerInfo) -> None:
-        """Add a peer to the peerstore and emit discovery event."""
+    async def add_addr(self, peer_info: PeerInfo) -> None:
+        """
+        Add a peer to the peerstore, emit discovery event,
+        and attempt connection in parallel.
+        """
+        logger.debug(
+            f"Adding peer {peer_info.peer_id} with {len(peer_info.addrs)} addresses"
+        )
+
         # Skip if it's our own peer
         if peer_info.peer_id == self.swarm.get_peer_id():
+            logger.debug(f"Skipping own peer ID: {peer_info.peer_id}")
             return

-        # Always add addresses to peerstore (allows multiple addresses for same peer)
-        self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)
+        # Filter addresses to only include IPv4+TCP (only supported protocol)
+        ipv4_tcp_addrs = []
+        filtered_out_addrs = []
+
+        for addr in peer_info.addrs:
+            if self._is_ipv4_tcp_addr(addr):
+                ipv4_tcp_addrs.append(addr)
+            else:
+                filtered_out_addrs.append(addr)
+
+        # Log filtering results
+        logger.debug(
+            f"Address filtering for {peer_info.peer_id}: "
+            f"{len(ipv4_tcp_addrs)} IPv4+TCP, {len(filtered_out_addrs)} filtered"
+        )
+
+        # Skip peer if no IPv4+TCP addresses available
+        if not ipv4_tcp_addrs:
+            logger.warning(
+                f"❌ No IPv4+TCP addresses for {peer_info.peer_id} - "
+                f"skipping connection attempts"
+            )
+            return
+
+        # Add only IPv4+TCP addresses to peerstore
+        self.peerstore.add_addrs(peer_info.peer_id, ipv4_tcp_addrs, PERMANENT_ADDR_TTL)

         # Only emit discovery event if this is the first time we see this peer
         peer_id_str = str(peer_info.peer_id)
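
For reference, a /dnsaddr bootstrap entry carries the peer ID in the multiaddr itself and resolves to one or more concrete transport addresses, which is exactly what the DNS branch above consumes. A hedged, standalone sketch (the bootstrap entry is the well-known libp2p one, used here purely as an example):

    import trio
    from multiaddr import Multiaddr
    from multiaddr.resolvers import DNSResolver

    async def demo() -> None:
        maddr = Multiaddr(
            "/dnsaddr/bootstrap.libp2p.io/p2p/"
            "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"
        )
        print("peer id:", maddr.get_peer_id())         # parsed from the /p2p part
        resolved = await DNSResolver().resolve(maddr)  # may be None on failure
        for addr in resolved or []:
            print("resolved:", addr)

    trio.run(demo)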
@@ -89,6 +173,140 @@ class BootstrapDiscovery:
             self.discovered_peers.add(peer_id_str)
             # Emit peer discovery event
             peerDiscovery.emit_peer_discovered(peer_info)
-            logger.debug(f"Peer discovered: {peer_info.peer_id}")
+            logger.info(f"Peer discovered: {peer_info.peer_id}")
+
+            # Connect to peer (parallel across different bootstrap addresses)
+            logger.debug("Connecting to discovered peer...")
+            await self._connect_to_peer(peer_info.peer_id)
+
         else:
-            logger.debug(f"Additional addresses added for peer: {peer_info.peer_id}")
+            logger.debug(
+                f"Additional addresses added for existing peer: {peer_info.peer_id}"
+            )
+            # Even for existing peers, try to connect if not already connected
+            if peer_info.peer_id not in self.swarm.connections:
+                logger.debug("Connecting to existing peer...")
+                await self._connect_to_peer(peer_info.peer_id)
+
+    async def _connect_to_peer(self, peer_id: ID) -> None:
+        """
+        Attempt to establish a connection to a peer with timeout.
+
+        Uses swarm.dial_peer to connect using addresses stored in peerstore.
+        Times out after self.connection_timeout seconds to prevent hanging.
+        """
+        logger.debug(f"Connection attempt for peer: {peer_id}")
+
+        # Pre-connection validation: Check if already connected
+        if peer_id in self.swarm.connections:
+            logger.debug(
+                f"Already connected to {peer_id} - skipping connection attempt"
+            )
+            return
+
+        # Check available addresses before attempting connection
+        available_addrs = self.peerstore.addrs(peer_id)
+        logger.debug(f"Connecting to {peer_id} ({len(available_addrs)} addresses)")
+
+        if not available_addrs:
+            logger.error(f"❌ No addresses available for {peer_id} - cannot connect")
+            return
+
+        # Record start time for connection attempt monitoring
+        connection_start_time = trio.current_time()
+
+        try:
+            # fail_after (rather than move_on_after) so a timeout actually
+            # raises the TooSlowError handled below
+            with trio.fail_after(self.connection_timeout):
+                # Log connection attempt
+                logger.debug(
+                    f"Attempting connection to {peer_id} using "
+                    f"{len(available_addrs)} addresses"
+                )
+
+                # Use swarm.dial_peer to connect using stored addresses
+                await self.swarm.dial_peer(peer_id)
+
+                # Calculate connection time
+                connection_time = trio.current_time() - connection_start_time
+
+                # Post-connection validation: Verify connection was actually established
+                if peer_id in self.swarm.connections:
+                    logger.info(
+                        f"✅ Connected to {peer_id} (took {connection_time:.2f}s)"
+                    )
+                else:
+                    logger.warning(
+                        f"Dial succeeded but connection not found for {peer_id}"
+                    )
+        except trio.TooSlowError:
+            logger.warning(
+                f"❌ Connection to {peer_id} timed out after {self.connection_timeout}s"
+            )
+        except SwarmException as e:
+            # Calculate failed connection time
+            failed_connection_time = trio.current_time() - connection_start_time
+
+            # Enhanced error logging
+            error_msg = str(e)
+            if "no addresses established a successful connection" in error_msg:
+                logger.warning(
+                    f"❌ Failed to connect to {peer_id} after trying all "
+                    f"{len(available_addrs)} addresses "
+                    f"(took {failed_connection_time:.2f}s)"
+                )
+                # Log individual address failures if this is a MultiError
+                if (
+                    e.__cause__ is not None
+                    and hasattr(e.__cause__, "exceptions")
+                    and getattr(e.__cause__, "exceptions", None) is not None
+                ):
+                    exceptions_list = getattr(e.__cause__, "exceptions")
+                    logger.debug("📋 Individual address failure details:")
+                    for i, addr_exception in enumerate(exceptions_list, 1):
+                        logger.debug(f"Address {i}: {addr_exception}")
+                        # Also log the actual address that failed
+                        if i <= len(available_addrs):
+                            logger.debug(f"Failed address: {available_addrs[i - 1]}")
+                else:
+                    logger.warning("No detailed exception information available")
+            else:
+                logger.warning(
+                    f"❌ Failed to connect to {peer_id}: {e} "
+                    f"(took {failed_connection_time:.2f}s)"
+                )
+
+        except Exception as e:
+            # Handle unexpected errors that aren't swarm-specific
+            failed_connection_time = trio.current_time() - connection_start_time
+            logger.error(
+                f"❌ Unexpected error connecting to {peer_id}: "
+                f"{e} (took {failed_connection_time:.2f}s)"
+            )
+            # Don't re-raise to prevent killing the nursery and other parallel tasks
+
+    def _is_ipv4_tcp_addr(self, addr: Multiaddr) -> bool:
+        """
+        Check if address is IPv4 with TCP protocol only.
+
+        Filters out IPv6, UDP, QUIC, WebSocket, and other unsupported protocols.
+        Only IPv4+TCP addresses are supported by the current transport.
+        """
+        try:
+            protocols = addr.protocols()
+
+            # Must have IPv4 protocol
+            has_ipv4 = any(p.name == "ip4" for p in protocols)
+            if not has_ipv4:
+                return False
+
+            # Must have TCP protocol
+            has_tcp = any(p.name == "tcp" for p in protocols)
+            if not has_tcp:
+                return False
+
+            return True
+
+        except Exception:
+            # If we can't parse the address, don't use it
+            return False
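
The control flow above leans entirely on Trio's structured concurrency: the async with block in start() does not exit until every start_soon child has finished, and an unhandled error in any child cancels the siblings and re-raises at the nursery, which is what the surrounding try/except is for. A runnable toy reduction of that pattern:

    import trio

    async def process(addr: str) -> None:
        await trio.sleep(0.1)  # stand-in for resolve + dial work
        print(f"processed {addr}")

    async def main() -> None:
        async with trio.open_nursery() as nursery:
            for addr in ["addr-a", "addr-b", "addr-c"]:
                nursery.start_soon(process, addr)
        # Reaching this line means all three tasks completed
        print("all bootstrap-style tasks done")

    trio.run(main)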
@@ -1,6 +1,3 @@
-from ast import (
-    literal_eval,
-)
 from collections import (
     defaultdict,
 )
@@ -22,6 +19,7 @@ from libp2p.abc import (
     IPubsubRouter,
 )
 from libp2p.custom_types import (
+    MessageID,
     TProtocol,
 )
 from libp2p.peer.id import (
@@ -56,6 +54,10 @@ from .pb import (
 from .pubsub import (
     Pubsub,
 )
+from .utils import (
+    parse_message_id_safe,
+    safe_parse_message_id,
+)

 PROTOCOL_ID = TProtocol("/meshsub/1.0.0")
 PROTOCOL_ID_V11 = TProtocol("/meshsub/1.1.0")
@@ -794,8 +796,8 @@ class GossipSub(IPubsubRouter, Service):

         # Add all unknown message ids (ids that appear in ihave_msg but not in
         # seen_seqnos) to list of messages we want to request
-        msg_ids_wanted: list[str] = [
-            msg_id
+        msg_ids_wanted: list[MessageID] = [
+            parse_message_id_safe(msg_id)
             for msg_id in ihave_msg.messageIDs
             if msg_id not in seen_seqnos_and_peers
         ]
@@ -811,9 +813,9 @@ class GossipSub(IPubsubRouter, Service):
         Forwards all request messages that are present in mcache to the
         requesting peer.
         """
-        # FIXME: Update type of message ID
-        # FIXME: Find a better way to parse the msg ids
-        msg_ids: list[Any] = [literal_eval(msg) for msg in iwant_msg.messageIDs]
+        msg_ids: list[tuple[bytes, bytes]] = [
+            safe_parse_message_id(msg) for msg in iwant_msg.messageIDs
+        ]
         msgs_to_forward: list[rpc_pb2.Message] = []
         for msg_id_iwant in msg_ids:
             # Check if the wanted message ID is present in mcache
@@ -1,6 +1,10 @@
+import ast
 import logging

 from libp2p.abc import IHost
+from libp2p.custom_types import (
+    MessageID,
+)
 from libp2p.peer.envelope import consume_envelope
 from libp2p.peer.id import ID
 from libp2p.pubsub.pb.rpc_pb2 import RPC
@@ -48,3 +52,29 @@ def maybe_consume_signed_record(msg: RPC, host: IHost, peer_id: ID) -> bool:
             logger.error("Failed to update the Certified-Addr-Book: %s", e)
             return False
     return True
+
+
+def parse_message_id_safe(msg_id_str: str) -> MessageID:
+    """Safely handle message ID as string."""
+    return MessageID(msg_id_str)
+
+
+def safe_parse_message_id(msg_id_str: str) -> tuple[bytes, bytes]:
+    """
+    Safely parse message ID using ast.literal_eval with validation.
+
+    :param msg_id_str: String representation of message ID
+    :return: Tuple of (seqno, from_id) as bytes
+    :raises ValueError: If parsing fails
+    """
+    try:
+        parsed = ast.literal_eval(msg_id_str)
+        if not isinstance(parsed, tuple) or len(parsed) != 2:
+            raise ValueError("Invalid message ID format")
+
+        seqno, from_id = parsed
+        if not isinstance(seqno, bytes) or not isinstance(from_id, bytes):
+            raise ValueError("Message ID components must be bytes")
+
+        return (seqno, from_id)
+    except (ValueError, SyntaxError) as e:
+        raise ValueError(f"Invalid message ID format: {e}")
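
The old literal_eval call implies that gossipsub message IDs travel as the repr of a (seqno, from_id) bytes tuple; safe_parse_message_id keeps that format but validates shape and types before trusting the input. A round-trip sketch, assuming this module is importable as libp2p.pubsub.utils (the relative .utils import in the gossipsub hunk suggests it is):

    from libp2p.pubsub.utils import safe_parse_message_id

    seqno, from_id = b"\x00\x01", b"\x12peer"
    wire_id = str((seqno, from_id))  # "(b'\x00\x01', b'\x12peer')"
    assert safe_parse_message_id(wire_id) == (seqno, from_id)

    try:
        safe_parse_message_id("['not', 'a', 'tuple']")  # rejected: not a 2-tuple of bytes
    except ValueError as e:
        print(e)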
@@ -1,7 +1,4 @@
 import atexit
-from datetime import (
-    datetime,
-)
 import logging
 import logging.handlers
 import os
@@ -21,6 +18,9 @@ log_queue: "queue.Queue[Any]" = queue.Queue()
 # Store the current listener to stop it on exit
 _current_listener: logging.handlers.QueueListener | None = None

+# Store the handlers for proper cleanup
+_current_handlers: list[logging.Handler] = []
+
 # Event to track when the listener is ready
 _listener_ready = threading.Event()

@@ -95,7 +95,7 @@ def setup_logging() -> None:
     - Child loggers inherit their parent's level unless explicitly set
     - The root libp2p logger controls the default level
     """
-    global _current_listener, _listener_ready
+    global _current_listener, _listener_ready, _current_handlers

     # Reset the event
     _listener_ready.clear()
@@ -105,6 +105,12 @@ def setup_logging() -> None:
         _current_listener.stop()
         _current_listener = None

+    # Close and clear existing handlers
+    for handler in _current_handlers:
+        if isinstance(handler, logging.FileHandler):
+            handler.close()
+    _current_handlers.clear()
+
     # Get the log level from environment variable
     debug_str = os.environ.get("LIBP2P_DEBUG", "")

@@ -148,13 +154,10 @@ def setup_logging() -> None:
         log_path = Path(log_file)
         log_path.parent.mkdir(parents=True, exist_ok=True)
     else:
-        # Default log file with timestamp and unique identifier
-        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
-        unique_id = os.urandom(4).hex()  # Add a unique identifier to prevent collisions
-        if os.name == "nt":  # Windows
-            log_file = f"C:\\Windows\\Temp\\py-libp2p_{timestamp}_{unique_id}.log"
-        else:  # Unix-like
-            log_file = f"/tmp/py-libp2p_{timestamp}_{unique_id}.log"
+        # Use cross-platform temp file creation
+        from libp2p.utils.paths import create_temp_file
+
+        log_file = str(create_temp_file(prefix="py-libp2p_", suffix=".log"))

     # Print the log file path so users know where to find it
     print(f"Logging to: {log_file}", file=sys.stderr)
@@ -195,6 +198,9 @@ def setup_logging() -> None:
         logger.setLevel(level)
         logger.propagate = False  # Prevent message duplication

+    # Store handlers globally for cleanup
+    _current_handlers.extend(handlers)
+
     # Start the listener AFTER configuring all loggers
     _current_listener = logging.handlers.QueueListener(
         log_queue, *handlers, respect_handler_level=True
@@ -209,7 +215,13 @@ def setup_logging() -> None:
 @atexit.register
 def cleanup_logging() -> None:
     """Clean up logging resources on exit."""
-    global _current_listener
+    global _current_listener, _current_handlers
     if _current_listener is not None:
         _current_listener.stop()
         _current_listener = None
+
+    # Close all file handlers to ensure proper cleanup on Windows
+    for handler in _current_handlers:
+        if isinstance(handler, logging.FileHandler):
+            handler.close()
+    _current_handlers.clear()
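
With the datetime/os.urandom block gone, the default log destination is whatever create_temp_file returns under tempfile.gettempdir(), and repeated setup_logging() calls now close stale FileHandlers before installing new ones (which matters on Windows, where open files cannot be deleted). A usage sketch, assuming this module lives at libp2p.utils.logging and that a bare level name is a valid LIBP2P_DEBUG value (the parsing of that variable is outside this hunk):

    import os

    os.environ["LIBP2P_DEBUG"] = "DEBUG"
    from libp2p.utils.logging import setup_logging

    setup_logging()  # prints "Logging to: <temp path>" and starts the QueueListener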
libp2p/utils/paths.py (new file, 267 lines)
@@ -0,0 +1,267 @@
"""
|
||||
Cross-platform path utilities for py-libp2p.
|
||||
|
||||
This module provides standardized path operations to ensure consistent
|
||||
behavior across Windows, macOS, and Linux platforms.
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
import sys
|
||||
import tempfile
|
||||
from typing import Union
|
||||
|
||||
PathLike = Union[str, Path]
|
||||
|
||||
|
||||
def get_temp_dir() -> Path:
|
||||
"""
|
||||
Get cross-platform temporary directory.
|
||||
|
||||
Returns:
|
||||
Path: Platform-specific temporary directory path
|
||||
|
||||
"""
|
||||
return Path(tempfile.gettempdir())
|
||||
|
||||
|
||||
def get_project_root() -> Path:
|
||||
"""
|
||||
Get the project root directory.
|
||||
|
||||
Returns:
|
||||
Path: Path to the py-libp2p project root
|
||||
|
||||
"""
|
||||
# Navigate from libp2p/utils/paths.py to project root
|
||||
return Path(__file__).parent.parent.parent
|
||||
|
||||
|
||||
def join_paths(*parts: PathLike) -> Path:
|
||||
"""
|
||||
Cross-platform path joining.
|
||||
|
||||
Args:
|
||||
*parts: Path components to join
|
||||
|
||||
Returns:
|
||||
Path: Joined path using platform-appropriate separator
|
||||
|
||||
"""
|
||||
return Path(*parts)
|
||||
|
||||
|
||||
def ensure_dir_exists(path: PathLike) -> Path:
|
||||
"""
|
||||
Ensure directory exists, create if needed.
|
||||
|
||||
Args:
|
||||
path: Directory path to ensure exists
|
||||
|
||||
Returns:
|
||||
Path: Path object for the directory
|
||||
|
||||
"""
|
||||
path_obj = Path(path)
|
||||
path_obj.mkdir(parents=True, exist_ok=True)
|
||||
return path_obj
|
||||
|
||||
|
||||
def get_config_dir() -> Path:
|
||||
"""
|
||||
Get user config directory (cross-platform).
|
||||
|
||||
Returns:
|
||||
Path: Platform-specific config directory
|
||||
|
||||
"""
|
||||
if os.name == "nt": # Windows
|
||||
appdata = os.environ.get("APPDATA", "")
|
||||
if appdata:
|
||||
return Path(appdata) / "py-libp2p"
|
||||
else:
|
||||
# Fallback to user home directory
|
||||
return Path.home() / "AppData" / "Roaming" / "py-libp2p"
|
||||
else: # Unix-like (Linux, macOS)
|
||||
return Path.home() / ".config" / "py-libp2p"
|
||||
|
||||
|
||||
def get_script_dir(script_path: PathLike | None = None) -> Path:
|
||||
"""
|
||||
Get the directory containing a script file.
|
||||
|
||||
Args:
|
||||
script_path: Path to the script file. If None, uses __file__
|
||||
|
||||
Returns:
|
||||
Path: Directory containing the script
|
||||
|
||||
Raises:
|
||||
RuntimeError: If script path cannot be determined
|
||||
|
||||
"""
|
||||
if script_path is None:
|
||||
# This will be the directory of the calling script
|
||||
import inspect
|
||||
|
||||
frame = inspect.currentframe()
|
||||
if frame and frame.f_back:
|
||||
script_path = frame.f_back.f_globals.get("__file__")
|
||||
else:
|
||||
raise RuntimeError("Could not determine script path")
|
||||
|
||||
if script_path is None:
|
||||
raise RuntimeError("Script path is None")
|
||||
|
||||
return Path(script_path).parent.absolute()
|
||||
|
||||
|
||||
def create_temp_file(prefix: str = "py-libp2p_", suffix: str = ".log") -> Path:
|
||||
"""
|
||||
Create a temporary file with a unique name.
|
||||
|
||||
Args:
|
||||
prefix: File name prefix
|
||||
suffix: File name suffix
|
||||
|
||||
Returns:
|
||||
Path: Path to the created temporary file
|
||||
|
||||
"""
|
||||
temp_dir = get_temp_dir()
|
||||
# Create a unique filename using timestamp and random bytes
|
||||
import secrets
|
||||
import time
|
||||
|
||||
timestamp = time.strftime("%Y%m%d_%H%M%S")
|
||||
microseconds = f"{time.time() % 1:.6f}"[2:] # Get microseconds as string
|
||||
unique_id = secrets.token_hex(4)
|
||||
filename = f"{prefix}{timestamp}_{microseconds}_{unique_id}{suffix}"
|
||||
|
||||
temp_file = temp_dir / filename
|
||||
# Create the file by touching it
|
||||
temp_file.touch()
|
||||
return temp_file
|
||||
|
||||
|
||||
def resolve_relative_path(base_path: PathLike, relative_path: PathLike) -> Path:
|
||||
"""
|
||||
Resolve a relative path from a base path.
|
||||
|
||||
Args:
|
||||
base_path: Base directory path
|
||||
relative_path: Relative path to resolve
|
||||
|
||||
Returns:
|
||||
Path: Resolved absolute path
|
||||
|
||||
"""
|
||||
base = Path(base_path).resolve()
|
||||
relative = Path(relative_path)
|
||||
|
||||
if relative.is_absolute():
|
||||
return relative
|
||||
else:
|
||||
return (base / relative).resolve()
|
||||
|
||||
|
||||
def normalize_path(path: PathLike) -> Path:
|
||||
"""
|
||||
Normalize a path, resolving any symbolic links and relative components.
|
||||
|
||||
Args:
|
||||
path: Path to normalize
|
||||
|
||||
Returns:
|
||||
Path: Normalized absolute path
|
||||
|
||||
"""
|
||||
return Path(path).resolve()
|
||||
|
||||
|
||||
def get_venv_path() -> Path | None:
|
||||
"""
|
||||
Get virtual environment path if active.
|
||||
|
||||
Returns:
|
||||
Path: Virtual environment path if active, None otherwise
|
||||
|
||||
"""
|
||||
venv_path = os.environ.get("VIRTUAL_ENV")
|
||||
if venv_path:
|
||||
return Path(venv_path)
|
||||
return None
|
||||
|
||||
|
||||
def get_python_executable() -> Path:
|
||||
"""
|
||||
Get current Python executable path.
|
||||
|
||||
Returns:
|
||||
Path: Path to the current Python executable
|
||||
|
||||
"""
|
||||
return Path(sys.executable)
|
||||
|
||||
|
||||
def find_executable(name: str) -> Path | None:
|
||||
"""
|
||||
Find executable in system PATH.
|
||||
|
||||
Args:
|
||||
name: Name of the executable to find
|
||||
|
||||
Returns:
|
||||
Path: Path to executable if found, None otherwise
|
||||
|
||||
"""
|
||||
# Check if name already contains path
|
||||
if os.path.dirname(name):
|
||||
path = Path(name)
|
||||
if path.exists() and os.access(path, os.X_OK):
|
||||
return path
|
||||
return None
|
||||
|
||||
# Search in PATH
|
||||
for path_dir in os.environ.get("PATH", "").split(os.pathsep):
|
||||
if not path_dir:
|
||||
continue
|
||||
path = Path(path_dir) / name
|
||||
if path.exists() and os.access(path, os.X_OK):
|
||||
return path
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_script_binary_path() -> Path:
|
||||
"""
|
||||
Get path to script's binary directory.
|
||||
|
||||
Returns:
|
||||
Path: Directory containing the script's binary
|
||||
|
||||
"""
|
||||
return get_python_executable().parent
|
||||
|
||||
|
||||
def get_binary_path(binary_name: str) -> Path | None:
|
||||
"""
|
||||
Find binary in PATH or virtual environment.
|
||||
|
||||
Args:
|
||||
binary_name: Name of the binary to find
|
||||
|
||||
Returns:
|
||||
Path: Path to binary if found, None otherwise
|
||||
|
||||
"""
|
||||
# First check in virtual environment if active
|
||||
venv_path = get_venv_path()
|
||||
if venv_path:
|
||||
venv_bin = venv_path / "bin" if os.name != "nt" else venv_path / "Scripts"
|
||||
binary_path = venv_bin / binary_name
|
||||
if binary_path.exists() and os.access(binary_path, os.X_OK):
|
||||
return binary_path
|
||||
|
||||
# Fall back to system PATH
|
||||
return find_executable(binary_name)
|
||||
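
Since everything in paths.py is pure stdlib (pathlib, tempfile, os), the helpers can be exercised directly; a short usage sketch:

    from libp2p.utils.paths import (
        create_temp_file,
        ensure_dir_exists,
        get_binary_path,
        get_config_dir,
    )

    cfg = ensure_dir_exists(get_config_dir())  # ~/.config/py-libp2p on Unix
    log = create_temp_file(prefix="py-libp2p_", suffix=".log")
    print("config dir:", cfg)
    print("log file:  ", log)

    # Prefers the active virtualenv's bin/ (Scripts\ on Windows), then PATH
    print("python3:   ", get_binary_path("python3"))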