Merge upstream/main into add-ws-transport

Resolved conflicts in:
- .gitignore: Combined JavaScript interop and Sphinx build ignores
- libp2p/__init__.py: Integrated QUIC transport support with WebSocket transport
- libp2p/network/swarm.py: Used upstream's improved listener handling
- pyproject.toml: Kept both WebSocket and QUIC dependencies

This merge brings in:
- QUIC transport implementation
- Enhanced swarm functionality
- Improved peer discovery
- Better error handling
- Updated dependencies and documentation

WebSocket transport implementation remains intact and functional.
acul71
2025-09-07 23:47:41 +02:00
105 changed files with 13904 additions and 730 deletions
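
A rough sketch of the combined setup this merge enables. The QUIC side mirrors the examples below (new_host(enable_quic=True) with a /udp/.../quic-v1 listen address); the WebSocket side is an assumption about how the add-ws-transport branch selects the transport from a /ws listen multiaddr, not something this diff confirms.

import secrets

import multiaddr
import trio

from libp2p import new_host
from libp2p.crypto.secp256k1 import create_new_key_pair


async def main() -> None:
    # QUIC listener: API shape taken from the examples in this merge.
    quic_host = new_host(
        key_pair=create_new_key_pair(secrets.token_bytes(32)),
        enable_quic=True,
    )
    quic_addr = multiaddr.Multiaddr("/ip4/0.0.0.0/udp/8000/quic-v1")

    # WebSocket listener: assumed to be selected from the /ws listen multiaddr.
    ws_host = new_host(key_pair=create_new_key_pair(secrets.token_bytes(32)))
    ws_addr = multiaddr.Multiaddr("/ip4/0.0.0.0/tcp/8001/ws")

    async with quic_host.run(listen_addrs=[quic_addr]):
        async with ws_host.run(listen_addrs=[ws_addr]):
            print("QUIC listener:", quic_host.get_addrs())
            print("WebSocket listener:", ws_host.get_addrs())
            await trio.sleep_forever()


trio.run(main)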


@@ -0,0 +1,63 @@
"""
Advanced demonstration of Thin Waist address handling.
Run:
python -m examples.advanced.network_discovery
"""
from __future__ import annotations
from multiaddr import Multiaddr
try:
from libp2p.utils.address_validation import (
expand_wildcard_address,
get_available_interfaces,
get_optimal_binding_address,
)
except ImportError:
# Fallbacks if utilities are missing
def get_available_interfaces(port: int, protocol: str = "tcp"):
return [Multiaddr(f"/ip4/0.0.0.0/{protocol}/{port}")]
def expand_wildcard_address(addr: Multiaddr, port: int | None = None):
if port is None:
return [addr]
addr_str = str(addr).rsplit("/", 1)[0]
return [Multiaddr(addr_str + f"/{port}")]
def get_optimal_binding_address(port: int, protocol: str = "tcp"):
return Multiaddr(f"/ip4/0.0.0.0/{protocol}/{port}")
def main() -> None:
port = 8080
interfaces = get_available_interfaces(port)
print(f"Discovered interfaces for port {port}:")
for a in interfaces:
print(f" - {a}")
wildcard_v4 = Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
expanded_v4 = expand_wildcard_address(wildcard_v4)
print("\nExpanded IPv4 wildcard:")
for a in expanded_v4:
print(f" - {a}")
wildcard_v6 = Multiaddr(f"/ip6/::/tcp/{port}")
expanded_v6 = expand_wildcard_address(wildcard_v6)
print("\nExpanded IPv6 wildcard:")
for a in expanded_v6:
print(f" - {a}")
print("\nOptimal binding address heuristic result:")
print(f" -> {get_optimal_binding_address(port)}")
override_port = 9000
overridden = expand_wildcard_address(wildcard_v4, port=override_port)
print(f"\nPort override expansion to {override_port}:")
for a in overridden:
print(f" - {a}")
if __name__ == "__main__":
main()
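
A short companion sketch of how a listener consumes these helpers when binding a host. new_host, host.run, find_free_port, and get_available_interfaces are used the same way in the updated echo example later in this diff; the rest is illustrative.

import secrets

import trio

from libp2p import new_host
from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.utils.address_validation import find_free_port, get_available_interfaces


async def serve() -> None:
    port = find_free_port()                        # free TCP port chosen by the OS
    listen_addrs = get_available_interfaces(port)  # one multiaddr per local interface
    host = new_host(key_pair=create_new_key_pair(secrets.token_bytes(32)))
    async with host.run(listen_addrs=listen_addrs):
        peer_id = host.get_id().to_string()
        for addr in listen_addrs:
            print(f"{addr}/p2p/{peer_id}")
        await trio.sleep_forever()


trio.run(serve)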


@@ -0,0 +1,35 @@
import secrets
import multiaddr
import trio
from libp2p import (
new_host,
)
from libp2p.crypto.secp256k1 import (
create_new_key_pair,
)
async def main():
# Create a key pair for the host
secret = secrets.token_bytes(32)
key_pair = create_new_key_pair(secret)
# Create a host with the key pair
host = new_host(key_pair=key_pair, enable_quic=True)
# Configure the listening address
port = 8000
listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/udp/{port}/quic-v1")
# Start the host
async with host.run(listen_addrs=[listen_addr]):
print("libp2p has started with QUIC transport")
print("libp2p is listening on:", host.get_addrs())
# Keep the host running
await trio.sleep_forever()
# Run the async function
trio.run(main)


@@ -0,0 +1,170 @@
#!/usr/bin/env python3
"""
Example demonstrating multiple connections per peer support in libp2p.
This example shows how to:
1. Configure multiple connections per peer
2. Use different load balancing strategies
3. Access multiple connections through the new API
4. Maintain backward compatibility
"""
import logging
import trio
from libp2p import new_swarm
from libp2p.network.swarm import ConnectionConfig, RetryConfig
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
async def example_basic_multiple_connections() -> None:
"""Example of basic multiple connections per peer usage."""
logger.info("Creating swarm with multiple connections support...")
# Create swarm with default configuration
swarm = new_swarm()
default_connection = ConnectionConfig()
logger.info(f"Swarm created with peer ID: {swarm.get_peer_id()}")
logger.info(
f"Connection config: max_connections_per_peer="
f"{default_connection.max_connections_per_peer}"
)
await swarm.close()
logger.info("Basic multiple connections example completed")
async def example_custom_connection_config() -> None:
"""Example of custom connection configuration."""
logger.info("Creating swarm with custom connection configuration...")
# Custom connection configuration for high-performance scenarios
connection_config = ConnectionConfig(
max_connections_per_peer=5, # More connections per peer
connection_timeout=60.0, # Longer timeout
load_balancing_strategy="least_loaded", # Use least loaded strategy
)
# Create swarm with custom connection config
swarm = new_swarm(connection_config=connection_config)
logger.info("Custom connection config applied:")
logger.info(
f" Max connections per peer: {connection_config.max_connections_per_peer}"
)
logger.info(f" Connection timeout: {connection_config.connection_timeout}s")
logger.info(
f" Load balancing strategy: {connection_config.load_balancing_strategy}"
)
await swarm.close()
logger.info("Custom connection config example completed")
async def example_multiple_connections_api() -> None:
"""Example of using the new multiple connections API."""
logger.info("Demonstrating multiple connections API...")
connection_config = ConnectionConfig(
max_connections_per_peer=3, load_balancing_strategy="round_robin"
)
swarm = new_swarm(connection_config=connection_config)
logger.info("Multiple connections API features:")
logger.info(" - dial_peer() returns list[INetConn]")
logger.info(" - get_connections(peer_id) returns list[INetConn]")
logger.info(" - get_connections_map() returns dict[ID, list[INetConn]]")
logger.info(
" - get_connection(peer_id) returns INetConn | None (backward compatibility)"
)
await swarm.close()
logger.info("Multiple connections API example completed")
async def example_backward_compatibility() -> None:
"""Example of backward compatibility features."""
logger.info("Demonstrating backward compatibility...")
swarm = new_swarm()
logger.info("Backward compatibility features:")
logger.info(" - connections_legacy property provides 1:1 mapping")
logger.info(" - get_connection() method for single connection access")
logger.info(" - Existing code continues to work")
await swarm.close()
logger.info("Backward compatibility example completed")
async def example_production_ready_config() -> None:
"""Example of production-ready configuration."""
logger.info("Creating swarm with production-ready configuration...")
# Production-ready retry configuration
retry_config = RetryConfig(
max_retries=3, # Reasonable retry limit
initial_delay=0.1, # Quick initial retry
max_delay=30.0, # Cap exponential backoff
backoff_multiplier=2.0, # Standard exponential backoff
jitter_factor=0.1, # Small jitter to prevent thundering herd
)
# Production-ready connection configuration
connection_config = ConnectionConfig(
max_connections_per_peer=3, # Balance between performance and resource usage
connection_timeout=30.0, # Reasonable timeout
load_balancing_strategy="round_robin", # Simple, predictable strategy
)
# Create swarm with production config
swarm = new_swarm(retry_config=retry_config, connection_config=connection_config)
logger.info("Production-ready configuration applied:")
logger.info(
f" Retry: {retry_config.max_retries} retries, "
f"{retry_config.max_delay}s max delay"
)
logger.info(f" Connections: {connection_config.max_connections_per_peer} per peer")
logger.info(f" Load balancing: {connection_config.load_balancing_strategy}")
await swarm.close()
logger.info("Production-ready configuration example completed")
async def main() -> None:
"""Run all examples."""
logger.info("Multiple Connections Per Peer Examples")
logger.info("=" * 50)
try:
await example_basic_multiple_connections()
logger.info("-" * 30)
await example_custom_connection_config()
logger.info("-" * 30)
await example_multiple_connections_api()
logger.info("-" * 30)
await example_backward_compatibility()
logger.info("-" * 30)
await example_production_ready_config()
logger.info("-" * 30)
logger.info("All examples completed successfully!")
except Exception as e:
logger.error(f"Example failed: {e}")
raise
if __name__ == "__main__":
trio.run(main)
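
The example above only describes the new API in log messages. The sketch below is a hypothetical continuation, reusing the example's imports and based solely on the shapes those messages state (dial_peer() -> list[INetConn], get_connections(), get_connections_map(), get_connection()); the peer must already be reachable via the peerstore, and error handling is omitted.

async def example_dial_and_inspect(peer_id) -> None:
    """Hypothetical: dial a known peer and inspect its connection pool."""
    swarm = new_swarm(
        connection_config=ConnectionConfig(
            max_connections_per_peer=3,
            load_balancing_strategy="round_robin",
        )
    )
    conns = await swarm.dial_peer(peer_id)  # list[INetConn]; may hold several entries
    logger.info("Open connections to %s: %d", peer_id, len(conns))
    all_conns = swarm.get_connections(peer_id)  # every connection to this peer
    single = swarm.get_connection(peer_id)      # backward-compatible single view
    by_peer = swarm.get_connections_map()       # dict[ID, list[INetConn]]
    logger.info(
        "get_connections: %d, get_connection: %s, peers tracked: %d",
        len(all_conns), single, len(by_peer),
    )
    await swarm.close()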


@@ -1,4 +1,6 @@
import argparse
import random
import secrets
import multiaddr
import trio
@@ -12,40 +14,54 @@ from libp2p.crypto.secp256k1 import (
from libp2p.custom_types import (
TProtocol,
)
from libp2p.network.stream.exceptions import (
StreamEOF,
)
from libp2p.network.stream.net_stream import (
INetStream,
)
from libp2p.peer.peerinfo import (
info_from_p2p_addr,
)
from libp2p.utils.address_validation import (
find_free_port,
get_available_interfaces,
)
PROTOCOL_ID = TProtocol("/echo/1.0.0")
MAX_READ_LEN = 2**32 - 1
async def _echo_stream_handler(stream: INetStream) -> None:
# Wait until EOF
msg = await stream.read(MAX_READ_LEN)
await stream.write(msg)
await stream.close()
try:
peer_id = stream.muxed_conn.peer_id
print(f"Received connection from {peer_id}")
# Wait until EOF
msg = await stream.read(MAX_READ_LEN)
print(f"Echoing message: {msg.decode('utf-8')}")
await stream.write(msg)
except StreamEOF:
print("Stream closed by remote peer.")
except Exception as e:
print(f"Error in echo handler: {e}")
finally:
await stream.close()
async def run(port: int, destination: str, seed: int | None = None) -> None:
listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
if port <= 0:
port = find_free_port()
listen_addr = get_available_interfaces(port)
if seed:
import random
random.seed(seed)
secret_number = random.getrandbits(32 * 8)
secret = secret_number.to_bytes(length=32, byteorder="big")
else:
import secrets
secret = secrets.token_bytes(32)
host = new_host(key_pair=create_new_key_pair(secret))
async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
async with host.run(listen_addrs=listen_addr), trio.open_nursery() as nursery:
# Start the peer-store cleanup task
nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)
@@ -54,10 +70,15 @@ async def run(port: int, destination: str, seed: int | None = None) -> None:
if not destination:  # it's the server
host.set_stream_handler(PROTOCOL_ID, _echo_stream_handler)
# Print all listen addresses with peer ID (JS parity)
print("Listener ready, listening on:\n")
peer_id = host.get_id().to_string()
for addr in listen_addr:
print(f"{addr}/p2p/{peer_id}")
print(
"Run this from the same folder in another console:\n\n"
f"echo-demo "
f"-d {host.get_addrs()[0]}\n"
"\nRun this from the same folder in another console:\n\n"
f"echo-demo -d {host.get_addrs()[0]}\n"
)
print("Waiting for incoming connections...")
await trio.sleep_forever()

examples/echo/echo_quic.py (new file, 178 lines)

@@ -0,0 +1,178 @@
#!/usr/bin/env python3
"""
QUIC Echo Example - Fixed version with proper client/server separation
This program demonstrates a simple echo protocol using QUIC transport where a peer
listens for connections and copies back any input received on a stream.
Fixed to properly separate client and server modes - clients don't start listeners.
"""
import argparse
import logging
from multiaddr import Multiaddr
import trio
from libp2p import new_host
from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.custom_types import TProtocol
from libp2p.network.stream.net_stream import INetStream
from libp2p.peer.peerinfo import info_from_p2p_addr
PROTOCOL_ID = TProtocol("/echo/1.0.0")
async def _echo_stream_handler(stream: INetStream) -> None:
try:
msg = await stream.read()
await stream.write(msg)
await stream.close()
except Exception as e:
print(f"Echo handler error: {e}")
try:
await stream.close()
except: # noqa: E722
pass
async def run_server(port: int, seed: int | None = None) -> None:
"""Run echo server with QUIC transport."""
listen_addr = Multiaddr(f"/ip4/0.0.0.0/udp/{port}/quic")
if seed:
import random
random.seed(seed)
secret_number = random.getrandbits(32 * 8)
secret = secret_number.to_bytes(length=32, byteorder="big")
else:
import secrets
secret = secrets.token_bytes(32)
# Create host with QUIC transport
host = new_host(
enable_quic=True,
key_pair=create_new_key_pair(secret),
)
# Server mode: start listener
async with host.run(listen_addrs=[listen_addr]):
try:
print(f"I am {host.get_id().to_string()}")
host.set_stream_handler(PROTOCOL_ID, _echo_stream_handler)
print(
"Run this from the same folder in another console:\n\n"
f"python3 ./examples/echo/echo_quic.py "
f"-d {host.get_addrs()[0]}\n"
)
print("Waiting for incoming QUIC connections...")
await trio.sleep_forever()
except KeyboardInterrupt:
print("Closing server gracefully...")
await host.close()
return
async def run_client(destination: str, seed: int | None = None) -> None:
"""Run echo client with QUIC transport."""
if seed:
import random
random.seed(seed)
secret_number = random.getrandbits(32 * 8)
secret = secret_number.to_bytes(length=32, byteorder="big")
else:
import secrets
secret = secrets.token_bytes(32)
# Create host with QUIC transport
host = new_host(
enable_quic=True,
key_pair=create_new_key_pair(secret),
)
# Client mode: NO listener, just connect
async with host.run(listen_addrs=[]): # Empty listen_addrs for client
print(f"I am {host.get_id().to_string()}")
maddr = Multiaddr(destination)
info = info_from_p2p_addr(maddr)
# Connect to server
print("STARTING CLIENT CONNECTION PROCESS")
await host.connect(info)
print("CLIENT CONNECTED TO SERVER")
# Start a stream with the destination
stream = await host.new_stream(info.peer_id, [PROTOCOL_ID])
msg = b"hi, there!\n"
await stream.write(msg)
response = await stream.read()
print(f"Sent: {msg.decode('utf-8')}")
print(f"Got: {response.decode('utf-8')}")
await stream.close()
await host.disconnect(info.peer_id)
async def run(port: int, destination: str, seed: int | None = None) -> None:
"""
Run echo server or client with QUIC transport.
Fixed version that properly separates client and server modes.
"""
if not destination: # Server mode
await run_server(port, seed)
else: # Client mode
await run_client(destination, seed)
def main() -> None:
"""Main function - help text updated for QUIC."""
description = """
This program demonstrates a simple echo protocol using QUIC
transport where a peer listens for connections and copies back
any input received on a stream.
QUIC provides built-in TLS security and stream multiplexing over UDP.
To use it, first run 'echo-quic-demo -p <PORT>', where <PORT> is
the UDP port number. Then, run another host with
'echo-quic-demo -d <DESTINATION>'
where <DESTINATION> is the QUIC multiaddress of the previous listener host.
"""
example_maddr = "/ip4/127.0.0.1/udp/8000/quic/p2p/QmQn4SwGkDZKkUEpBRBv"
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-p", "--port", default=0, type=int, help="UDP port number")
parser.add_argument(
"-d",
"--destination",
type=str,
help=f"destination multiaddr string, e.g. {example_maddr}",
)
parser.add_argument(
"-s",
"--seed",
type=int,
help="provide a seed to the random number generator",
)
args = parser.parse_args()
try:
trio.run(run, args.port, args.destination, args.seed)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("aioquic").setLevel(logging.DEBUG)
main()
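
For a quick programmatic test of the client path defined above, the destination multiaddr would be copied from a running server's output; the address in the comment below is only a placeholder.

def demo_client(destination: str) -> None:
    # destination e.g. "/ip4/127.0.0.1/udp/8000/quic/p2p/<SERVER_PEER_ID>"
    trio.run(run_client, destination, None)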


@@ -41,6 +41,7 @@ from libp2p.tools.async_service import (
from libp2p.tools.utils import (
info_from_p2p_addr,
)
from libp2p.utils.paths import get_script_dir, join_paths
# Configure logging
logging.basicConfig(
@@ -53,8 +54,8 @@ logger = logging.getLogger("kademlia-example")
# Configure DHT module loggers to inherit from the parent logger
# This ensures all kademlia-example.* loggers use the same configuration
# Get the directory where this script is located
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SERVER_ADDR_LOG = os.path.join(SCRIPT_DIR, "server_node_addr.txt")
SCRIPT_DIR = get_script_dir(__file__)
SERVER_ADDR_LOG = join_paths(SCRIPT_DIR, "server_node_addr.txt")
# Set the level for all child loggers
for module in [
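
The hunk above replaces the direct os.path calls with get_script_dir and join_paths from libp2p.utils.paths. Those helpers are not shown in this diff; a plausible minimal stand-in with the same call shapes (an assumption, not the library's actual code) would be:

import os


def get_script_dir(file: str) -> str:
    # Directory containing the given module file.
    return os.path.dirname(os.path.abspath(file))


def join_paths(*parts: str) -> str:
    # Thin wrapper over os.path.join.
    return os.path.join(*parts)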


@ -1,6 +1,5 @@
import argparse
import logging
import socket
import base58
import multiaddr
@@ -31,6 +30,9 @@ from libp2p.stream_muxer.mplex.mplex import (
from libp2p.tools.async_service.trio_service import (
background_trio_service,
)
from libp2p.utils.address_validation import (
find_free_port,
)
# Configure logging
logging.basicConfig(
@@ -77,13 +79,6 @@ async def publish_loop(pubsub, topic, termination_event):
await trio.sleep(1) # Avoid tight loop on error
def find_free_port():
"""Find a free port on localhost."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", 0)) # Bind to a free port provided by the OS
return s.getsockname()[1]
async def monitor_peer_topics(pubsub, nursery, termination_event):
"""
Monitor for new topics that peers are subscribed to and