Merge branch 'main' into keyerror-fix

This commit is contained in:
Manu Sheel Gupta
2025-09-22 01:56:52 +05:30
committed by GitHub
48 changed files with 9477 additions and 100 deletions

View File

@ -1,5 +1,11 @@
"""Libp2p Python implementation."""
import logging
from libp2p.transport.quic.utils import is_quic_multiaddr
from typing import Any
from libp2p.transport.quic.transport import QUICTransport
from libp2p.transport.quic.config import QUICTransportConfig
from collections.abc import (
Mapping,
Sequence,
@ -38,10 +44,12 @@ from libp2p.host.routed_host import (
RoutedHost,
)
from libp2p.network.swarm import (
ConnectionConfig,
RetryConfig,
Swarm,
)
from libp2p.network.config import (
ConnectionConfig,
RetryConfig
)
from libp2p.peer.id import (
ID,
)
@ -87,6 +95,7 @@ MUXER_YAMUX = "YAMUX"
MUXER_MPLEX = "MPLEX"
DEFAULT_NEGOTIATE_TIMEOUT = 5
logger = logging.getLogger(__name__)
def set_default_muxer(muxer_name: Literal["YAMUX", "MPLEX"]) -> None:
"""
@ -162,8 +171,9 @@ def new_swarm(
peerstore_opt: IPeerStore | None = None,
muxer_preference: Literal["YAMUX", "MPLEX"] | None = None,
listen_addrs: Sequence[multiaddr.Multiaddr] | None = None,
enable_quic: bool = False,
retry_config: Optional["RetryConfig"] = None,
connection_config: Optional["ConnectionConfig"] = None,
connection_config: ConnectionConfig | QUICTransportConfig | None = None,
) -> INetworkService:
"""
Create a swarm instance based on the parameters.
@ -174,6 +184,8 @@ def new_swarm(
:param peerstore_opt: optional peerstore
:param muxer_preference: optional explicit muxer preference
:param listen_addrs: optional list of multiaddrs to listen on
:param enable_quic: whether to use QUIC for the transport
:param connection_config: optional connection configuration; pass a
QUICTransportConfig to configure the QUIC transport
:return: return a default swarm instance
Note: Yamux (/yamux/1.0.0) is the preferred stream multiplexer
@ -186,14 +198,21 @@ def new_swarm(
id_opt = generate_peer_id_from(key_pair)
transport: TCP | QUICTransport
quic_transport_opt = connection_config if isinstance(connection_config, QUICTransportConfig) else None
if listen_addrs is None:
transport = TCP()
if enable_quic:
transport = QUICTransport(key_pair.private_key, config=quic_transport_opt)
else:
transport = TCP()
else:
addr = listen_addrs[0]
is_quic = is_quic_multiaddr(addr)
if addr.__contains__("tcp"):
transport = TCP()
elif addr.__contains__("quic"):
raise ValueError("QUIC not yet supported")
elif is_quic:
transport = QUICTransport(key_pair.private_key, config=quic_transport_opt)
else:
raise ValueError(f"Unknown transport in listen_addrs: {listen_addrs}")
@ -261,6 +280,8 @@ def new_host(
enable_mDNS: bool = False,
bootstrap: list[str] | None = None,
negotiate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
enable_quic: bool = False,
quic_transport_opt: QUICTransportConfig | None = None,
) -> IHost:
"""
Create a new libp2p host based on the given parameters.
@ -274,15 +295,23 @@ def new_host(
:param listen_addrs: optional list of multiaddrs to listen on
:param enable_mDNS: whether to enable mDNS discovery
:param bootstrap: optional list of bootstrap peer addresses as strings
:param enable_quic: optional choice to use QUIC for transport
:param quic_transport_opt: optional configuration for the QUIC transport
:return: return a host instance
"""
if not enable_quic and quic_transport_opt is not None:
logger.warning(f"QUIC config provided but QUIC not enabled, ignoring QUIC config")
swarm = new_swarm(
enable_quic=enable_quic,
key_pair=key_pair,
muxer_opt=muxer_opt,
sec_opt=sec_opt,
peerstore_opt=peerstore_opt,
muxer_preference=muxer_preference,
listen_addrs=listen_addrs,
connection_config=quic_transport_opt if enable_quic else None
)
if disc_opt is not None:
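For reference, a minimal sketch of the new new_host parameters in use; the values are illustrative, not taken from this diff.

from libp2p import new_host
from libp2p.transport.quic.config import QUICTransportConfig

host = new_host(
    enable_quic=True,
    quic_transport_opt=QUICTransportConfig(idle_timeout=30.0),
)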

View File

@ -5,17 +5,17 @@ from collections.abc import (
)
from typing import TYPE_CHECKING, NewType, Union, cast
from libp2p.transport.quic.stream import QUICStream
if TYPE_CHECKING:
from libp2p.abc import (
IMuxedConn,
INetStream,
ISecureTransport,
)
from libp2p.abc import IMuxedConn, IMuxedStream, INetStream, ISecureTransport
from libp2p.transport.quic.connection import QUICConnection
else:
IMuxedConn = cast(type, object)
INetStream = cast(type, object)
ISecureTransport = cast(type, object)
IMuxedStream = cast(type, object)
QUICConnection = cast(type, object)
from libp2p.io.abc import (
ReadWriteCloser,
@ -37,4 +37,6 @@ SyncValidatorFn = Callable[[ID, rpc_pb2.Message], bool]
AsyncValidatorFn = Callable[[ID, rpc_pb2.Message], Awaitable[bool]]
ValidatorFn = Union[SyncValidatorFn, AsyncValidatorFn]
UnsubscribeFn = Callable[[], Awaitable[None]]
TQUICStreamHandlerFn = Callable[[QUICStream], Awaitable[None]]
TQUICConnHandlerFn = Callable[[QUICConnection], Awaitable[None]]
MessageID = NewType("MessageID", str)
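Illustrative only: a coroutine with the shape that TQUICStreamHandlerFn describes. The handler body is an assumption, not code from this change.

from libp2p.transport.quic.stream import QUICStream

async def echo_stream_handler(stream: QUICStream) -> None:
    # Read up to 1024 bytes and echo them back on the same stream.
    data = await stream.read(1024)
    await stream.write(data)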

View File

@ -213,7 +213,6 @@ class BasicHost(IHost):
self,
peer_id: ID,
protocol_ids: Sequence[TProtocol],
negotitate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
) -> INetStream:
"""
:param peer_id: peer_id that host is connecting
@ -227,7 +226,7 @@ class BasicHost(IHost):
selected_protocol = await self.multiselect_client.select_one_of(
list(protocol_ids),
MultiselectCommunicator(net_stream),
negotitate_timeout,
self.negotiate_timeout,
)
except MultiselectClientError as error:
logger.debug("fail to open a stream to peer %s, error=%s", peer_id, error)

libp2p/network/config.py (new file, 70 lines)
View File

@ -0,0 +1,70 @@
from dataclasses import dataclass
@dataclass
class RetryConfig:
"""
Configuration for retry logic with exponential backoff.
This configuration controls how connection attempts are retried when they fail.
The retry mechanism uses exponential backoff with jitter to prevent thundering
herd problems in distributed systems.
Attributes:
max_retries: Maximum number of retry attempts before giving up.
Default: 3 attempts
initial_delay: Initial delay in seconds before the first retry.
Default: 0.1 seconds (100ms)
max_delay: Maximum delay cap in seconds to prevent excessive wait times.
Default: 30.0 seconds
backoff_multiplier: Multiplier for exponential backoff (each retry multiplies
the delay by this factor). Default: 2.0 (doubles each time)
jitter_factor: Random jitter factor (0.0-1.0) to add randomness to delays
and prevent synchronized retries. Default: 0.1 (10% jitter)
"""
max_retries: int = 3
initial_delay: float = 0.1
max_delay: float = 30.0
backoff_multiplier: float = 2.0
jitter_factor: float = 0.1
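A minimal sketch of the delay schedule these fields describe; the swarm's actual retry helper may differ in detail, and the function name here is illustrative.

import random

def _example_retry_delay(attempt: int, cfg: RetryConfig) -> float:
    # Exponential backoff capped at max_delay, plus proportional jitter.
    delay = min(cfg.initial_delay * (cfg.backoff_multiplier ** attempt), cfg.max_delay)
    return delay + delay * cfg.jitter_factor * random.random()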
@dataclass
class ConnectionConfig:
"""
Configuration for multi-connection support.
This configuration controls how multiple connections per peer are managed,
including connection limits, timeouts, and load balancing strategies.
Attributes:
max_connections_per_peer: Maximum number of connections allowed to a single
peer. Default: 3 connections
connection_timeout: Timeout in seconds for establishing new connections.
Default: 30.0 seconds
load_balancing_strategy: Strategy for distributing streams across connections.
Options: "round_robin" (default) or "least_loaded"
"""
max_connections_per_peer: int = 3
connection_timeout: float = 30.0
load_balancing_strategy: str = "round_robin" # or "least_loaded"
def __post_init__(self) -> None:
"""Validate configuration after initialization."""
if not (
self.load_balancing_strategy == "round_robin"
or self.load_balancing_strategy == "least_loaded"
):
raise ValueError(
"Load balancing strategy can only be 'round_robin' or 'least_loaded'"
)
if self.max_connections_per_peer < 1:
raise ValueError("Max connections per peer must be at least 1")
if self.connection_timeout < 0:
raise ValueError("Connection timeout must be non-negative")

View File

@ -17,6 +17,7 @@ from libp2p.stream_muxer.exceptions import (
MuxedStreamError,
MuxedStreamReset,
)
from libp2p.transport.quic.exceptions import QUICStreamClosedError, QUICStreamResetError
from .exceptions import (
StreamClosed,
@ -170,7 +171,7 @@ class NetStream(INetStream):
elif self.__stream_state == StreamState.OPEN:
self.__stream_state = StreamState.CLOSE_READ
raise StreamEOF() from error
except MuxedStreamReset as error:
except (MuxedStreamReset, QUICStreamClosedError, QUICStreamResetError) as error:
async with self._state_lock:
if self.__stream_state in [
StreamState.OPEN,
@ -199,7 +200,12 @@ class NetStream(INetStream):
try:
await self.muxed_stream.write(data)
except (MuxedStreamClosed, MuxedStreamError) as error:
except (
MuxedStreamClosed,
MuxedStreamError,
QUICStreamClosedError,
QUICStreamResetError,
) as error:
async with self._state_lock:
if self.__stream_state == StreamState.OPEN:
self.__stream_state = StreamState.CLOSE_WRITE

View File

@ -2,9 +2,9 @@ from collections.abc import (
Awaitable,
Callable,
)
from dataclasses import dataclass
import logging
import random
from typing import cast
from multiaddr import (
Multiaddr,
@ -27,6 +27,7 @@ from libp2p.custom_types import (
from libp2p.io.abc import (
ReadWriteCloser,
)
from libp2p.network.config import ConnectionConfig, RetryConfig
from libp2p.peer.id import (
ID,
)
@ -41,6 +42,9 @@ from libp2p.transport.exceptions import (
OpenConnectionError,
SecurityUpgradeFailure,
)
from libp2p.transport.quic.config import QUICTransportConfig
from libp2p.transport.quic.connection import QUICConnection
from libp2p.transport.quic.transport import QUICTransport
from libp2p.transport.upgrader import (
TransportUpgrader,
)
@ -61,59 +65,6 @@ from .exceptions import (
logger = logging.getLogger("libp2p.network.swarm")
@dataclass
class RetryConfig:
"""
Configuration for retry logic with exponential backoff.
This configuration controls how connection attempts are retried when they fail.
The retry mechanism uses exponential backoff with jitter to prevent thundering
herd problems in distributed systems.
Attributes:
max_retries: Maximum number of retry attempts before giving up.
Default: 3 attempts
initial_delay: Initial delay in seconds before the first retry.
Default: 0.1 seconds (100ms)
max_delay: Maximum delay cap in seconds to prevent excessive wait times.
Default: 30.0 seconds
backoff_multiplier: Multiplier for exponential backoff (each retry multiplies
the delay by this factor). Default: 2.0 (doubles each time)
jitter_factor: Random jitter factor (0.0-1.0) to add randomness to delays
and prevent synchronized retries. Default: 0.1 (10% jitter)
"""
max_retries: int = 3
initial_delay: float = 0.1
max_delay: float = 30.0
backoff_multiplier: float = 2.0
jitter_factor: float = 0.1
@dataclass
class ConnectionConfig:
"""
Configuration for multi-connection support.
This configuration controls how multiple connections per peer are managed,
including connection limits, timeouts, and load balancing strategies.
Attributes:
max_connections_per_peer: Maximum number of connections allowed to a single
peer. Default: 3 connections
connection_timeout: Timeout in seconds for establishing new connections.
Default: 30.0 seconds
load_balancing_strategy: Strategy for distributing streams across connections.
Options: "round_robin" (default) or "least_loaded"
"""
max_connections_per_peer: int = 3
connection_timeout: float = 30.0
load_balancing_strategy: str = "round_robin" # or "least_loaded"
def create_default_stream_handler(network: INetworkService) -> StreamHandlerFn:
async def stream_handler(stream: INetStream) -> None:
await network.get_manager().wait_finished()
@ -126,8 +77,7 @@ class Swarm(Service, INetworkService):
peerstore: IPeerStore
upgrader: TransportUpgrader
transport: ITransport
# Enhanced: Support for multiple connections per peer
connections: dict[ID, list[INetConn]] # Multiple connections per peer
connections: dict[ID, list[INetConn]]
listeners: dict[str, IListener]
common_stream_handler: StreamHandlerFn
listener_nursery: trio.Nursery | None
@ -137,7 +87,7 @@ class Swarm(Service, INetworkService):
# Enhanced: New configuration
retry_config: RetryConfig
connection_config: ConnectionConfig
connection_config: ConnectionConfig | QUICTransportConfig
_round_robin_index: dict[ID, int]
def __init__(
@ -147,7 +97,7 @@ class Swarm(Service, INetworkService):
upgrader: TransportUpgrader,
transport: ITransport,
retry_config: RetryConfig | None = None,
connection_config: ConnectionConfig | None = None,
connection_config: ConnectionConfig | QUICTransportConfig | None = None,
):
self.self_id = peer_id
self.peerstore = peerstore
@ -178,6 +128,11 @@ class Swarm(Service, INetworkService):
# Create a nursery for listener tasks.
self.listener_nursery = nursery
self.event_listener_nursery_created.set()
if isinstance(self.transport, QUICTransport):
self.transport.set_background_nursery(nursery)
self.transport.set_swarm(self)
try:
await self.manager.wait_finished()
finally:
@ -370,6 +325,7 @@ class Swarm(Service, INetworkService):
# Dial peer (connection to peer does not yet exist)
# Transport dials peer (gets back a raw conn)
try:
addr = Multiaddr(f"{addr}/p2p/{peer_id}")
raw_conn = await self.transport.dial(addr)
except OpenConnectionError as error:
logger.debug("fail to dial peer %s over base transport", peer_id)
@ -377,6 +333,15 @@ class Swarm(Service, INetworkService):
f"fail to open connection to peer {peer_id}"
) from error
if isinstance(self.transport, QUICTransport) and isinstance(
raw_conn, IMuxedConn
):
logger.info(
"Skipping upgrade for QUIC, QUIC connections are already multiplexed"
)
swarm_conn = await self.add_conn(raw_conn)
return swarm_conn
logger.debug("dialed peer %s over base transport", peer_id)
# Per, https://discuss.libp2p.io/t/multistream-security/130, we first secure
@ -402,9 +367,7 @@ class Swarm(Service, INetworkService):
logger.debug("upgraded mux for peer %s", peer_id)
swarm_conn = await self.add_conn(muxed_conn)
logger.debug("successfully dialed peer %s", peer_id)
return swarm_conn
async def dial_addr(self, addr: Multiaddr, peer_id: ID) -> INetConn:
@ -427,7 +390,6 @@ class Swarm(Service, INetworkService):
:return: net stream instance
"""
logger.debug("attempting to open a stream to peer %s", peer_id)
# Get existing connections or dial new ones
connections = self.get_connections(peer_id)
if not connections:
@ -436,6 +398,10 @@ class Swarm(Service, INetworkService):
# Load balancing strategy at interface level
connection = self._select_connection(connections, peer_id)
if isinstance(self.transport, QUICTransport) and connection is not None:
conn = cast(SwarmConn, connection)
return await conn.new_stream()
try:
net_stream = await connection.new_stream()
logger.debug("successfully opened a stream to peer %s", peer_id)
@ -516,6 +482,7 @@ class Swarm(Service, INetworkService):
- Map multiaddr to listener
"""
# We need to wait until `self.listener_nursery` is created.
logger.debug("Starting to listen")
await self.event_listener_nursery_created.wait()
success_count = 0
@ -527,6 +494,22 @@ class Swarm(Service, INetworkService):
async def conn_handler(
read_write_closer: ReadWriteCloser, maddr: Multiaddr = maddr
) -> None:
# No need to upgrade QUIC Connection
if isinstance(self.transport, QUICTransport):
try:
quic_conn = cast(QUICConnection, read_write_closer)
await self.add_conn(quic_conn)
peer_id = quic_conn.peer_id
logger.debug(
f"successfully opened quic connection to peer {peer_id}"
)
# NOTE: This is an intentional barrier to prevent the handler from
# exiting and closing the connection.
await self.manager.wait_finished()
except Exception:
await read_write_closer.close()
return
raw_conn = RawConnection(read_write_closer, False)
# Per, https://discuss.libp2p.io/t/multistream-security/130, we first
@ -660,9 +643,10 @@ class Swarm(Service, INetworkService):
muxed_conn,
self,
)
logger.debug("Swarm::add_conn | starting muxed connection")
self.manager.run_task(muxed_conn.start)
await muxed_conn.event_started.wait()
logger.debug("Swarm::add_conn | starting swarm connection")
self.manager.run_task(swarm_conn.start)
await swarm_conn.event_started.wait()

View File

@ -1,3 +1,5 @@
from builtins import AssertionError
from libp2p.abc import (
IMultiselectCommunicator,
)
@ -36,7 +38,8 @@ class MultiselectCommunicator(IMultiselectCommunicator):
msg_bytes = encode_delim(msg_str.encode())
try:
await self.read_writer.write(msg_bytes)
except IOException as error:
# Handle for connection close during ongoing negotiation in QUIC
except (IOException, AssertionError, ValueError) as error:
raise MultiselectCommunicatorError(
"fail to write to multiselect communicator"
) from error

View File

@ -21,6 +21,7 @@ from libp2p.protocol_muxer.exceptions import (
MultiselectError,
)
from libp2p.protocol_muxer.multiselect import (
DEFAULT_NEGOTIATE_TIMEOUT,
Multiselect,
)
from libp2p.protocol_muxer.multiselect_client import (
@ -46,11 +47,17 @@ class MuxerMultistream:
transports: "OrderedDict[TProtocol, TMuxerClass]"
multiselect: Multiselect
multiselect_client: MultiselectClient
negotiate_timeout: int
def __init__(self, muxer_transports_by_protocol: TMuxerOptions) -> None:
def __init__(
self,
muxer_transports_by_protocol: TMuxerOptions,
negotiate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
) -> None:
self.transports = OrderedDict()
self.multiselect = Multiselect()
self.multistream_client = MultiselectClient()
self.negotiate_timeout = negotiate_timeout
for protocol, transport in muxer_transports_by_protocol.items():
self.add_transport(protocol, transport)
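Hypothetical construction sketch showing the new negotiate_timeout parameter; the muxer mapping passed here is a placeholder for whatever the host configures.

# `my_muxer_options` maps each muxer protocol ID to its muxer class (placeholder).
muxer_ms = MuxerMultistream(my_muxer_options, negotiate_timeout=10)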
@ -80,10 +87,12 @@ class MuxerMultistream:
communicator = MultiselectCommunicator(conn)
if conn.is_initiator:
protocol = await self.multiselect_client.select_one_of(
tuple(self.transports.keys()), communicator
tuple(self.transports.keys()), communicator, self.negotiate_timeout
)
else:
protocol, _ = await self.multiselect.negotiate(communicator)
protocol, _ = await self.multiselect.negotiate(
communicator, self.negotiate_timeout
)
if protocol is None:
raise MultiselectError(
"Fail to negotiate a stream muxer protocol: no protocol selected"
@ -93,7 +102,7 @@ class MuxerMultistream:
async def new_conn(self, conn: ISecureConn, peer_id: ID) -> IMuxedConn:
communicator = MultiselectCommunicator(conn)
protocol = await self.multistream_client.select_one_of(
tuple(self.transports.keys()), communicator
tuple(self.transports.keys()), communicator, self.negotiate_timeout
)
transport_class = self.transports[protocol]
if protocol == PROTOCOL_ID:

View File

@ -0,0 +1,345 @@
"""
Configuration classes for QUIC transport.
"""
from dataclasses import (
dataclass,
field,
)
import ssl
from typing import Any, Literal, TypedDict
from libp2p.custom_types import TProtocol
from libp2p.network.config import ConnectionConfig
class QUICTransportKwargs(TypedDict, total=False):
"""Type definition for kwargs accepted by new_transport function."""
# Connection settings
idle_timeout: float
max_datagram_size: int
local_port: int | None
# Protocol version support
enable_draft29: bool
enable_v1: bool
# TLS settings
verify_mode: ssl.VerifyMode
alpn_protocols: list[str]
# Performance settings
max_concurrent_streams: int
connection_window: int
stream_window: int
# Logging and debugging
enable_qlog: bool
qlog_dir: str | None
# Connection management
max_connections: int
connection_timeout: float
# Protocol identifiers
PROTOCOL_QUIC_V1: TProtocol
PROTOCOL_QUIC_DRAFT29: TProtocol
@dataclass
class QUICTransportConfig(ConnectionConfig):
"""Configuration for QUIC transport."""
# Connection settings
idle_timeout: float = 30.0 # Seconds before an idle connection is closed.
max_datagram_size: int = (
1200 # Maximum size of UDP datagrams to avoid IP fragmentation.
)
local_port: int | None = (
None # Local port to bind to. If None, a random port is chosen.
)
# Protocol version support
enable_draft29: bool = True # Enable QUIC draft-29 for compatibility
enable_v1: bool = True # Enable QUIC v1 (RFC 9000)
# TLS settings
verify_mode: ssl.VerifyMode = ssl.CERT_NONE
alpn_protocols: list[str] = field(default_factory=lambda: ["libp2p"])
# Performance settings
max_concurrent_streams: int = 100 # Maximum concurrent streams per connection
connection_window: int = 1024 * 1024 # Connection flow control window
stream_window: int = 64 * 1024 # Stream flow control window
# Logging and debugging
enable_qlog: bool = False # Enable QUIC logging
qlog_dir: str | None = None # Directory for QUIC logs
# Connection management
max_connections: int = 1000 # Maximum number of connections
connection_timeout: float = 10.0 # Connection establishment timeout
MAX_CONCURRENT_STREAMS: int = 1000
"""Maximum number of concurrent streams per connection."""
MAX_INCOMING_STREAMS: int = 1000
"""Maximum number of incoming streams per connection."""
CONNECTION_HANDSHAKE_TIMEOUT: float = 60.0
"""Timeout for connection handshake (seconds)."""
MAX_OUTGOING_STREAMS: int = 1000
"""Maximum number of outgoing streams per connection."""
CONNECTION_CLOSE_TIMEOUT: int = 10
"""Timeout for opening new connection (seconds)."""
# Stream timeouts
STREAM_OPEN_TIMEOUT: float = 5.0
"""Timeout for opening new streams (seconds)."""
STREAM_ACCEPT_TIMEOUT: float = 30.0
"""Timeout for accepting incoming streams (seconds)."""
STREAM_READ_TIMEOUT: float = 30.0
"""Default timeout for stream read operations (seconds)."""
STREAM_WRITE_TIMEOUT: float = 30.0
"""Default timeout for stream write operations (seconds)."""
STREAM_CLOSE_TIMEOUT: float = 10.0
"""Timeout for graceful stream close (seconds)."""
# Flow control configuration
STREAM_FLOW_CONTROL_WINDOW: int = 1024 * 1024 # 1MB
"""Per-stream flow control window size."""
CONNECTION_FLOW_CONTROL_WINDOW: int = 1536 * 1024 # 1.5MB
"""Connection-wide flow control window size."""
# Buffer management
MAX_STREAM_RECEIVE_BUFFER: int = 2 * 1024 * 1024 # 2MB
"""Maximum receive buffer size per stream."""
STREAM_RECEIVE_BUFFER_LOW_WATERMARK: int = 64 * 1024 # 64KB
"""Low watermark for stream receive buffer."""
STREAM_RECEIVE_BUFFER_HIGH_WATERMARK: int = 512 * 1024 # 512KB
"""High watermark for stream receive buffer."""
# Stream lifecycle configuration
ENABLE_STREAM_RESET_ON_ERROR: bool = True
"""Whether to automatically reset streams on errors."""
STREAM_RESET_ERROR_CODE: int = 1
"""Default error code for stream resets."""
ENABLE_STREAM_KEEP_ALIVE: bool = False
"""Whether to enable stream keep-alive mechanisms."""
STREAM_KEEP_ALIVE_INTERVAL: float = 30.0
"""Interval for stream keep-alive pings (seconds)."""
# Resource management
ENABLE_STREAM_RESOURCE_TRACKING: bool = True
"""Whether to track stream resource usage."""
STREAM_MEMORY_LIMIT_PER_STREAM: int = 2 * 1024 * 1024 # 2MB
"""Memory limit per individual stream."""
STREAM_MEMORY_LIMIT_PER_CONNECTION: int = 100 * 1024 * 1024 # 100MB
"""Total memory limit for all streams per connection."""
# Concurrency and performance
ENABLE_STREAM_BATCHING: bool = True
"""Whether to batch multiple stream operations."""
STREAM_BATCH_SIZE: int = 10
"""Number of streams to process in a batch."""
STREAM_PROCESSING_CONCURRENCY: int = 100
"""Maximum concurrent stream processing tasks."""
# Debugging and monitoring
ENABLE_STREAM_METRICS: bool = True
"""Whether to collect stream metrics."""
ENABLE_STREAM_TIMELINE_TRACKING: bool = True
"""Whether to track stream lifecycle timelines."""
STREAM_METRICS_COLLECTION_INTERVAL: float = 60.0
"""Interval for collecting stream metrics (seconds)."""
# Error handling configuration
STREAM_ERROR_RETRY_ATTEMPTS: int = 3
"""Number of retry attempts for recoverable stream errors."""
STREAM_ERROR_RETRY_DELAY: float = 1.0
"""Initial delay between stream error retries (seconds)."""
STREAM_ERROR_RETRY_BACKOFF_FACTOR: float = 2.0
"""Backoff factor for stream error retries."""
# Protocol identifiers matching go-libp2p
PROTOCOL_QUIC_V1: TProtocol = TProtocol("quic-v1") # RFC 9000
PROTOCOL_QUIC_DRAFT29: TProtocol = TProtocol("quic") # draft-29
def __post_init__(self) -> None:
"""Validate configuration after initialization."""
if not (self.enable_draft29 or self.enable_v1):
raise ValueError("At least one QUIC version must be enabled")
if self.idle_timeout <= 0:
raise ValueError("Idle timeout must be positive")
if self.max_datagram_size < 1200:
raise ValueError("Max datagram size must be at least 1200 bytes")
# Validate timeouts
timeout_fields = [
"STREAM_OPEN_TIMEOUT",
"STREAM_ACCEPT_TIMEOUT",
"STREAM_READ_TIMEOUT",
"STREAM_WRITE_TIMEOUT",
"STREAM_CLOSE_TIMEOUT",
]
for timeout_field in timeout_fields:
if getattr(self, timeout_field) <= 0:
raise ValueError(f"{timeout_field} must be positive")
# Validate flow control windows
if self.STREAM_FLOW_CONTROL_WINDOW <= 0:
raise ValueError("STREAM_FLOW_CONTROL_WINDOW must be positive")
if self.CONNECTION_FLOW_CONTROL_WINDOW < self.STREAM_FLOW_CONTROL_WINDOW:
raise ValueError(
"CONNECTION_FLOW_CONTROL_WINDOW must be >= STREAM_FLOW_CONTROL_WINDOW"
)
# Validate buffer sizes
if self.MAX_STREAM_RECEIVE_BUFFER <= 0:
raise ValueError("MAX_STREAM_RECEIVE_BUFFER must be positive")
if self.STREAM_RECEIVE_BUFFER_HIGH_WATERMARK > self.MAX_STREAM_RECEIVE_BUFFER:
raise ValueError(
"STREAM_RECEIVE_BUFFER_HIGH_WATERMARK cannot".__add__(
"exceed MAX_STREAM_RECEIVE_BUFFER"
)
)
if (
self.STREAM_RECEIVE_BUFFER_LOW_WATERMARK
>= self.STREAM_RECEIVE_BUFFER_HIGH_WATERMARK
):
raise ValueError(
"STREAM_RECEIVE_BUFFER_LOW_WATERMARK must be < HIGH_WATERMARK"
)
# Validate memory limits
if self.STREAM_MEMORY_LIMIT_PER_STREAM <= 0:
raise ValueError("STREAM_MEMORY_LIMIT_PER_STREAM must be positive")
if self.STREAM_MEMORY_LIMIT_PER_CONNECTION <= 0:
raise ValueError("STREAM_MEMORY_LIMIT_PER_CONNECTION must be positive")
expected_stream_memory = (
self.MAX_CONCURRENT_STREAMS * self.STREAM_MEMORY_LIMIT_PER_STREAM
)
if expected_stream_memory > self.STREAM_MEMORY_LIMIT_PER_CONNECTION * 2:
# Allow some headroom, but warn if configuration seems inconsistent
import logging
logger = logging.getLogger(__name__)
logger.warning(
"Stream memory configuration may be inconsistent: "
f"{self.MAX_CONCURRENT_STREAMS} streams ×"
"{self.STREAM_MEMORY_LIMIT_PER_STREAM} bytes "
"could exceed connection limit of"
f"{self.STREAM_MEMORY_LIMIT_PER_CONNECTION} bytes"
)
def get_stream_config_dict(self) -> dict[str, Any]:
"""Get stream-specific configuration as dictionary."""
stream_config = {}
for attr_name in dir(self):
if attr_name.startswith(
("STREAM_", "MAX_", "ENABLE_STREAM", "CONNECTION_FLOW")
):
stream_config[attr_name.lower()] = getattr(self, attr_name)
return stream_config
# Additional configuration classes for specific stream features
class QUICStreamFlowControlConfig:
"""Configuration for QUIC stream flow control."""
def __init__(
self,
initial_window_size: int = 512 * 1024,
max_window_size: int = 2 * 1024 * 1024,
window_update_threshold: float = 0.5,
enable_auto_tuning: bool = True,
):
self.initial_window_size = initial_window_size
self.max_window_size = max_window_size
self.window_update_threshold = window_update_threshold
self.enable_auto_tuning = enable_auto_tuning
def create_stream_config_for_use_case(
use_case: Literal[
"high_throughput", "low_latency", "many_streams", "memory_constrained"
],
) -> QUICTransportConfig:
"""
Create optimized stream configuration for specific use cases.
Args:
use_case: One of "high_throughput", "low_latency", "many_streams",
or "memory_constrained"
Returns:
Optimized QUICTransportConfig
"""
base_config = QUICTransportConfig()
if use_case == "high_throughput":
# Optimize for high throughput
base_config.STREAM_FLOW_CONTROL_WINDOW = 2 * 1024 * 1024 # 2MB
base_config.CONNECTION_FLOW_CONTROL_WINDOW = 10 * 1024 * 1024 # 10MB
base_config.MAX_STREAM_RECEIVE_BUFFER = 4 * 1024 * 1024 # 4MB
base_config.STREAM_PROCESSING_CONCURRENCY = 200
elif use_case == "low_latency":
# Optimize for low latency
base_config.STREAM_OPEN_TIMEOUT = 1.0
base_config.STREAM_READ_TIMEOUT = 5.0
base_config.STREAM_WRITE_TIMEOUT = 5.0
base_config.ENABLE_STREAM_BATCHING = False
base_config.STREAM_BATCH_SIZE = 1
elif use_case == "many_streams":
# Optimize for many concurrent streams
base_config.MAX_CONCURRENT_STREAMS = 5000
base_config.STREAM_FLOW_CONTROL_WINDOW = 128 * 1024 # 128KB
base_config.MAX_STREAM_RECEIVE_BUFFER = 256 * 1024 # 256KB
base_config.STREAM_PROCESSING_CONCURRENCY = 500
elif use_case == "memory_constrained":
# Optimize for low memory usage
base_config.MAX_CONCURRENT_STREAMS = 100
base_config.STREAM_FLOW_CONTROL_WINDOW = 64 * 1024 # 64KB
base_config.CONNECTION_FLOW_CONTROL_WINDOW = 256 * 1024 # 256KB
base_config.MAX_STREAM_RECEIVE_BUFFER = 128 * 1024 # 128KB
base_config.STREAM_MEMORY_LIMIT_PER_STREAM = 512 * 1024 # 512KB
base_config.STREAM_PROCESSING_CONCURRENCY = 50
else:
raise ValueError(f"Unknown use case: {use_case}")
return base_config
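Usage sketch for the factory above:

low_latency = create_stream_config_for_use_case("low_latency")
assert low_latency.STREAM_OPEN_TIMEOUT == 1.0
assert low_latency.ENABLE_STREAM_BATCHING is False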

File diff suppressed because it is too large.

View File

@ -0,0 +1,391 @@
"""
QUIC Transport exceptions
"""
from typing import Any, Literal
class QUICError(Exception):
"""Base exception for all QUIC transport errors."""
def __init__(self, message: str, error_code: int | None = None):
super().__init__(message)
self.error_code = error_code
# Transport-level exceptions
class QUICTransportError(QUICError):
"""Base exception for QUIC transport operations."""
pass
class QUICDialError(QUICTransportError):
"""Error occurred during QUIC connection establishment."""
pass
class QUICListenError(QUICTransportError):
"""Error occurred during QUIC listener operations."""
pass
class QUICSecurityError(QUICTransportError):
"""Error related to QUIC security/TLS operations."""
pass
# Connection-level exceptions
class QUICConnectionError(QUICError):
"""Base exception for QUIC connection operations."""
pass
class QUICConnectionClosedError(QUICConnectionError):
"""QUIC connection has been closed."""
pass
class QUICConnectionTimeoutError(QUICConnectionError):
"""QUIC connection operation timed out."""
pass
class QUICHandshakeError(QUICConnectionError):
"""Error during QUIC handshake process."""
pass
class QUICPeerVerificationError(QUICConnectionError):
"""Error verifying peer identity during handshake."""
pass
# Stream-level exceptions
class QUICStreamError(QUICError):
"""Base exception for QUIC stream operations."""
def __init__(
self,
message: str,
stream_id: str | None = None,
error_code: int | None = None,
):
super().__init__(message, error_code)
self.stream_id = stream_id
class QUICStreamClosedError(QUICStreamError):
"""Stream is closed and cannot be used for I/O operations."""
pass
class QUICStreamResetError(QUICStreamError):
"""Stream was reset by local or remote peer."""
def __init__(
self,
message: str,
stream_id: str | None = None,
error_code: int | None = None,
reset_by_peer: bool = False,
):
super().__init__(message, stream_id, error_code)
self.reset_by_peer = reset_by_peer
class QUICStreamTimeoutError(QUICStreamError):
"""Stream operation timed out."""
pass
class QUICStreamBackpressureError(QUICStreamError):
"""Stream write blocked due to flow control."""
pass
class QUICStreamLimitError(QUICStreamError):
"""Stream limit reached (too many concurrent streams)."""
pass
class QUICStreamStateError(QUICStreamError):
"""Invalid operation for current stream state."""
def __init__(
self,
message: str,
stream_id: str | None = None,
current_state: str | None = None,
attempted_operation: str | None = None,
):
super().__init__(message, stream_id)
self.current_state = current_state
self.attempted_operation = attempted_operation
# Flow control exceptions
class QUICFlowControlError(QUICError):
"""Base exception for flow control related errors."""
pass
class QUICFlowControlViolationError(QUICFlowControlError):
"""Flow control limits were violated."""
pass
class QUICFlowControlDeadlockError(QUICFlowControlError):
"""Flow control deadlock detected."""
pass
# Resource management exceptions
class QUICResourceError(QUICError):
"""Base exception for resource management errors."""
pass
class QUICMemoryLimitError(QUICResourceError):
"""Memory limit exceeded."""
pass
class QUICConnectionLimitError(QUICResourceError):
"""Connection limit exceeded."""
pass
# Multiaddr and addressing exceptions
class QUICAddressError(QUICError):
"""Base exception for QUIC addressing errors."""
pass
class QUICInvalidMultiaddrError(QUICAddressError):
"""Invalid multiaddr format for QUIC transport."""
pass
class QUICAddressResolutionError(QUICAddressError):
"""Failed to resolve QUIC address."""
pass
class QUICProtocolError(QUICError):
"""Base exception for QUIC protocol errors."""
pass
class QUICVersionNegotiationError(QUICProtocolError):
"""QUIC version negotiation failed."""
pass
class QUICUnsupportedVersionError(QUICProtocolError):
"""Unsupported QUIC version."""
pass
# Configuration exceptions
class QUICConfigurationError(QUICError):
"""Base exception for QUIC configuration errors."""
pass
class QUICInvalidConfigError(QUICConfigurationError):
"""Invalid QUIC configuration parameters."""
pass
class QUICCertificateError(QUICConfigurationError):
"""Error with TLS certificate configuration."""
pass
def map_quic_error_code(error_code: int) -> str:
"""
Map QUIC error codes to human-readable descriptions.
Based on RFC 9000 Transport Error Codes.
"""
error_codes = {
0x00: "NO_ERROR",
0x01: "INTERNAL_ERROR",
0x02: "CONNECTION_REFUSED",
0x03: "FLOW_CONTROL_ERROR",
0x04: "STREAM_LIMIT_ERROR",
0x05: "STREAM_STATE_ERROR",
0x06: "FINAL_SIZE_ERROR",
0x07: "FRAME_ENCODING_ERROR",
0x08: "TRANSPORT_PARAMETER_ERROR",
0x09: "CONNECTION_ID_LIMIT_ERROR",
0x0A: "PROTOCOL_VIOLATION",
0x0B: "INVALID_TOKEN",
0x0C: "APPLICATION_ERROR",
0x0D: "CRYPTO_BUFFER_EXCEEDED",
0x0E: "KEY_UPDATE_ERROR",
0x0F: "AEAD_LIMIT_REACHED",
0x10: "NO_VIABLE_PATH",
}
return error_codes.get(error_code, f"UNKNOWN_ERROR_{error_code:02X}")
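Usage sketch for the mapping above:

print(map_quic_error_code(0x03))  # FLOW_CONTROL_ERROR
print(map_quic_error_code(0xFF))  # UNKNOWN_ERROR_FF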
def create_stream_error(
error_type: str,
message: str,
stream_id: str | None = None,
error_code: int | None = None,
) -> QUICStreamError:
"""
Factory function to create appropriate stream error based on type.
Args:
error_type: Type of error ("closed", "reset", "timeout", "backpressure", etc.)
message: Error message
stream_id: Stream identifier
error_code: QUIC error code
Returns:
Appropriate QUICStreamError subclass
"""
error_type = error_type.lower()
if error_type in ("closed", "close"):
return QUICStreamClosedError(message, stream_id, error_code)
elif error_type == "reset":
return QUICStreamResetError(message, stream_id, error_code)
elif error_type == "timeout":
return QUICStreamTimeoutError(message, stream_id, error_code)
elif error_type in ("backpressure", "flow_control"):
return QUICStreamBackpressureError(message, stream_id, error_code)
elif error_type in ("limit", "stream_limit"):
return QUICStreamLimitError(message, stream_id, error_code)
elif error_type == "state":
return QUICStreamStateError(message, stream_id)
else:
return QUICStreamError(message, stream_id, error_code)
def create_connection_error(
error_type: str, message: str, error_code: int | None = None
) -> QUICConnectionError:
"""
Factory function to create appropriate connection error based on type.
Args:
error_type: Type of error ("closed", "timeout", "handshake", etc.)
message: Error message
error_code: QUIC error code
Returns:
Appropriate QUICConnectionError subclass
"""
error_type = error_type.lower()
if error_type in ("closed", "close"):
return QUICConnectionClosedError(message, error_code)
elif error_type == "timeout":
return QUICConnectionTimeoutError(message, error_code)
elif error_type == "handshake":
return QUICHandshakeError(message, error_code)
elif error_type in ("peer_verification", "verification"):
return QUICPeerVerificationError(message, error_code)
else:
return QUICConnectionError(message, error_code)
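Usage sketch for the error factories above:

err = create_stream_error("reset", "peer reset stream", stream_id="8", error_code=0x04)
assert isinstance(err, QUICStreamResetError)

conn_err = create_connection_error("timeout", "handshake took too long")
assert isinstance(conn_err, QUICConnectionTimeoutError)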
class QUICErrorContext:
"""
Context manager for handling QUIC errors with automatic error mapping.
Useful for converting low-level aioquic errors to py-libp2p QUIC errors.
"""
def __init__(self, operation: str, component: str = "quic") -> None:
self.operation = operation
self.component = component
def __enter__(self) -> "QUICErrorContext":
return self
# TODO: Fix types for exc_type
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: Any,
) -> Literal[False]:
if exc_type is None:
return False
if exc_val is None:
return False
# Map common aioquic exceptions to our exceptions
if "ConnectionClosed" in str(exc_type):
raise QUICConnectionClosedError(
f"Connection closed during {self.operation}: {exc_val}"
) from exc_val
elif "StreamReset" in str(exc_type):
raise QUICStreamResetError(
f"Stream reset during {self.operation}: {exc_val}"
) from exc_val
elif "timeout" in str(exc_val).lower():
if "stream" in self.component.lower():
raise QUICStreamTimeoutError(
f"Timeout during {self.operation}: {exc_val}"
) from exc_val
else:
raise QUICConnectionTimeoutError(
f"Timeout during {self.operation}: {exc_val}"
) from exc_val
elif "flow control" in str(exc_val).lower():
raise QUICStreamBackpressureError(
f"Flow control error during {self.operation}: {exc_val}"
) from exc_val
# Let other exceptions propagate
return False
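Usage sketch for the context manager above; `quic_conn`, `stream_id`, and `payload` are placeholders, not names from this diff.

with QUICErrorContext("stream write", component="stream"):
    quic_conn.send_stream_data(stream_id, payload)  # low-level aioquic call on placeholder objects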

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -0,0 +1,656 @@
"""
QUIC Stream implementation
Provides stream interface over QUIC's native multiplexing.
"""
from enum import Enum
import logging
import time
from types import TracebackType
from typing import TYPE_CHECKING, Any, cast
import trio
from .exceptions import (
QUICStreamBackpressureError,
QUICStreamClosedError,
QUICStreamResetError,
QUICStreamTimeoutError,
)
if TYPE_CHECKING:
from libp2p.abc import IMuxedStream
from libp2p.custom_types import TProtocol
from .connection import QUICConnection
else:
IMuxedStream = cast(type, object)
TProtocol = cast(type, object)
logger = logging.getLogger(__name__)
class StreamState(Enum):
"""Stream lifecycle states following libp2p patterns."""
OPEN = "open"
WRITE_CLOSED = "write_closed"
READ_CLOSED = "read_closed"
CLOSED = "closed"
RESET = "reset"
class StreamDirection(Enum):
"""Stream direction for tracking initiator."""
INBOUND = "inbound"
OUTBOUND = "outbound"
class StreamTimeline:
"""Track stream lifecycle events for debugging and monitoring."""
def __init__(self) -> None:
self.created_at = time.time()
self.opened_at: float | None = None
self.first_data_at: float | None = None
self.closed_at: float | None = None
self.reset_at: float | None = None
self.error_code: int | None = None
def record_open(self) -> None:
self.opened_at = time.time()
def record_first_data(self) -> None:
if self.first_data_at is None:
self.first_data_at = time.time()
def record_close(self) -> None:
self.closed_at = time.time()
def record_reset(self, error_code: int) -> None:
self.reset_at = time.time()
self.error_code = error_code
class QUICStream(IMuxedStream):
"""
QUIC Stream implementation following libp2p IMuxedStream interface.
Based on patterns from go-libp2p and js-libp2p, this implementation:
- Leverages QUIC's native multiplexing and flow control
- Integrates with libp2p resource management
- Provides comprehensive error handling with QUIC-specific codes
- Supports bidirectional communication with independent close semantics
- Implements proper stream lifecycle management
"""
def __init__(
self,
connection: "QUICConnection",
stream_id: int,
direction: StreamDirection,
remote_addr: tuple[str, int],
resource_scope: Any | None = None,
):
"""
Initialize QUIC stream.
Args:
connection: Parent QUIC connection
stream_id: QUIC stream identifier
direction: Stream direction (inbound/outbound)
resource_scope: Resource manager scope for memory accounting
remote_addr: Remote addr stream is connected to
"""
self._connection = connection
self._stream_id = stream_id
self._direction = direction
self._resource_scope = resource_scope
# libp2p interface compliance
self._protocol: TProtocol | None = None
self._metadata: dict[str, Any] = {}
self._remote_addr = remote_addr
# Stream state management
self._state = StreamState.OPEN
self._state_lock = trio.Lock()
# Flow control and buffering
self._receive_buffer = bytearray()
self._receive_buffer_lock = trio.Lock()
self._receive_event = trio.Event()
self._backpressure_event = trio.Event()
self._backpressure_event.set() # Initially no backpressure
# Close/reset state
self._write_closed = False
self._read_closed = False
self._close_event = trio.Event()
self._reset_error_code: int | None = None
# Lifecycle tracking
self._timeline = StreamTimeline()
self._timeline.record_open()
# Resource accounting
self._memory_reserved = 0
# Stream constant configurations
self.READ_TIMEOUT = connection._transport._config.STREAM_READ_TIMEOUT
self.WRITE_TIMEOUT = connection._transport._config.STREAM_WRITE_TIMEOUT
self.FLOW_CONTROL_WINDOW_SIZE = (
connection._transport._config.STREAM_FLOW_CONTROL_WINDOW
)
self.MAX_RECEIVE_BUFFER_SIZE = (
connection._transport._config.MAX_STREAM_RECEIVE_BUFFER
)
if self._resource_scope:
self._reserve_memory(self.FLOW_CONTROL_WINDOW_SIZE)
logger.debug(
f"Created QUIC stream {stream_id} "
f"({direction.value}, connection: {connection.remote_peer_id()})"
)
# Properties for libp2p interface compliance
@property
def protocol(self) -> TProtocol | None:
"""Get the protocol identifier for this stream."""
return self._protocol
@protocol.setter
def protocol(self, protocol_id: TProtocol) -> None:
"""Set the protocol identifier for this stream."""
self._protocol = protocol_id
self._metadata["protocol"] = protocol_id
logger.debug(f"Stream {self.stream_id} protocol set to: {protocol_id}")
@property
def stream_id(self) -> str:
"""Get stream ID as string for libp2p compatibility."""
return str(self._stream_id)
@property
def muxed_conn(self) -> "QUICConnection": # type: ignore
"""Get the parent muxed connection."""
return self._connection
@property
def state(self) -> StreamState:
"""Get current stream state."""
return self._state
@property
def direction(self) -> StreamDirection:
"""Get stream direction."""
return self._direction
@property
def is_initiator(self) -> bool:
"""Check if this stream was locally initiated."""
return self._direction == StreamDirection.OUTBOUND
# Core stream operations
async def read(self, n: int | None = None) -> bytes:
"""
Read data from the stream with QUIC flow control.
Args:
n: Maximum number of bytes to read. If None or -1, read all available.
Returns:
Data read from stream
Raises:
QUICStreamClosedError: Stream is closed
QUICStreamResetError: Stream was reset
QUICStreamTimeoutError: Read timeout exceeded
"""
if n is None:
n = -1
async with self._state_lock:
if self._state in (StreamState.CLOSED, StreamState.RESET):
raise QUICStreamClosedError(f"Stream {self.stream_id} is closed")
if self._read_closed:
# Return any remaining buffered data, then EOF
async with self._receive_buffer_lock:
if self._receive_buffer:
data = self._extract_data_from_buffer(n)
self._timeline.record_first_data()
return data
return b""
# Wait for data with timeout
timeout = self.READ_TIMEOUT
try:
with trio.move_on_after(timeout) as cancel_scope:
while True:
async with self._receive_buffer_lock:
if self._receive_buffer:
data = self._extract_data_from_buffer(n)
self._timeline.record_first_data()
return data
# Check if stream was closed while waiting
if self._read_closed:
return b""
# Wait for more data
await self._receive_event.wait()
self._receive_event = trio.Event() # Reset for next wait
if cancel_scope.cancelled_caught:
raise QUICStreamTimeoutError(f"Read timeout on stream {self.stream_id}")
return b""
except QUICStreamResetError:
# Stream was reset while reading
raise
except Exception as e:
logger.error(f"Error reading from stream {self.stream_id}: {e}")
await self._handle_stream_error(e)
raise
async def write(self, data: bytes) -> None:
"""
Write data to the stream with QUIC flow control.
Args:
data: Data to write
Raises:
QUICStreamClosedError: Stream is closed for writing
QUICStreamBackpressureError: Flow control window exhausted
QUICStreamResetError: Stream was reset
"""
if not data:
return
async with self._state_lock:
if self._state in (StreamState.CLOSED, StreamState.RESET):
raise QUICStreamClosedError(f"Stream {self.stream_id} is closed")
if self._write_closed:
raise QUICStreamClosedError(
f"Stream {self.stream_id} write side is closed"
)
try:
# Handle flow control backpressure
await self._backpressure_event.wait()
# Send data through QUIC connection
self._connection._quic.send_stream_data(self._stream_id, data)
await self._connection._transmit()
self._timeline.record_first_data()
logger.debug(f"Wrote {len(data)} bytes to stream {self.stream_id}")
except Exception as e:
logger.error(f"Error writing to stream {self.stream_id}: {e}")
# Convert QUIC-specific errors
if "flow control" in str(e).lower():
raise QUICStreamBackpressureError(f"Flow control limit reached: {e}")
await self._handle_stream_error(e)
raise
async def close(self) -> None:
"""
Close the stream gracefully (both read and write sides).
This implements proper close semantics where both sides
are closed and resources are cleaned up.
"""
async with self._state_lock:
if self._state in (StreamState.CLOSED, StreamState.RESET):
return
logger.debug(f"Closing stream {self.stream_id}")
# Close both sides
if not self._write_closed:
await self.close_write()
if not self._read_closed:
await self.close_read()
# Update state and cleanup
async with self._state_lock:
self._state = StreamState.CLOSED
await self._cleanup_resources()
self._timeline.record_close()
self._close_event.set()
logger.debug(f"Stream {self.stream_id} closed")
async def close_write(self) -> None:
"""Close the write side of the stream."""
if self._write_closed:
return
try:
# Send FIN to close write side
self._connection._quic.send_stream_data(
self._stream_id, b"", end_stream=True
)
await self._connection._transmit()
self._write_closed = True
async with self._state_lock:
if self._read_closed:
self._state = StreamState.CLOSED
else:
self._state = StreamState.WRITE_CLOSED
logger.debug(f"Stream {self.stream_id} write side closed")
except Exception as e:
logger.error(f"Error closing write side of stream {self.stream_id}: {e}")
async def close_read(self) -> None:
"""Close the read side of the stream."""
if self._read_closed:
return
try:
self._read_closed = True
async with self._state_lock:
if self._write_closed:
self._state = StreamState.CLOSED
else:
self._state = StreamState.READ_CLOSED
# Wake up any pending reads
self._receive_event.set()
logger.debug(f"Stream {self.stream_id} read side closed")
except Exception as e:
logger.error(f"Error closing read side of stream {self.stream_id}: {e}")
async def reset(self, error_code: int = 0) -> None:
"""
Reset the stream with the given error code.
Args:
error_code: QUIC error code for the reset
"""
async with self._state_lock:
if self._state == StreamState.RESET:
return
logger.debug(
f"Resetting stream {self.stream_id} with error code {error_code}"
)
self._state = StreamState.RESET
self._reset_error_code = error_code
try:
# Send QUIC reset frame
self._connection._quic.reset_stream(self._stream_id, error_code)
await self._connection._transmit()
except Exception as e:
logger.error(f"Error sending reset for stream {self.stream_id}: {e}")
finally:
# Always cleanup resources
await self._cleanup_resources()
self._timeline.record_reset(error_code)
self._close_event.set()
def is_closed(self) -> bool:
"""Check if stream is completely closed."""
return self._state in (StreamState.CLOSED, StreamState.RESET)
def is_reset(self) -> bool:
"""Check if stream was reset."""
return self._state == StreamState.RESET
def can_read(self) -> bool:
"""Check if stream can be read from."""
return not self._read_closed and self._state not in (
StreamState.CLOSED,
StreamState.RESET,
)
def can_write(self) -> bool:
"""Check if stream can be written to."""
return not self._write_closed and self._state not in (
StreamState.CLOSED,
StreamState.RESET,
)
async def handle_data_received(self, data: bytes, end_stream: bool) -> None:
"""
Handle data received from the QUIC connection.
Args:
data: Received data
end_stream: Whether this is the last data (FIN received)
"""
if self._state == StreamState.RESET:
return
if data:
async with self._receive_buffer_lock:
if len(self._receive_buffer) + len(data) > self.MAX_RECEIVE_BUFFER_SIZE:
logger.warning(
f"Stream {self.stream_id} receive buffer overflow, "
f"dropping {len(data)} bytes"
)
return
self._receive_buffer.extend(data)
self._timeline.record_first_data()
# Notify waiting readers
self._receive_event.set()
logger.debug(f"Stream {self.stream_id} received {len(data)} bytes")
if end_stream:
self._read_closed = True
async with self._state_lock:
if self._write_closed:
self._state = StreamState.CLOSED
else:
self._state = StreamState.READ_CLOSED
# Wake up readers to process remaining data and EOF
self._receive_event.set()
logger.debug(f"Stream {self.stream_id} received FIN")
async def handle_stop_sending(self, error_code: int) -> None:
"""
Handle STOP_SENDING frame from remote peer.
When a STOP_SENDING frame is received, the peer is requesting that we
stop sending data on this stream. We respond by resetting the stream.
Args:
error_code: Error code from the STOP_SENDING frame
"""
logger.debug(
f"Stream {self.stream_id} handling STOP_SENDING (error_code={error_code})"
)
self._write_closed = True
# Wake up any pending write operations
self._backpressure_event.set()
async with self._state_lock:
if self.direction == StreamDirection.OUTBOUND:
self._state = StreamState.CLOSED
elif self._read_closed:
self._state = StreamState.CLOSED
else:
# Only write side closed - add WRITE_CLOSED state if needed
self._state = StreamState.WRITE_CLOSED
# Send RESET_STREAM in response (QUIC protocol requirement)
try:
self._connection._quic.reset_stream(int(self.stream_id), error_code)
await self._connection._transmit()
logger.debug(f"Sent RESET_STREAM for stream {self.stream_id}")
except Exception as e:
logger.warning(
f"Could not send RESET_STREAM for stream {self.stream_id}: {e}"
)
async def handle_reset(self, error_code: int) -> None:
"""
Handle stream reset from remote peer.
Args:
error_code: QUIC error code from reset frame
"""
logger.debug(
f"Stream {self.stream_id} reset by peer with error code {error_code}"
)
async with self._state_lock:
self._state = StreamState.RESET
self._reset_error_code = error_code
await self._cleanup_resources()
self._timeline.record_reset(error_code)
self._close_event.set()
# Wake up any pending operations
self._receive_event.set()
self._backpressure_event.set()
async def handle_flow_control_update(self, available_window: int) -> None:
"""
Handle flow control window updates.
Args:
available_window: Available flow control window size
"""
if available_window > 0:
self._backpressure_event.set()
logger.debug(
f"Stream {self.stream_id} flow control".__add__(
f"window updated: {available_window}"
)
)
else:
self._backpressure_event = trio.Event() # Reset to blocking state
logger.debug(f"Stream {self.stream_id} flow control window exhausted")
def _extract_data_from_buffer(self, n: int) -> bytes:
"""Extract data from receive buffer with specified limit."""
if n == -1:
# Read all available data
data = bytes(self._receive_buffer)
self._receive_buffer.clear()
else:
# Read up to n bytes
data = bytes(self._receive_buffer[:n])
self._receive_buffer = self._receive_buffer[n:]
return data
async def _handle_stream_error(self, error: Exception) -> None:
"""Handle errors by resetting the stream."""
logger.error(f"Stream {self.stream_id} error: {error}")
await self.reset(error_code=1) # Generic error code
def _reserve_memory(self, size: int) -> None:
"""Reserve memory with resource manager."""
if self._resource_scope:
try:
self._resource_scope.reserve_memory(size)
self._memory_reserved += size
except Exception as e:
logger.warning(
f"Failed to reserve memory for stream {self.stream_id}: {e}"
)
def _release_memory(self, size: int) -> None:
"""Release memory with resource manager."""
if self._resource_scope and size > 0:
try:
self._resource_scope.release_memory(size)
self._memory_reserved = max(0, self._memory_reserved - size)
except Exception as e:
logger.warning(
f"Failed to release memory for stream {self.stream_id}: {e}"
)
async def _cleanup_resources(self) -> None:
"""Clean up stream resources."""
# Release all reserved memory
if self._memory_reserved > 0:
self._release_memory(self._memory_reserved)
# Clear receive buffer
async with self._receive_buffer_lock:
self._receive_buffer.clear()
# Remove from connection's stream registry
self._connection._remove_stream(self._stream_id)
logger.debug(f"Stream {self.stream_id} resources cleaned up")
# Abstract method implementations
def get_remote_address(self) -> tuple[str, int]:
return self._remote_addr
async def __aenter__(self) -> "QUICStream":
"""Enter the async context manager."""
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
"""Exit the async context manager and close the stream."""
logger.debug("Exiting the context and closing the stream")
await self.close()
def set_deadline(self, ttl: int) -> bool:
"""
Set a deadline for the stream. QUIC does not support deadlines natively,
so this method raises NotImplementedError to indicate the operation is unsupported.
:param ttl: Time-to-live in seconds (ignored).
:raises NotImplementedError: deadlines are not supported.
"""
raise NotImplementedError("QUIC does not support setting read deadlines")
# String representation for debugging
def __repr__(self) -> str:
return (
f"QUICStream(id={self.stream_id}, "
f"state={self._state.value}, "
f"direction={self._direction.value}, "
f"protocol={self._protocol})"
)
def __str__(self) -> str:
return f"QUICStream({self.stream_id})"

View File

@ -0,0 +1,491 @@
"""
QUIC Transport implementation
"""
import copy
import logging
import ssl
from typing import TYPE_CHECKING, cast
from aioquic.quic.configuration import (
QuicConfiguration,
)
from aioquic.quic.connection import (
QuicConnection as NativeQUICConnection,
)
from aioquic.quic.logger import QuicLogger
import multiaddr
import trio
from libp2p.abc import (
ITransport,
)
from libp2p.crypto.keys import (
PrivateKey,
)
from libp2p.custom_types import TProtocol, TQUICConnHandlerFn
from libp2p.peer.id import (
ID,
)
from libp2p.transport.quic.security import QUICTLSSecurityConfig
from libp2p.transport.quic.utils import (
create_client_config_from_base,
create_server_config_from_base,
get_alpn_protocols,
is_quic_multiaddr,
multiaddr_to_quic_version,
quic_multiaddr_to_endpoint,
quic_version_to_wire_format,
)
if TYPE_CHECKING:
from libp2p.network.swarm import Swarm
else:
Swarm = cast(type, object)
from .config import (
QUICTransportConfig,
)
from .connection import (
QUICConnection,
)
from .exceptions import (
QUICDialError,
QUICListenError,
QUICSecurityError,
)
from .listener import (
QUICListener,
)
from .security import (
QUICTLSConfigManager,
create_quic_security_transport,
)
QUIC_V1_PROTOCOL = QUICTransportConfig.PROTOCOL_QUIC_V1
QUIC_DRAFT29_PROTOCOL = QUICTransportConfig.PROTOCOL_QUIC_DRAFT29
logger = logging.getLogger(__name__)
class QUICTransport(ITransport):
"""
QUIC transport implementation of the libp2p ITransport interface.
"""
def __init__(
self, private_key: PrivateKey, config: QUICTransportConfig | None = None
) -> None:
"""
Initialize QUIC transport with security integration.
Args:
private_key: libp2p private key for identity and TLS cert generation
config: QUIC transport configuration options
"""
self._private_key = private_key
self._peer_id = ID.from_pubkey(private_key.get_public_key())
self._config = config or QUICTransportConfig()
# Connection management
self._connections: dict[str, QUICConnection] = {}
self._listeners: list[QUICListener] = []
# Security manager for TLS integration
self._security_manager = create_quic_security_transport(
self._private_key, self._peer_id
)
# QUIC configurations for different versions
self._quic_configs: dict[TProtocol, QuicConfiguration] = {}
self._setup_quic_configurations()
# Resource management
self._closed = False
self._nursery_manager = trio.CapacityLimiter(1)
self._background_nursery: trio.Nursery | None = None
self._swarm: Swarm | None = None
logger.debug(
f"Initialized QUIC transport with security for peer {self._peer_id}"
)
def set_background_nursery(self, nursery: trio.Nursery) -> None:
"""Set the nursery to use for background tasks (called by swarm)."""
self._background_nursery = nursery
logger.debug("Transport background nursery set")
def set_swarm(self, swarm: Swarm) -> None:
"""Set the swarm for adding incoming connections."""
self._swarm = swarm
def _setup_quic_configurations(self) -> None:
"""Setup QUIC configurations."""
try:
# Get TLS configuration from security manager
server_tls_config = self._security_manager.create_server_config()
client_tls_config = self._security_manager.create_client_config()
# Base server configuration
base_server_config = QuicConfiguration(
is_client=False,
alpn_protocols=get_alpn_protocols(),
verify_mode=self._config.verify_mode,
max_datagram_frame_size=self._config.max_datagram_size,
idle_timeout=self._config.idle_timeout,
)
# Base client configuration
base_client_config = QuicConfiguration(
is_client=True,
alpn_protocols=get_alpn_protocols(),
verify_mode=self._config.verify_mode,
max_datagram_frame_size=self._config.max_datagram_size,
idle_timeout=self._config.idle_timeout,
)
# Apply TLS configuration
self._apply_tls_configuration(base_server_config, server_tls_config)
self._apply_tls_configuration(base_client_config, client_tls_config)
# QUIC v1 (RFC 9000) configurations
if self._config.enable_v1:
quic_v1_server_config = create_server_config_from_base(
base_server_config, self._security_manager, self._config
)
quic_v1_server_config.supported_versions = [
quic_version_to_wire_format(QUIC_V1_PROTOCOL)
]
quic_v1_client_config = create_client_config_from_base(
base_client_config, self._security_manager, self._config
)
quic_v1_client_config.supported_versions = [
quic_version_to_wire_format(QUIC_V1_PROTOCOL)
]
# Store both server and client configs for v1
self._quic_configs[TProtocol(f"{QUIC_V1_PROTOCOL}_server")] = (
quic_v1_server_config
)
self._quic_configs[TProtocol(f"{QUIC_V1_PROTOCOL}_client")] = (
quic_v1_client_config
)
# QUIC draft-29 configurations for compatibility
if self._config.enable_draft29:
draft29_server_config: QuicConfiguration = copy.copy(base_server_config)
draft29_server_config.supported_versions = [
quic_version_to_wire_format(QUIC_DRAFT29_PROTOCOL)
]
draft29_client_config = copy.copy(base_client_config)
draft29_client_config.supported_versions = [
quic_version_to_wire_format(QUIC_DRAFT29_PROTOCOL)
]
self._quic_configs[TProtocol(f"{QUIC_DRAFT29_PROTOCOL}_server")] = (
draft29_server_config
)
self._quic_configs[TProtocol(f"{QUIC_DRAFT29_PROTOCOL}_client")] = (
draft29_client_config
)
logger.debug("QUIC configurations initialized with libp2p TLS security")
except Exception as e:
raise QUICSecurityError(
f"Failed to setup QUIC TLS configurations: {e}"
) from e
def _apply_tls_configuration(
self, config: QuicConfiguration, tls_config: QUICTLSSecurityConfig
) -> None:
"""
Apply TLS configuration to a QUIC configuration using aioquic's actual API.
Args:
config: QuicConfiguration to update
tls_config: TLS configuration object from the security manager
"""
try:
config.certificate = tls_config.certificate
config.private_key = tls_config.private_key
config.certificate_chain = tls_config.certificate_chain
config.alpn_protocols = tls_config.alpn_protocols
config.verify_mode = ssl.CERT_NONE
logger.debug("Successfully applied TLS configuration to QUIC config")
except Exception as e:
raise QUICSecurityError(f"Failed to apply TLS configuration: {e}") from e
async def dial(
self,
maddr: multiaddr.Multiaddr,
) -> QUICConnection:
"""
Dial a remote peer using QUIC transport with security verification.
Args:
maddr: Multiaddr of the remote peer, including a /p2p/ component
(e.g., /ip4/1.2.3.4/udp/4001/quic-v1/p2p/<peer-id>); the expected
peer ID for verification is derived from that component
Returns:
Raw connection interface to the remote peer
Raises:
QUICDialError: If dialing fails
QUICSecurityError: If security verification fails
"""
if self._closed:
raise QUICDialError("Transport is closed")
if not is_quic_multiaddr(maddr):
raise QUICDialError(f"Invalid QUIC multiaddr: {maddr}")
try:
# Extract connection details from multiaddr
host, port = quic_multiaddr_to_endpoint(maddr)
remote_peer_id = maddr.get_peer_id()
if remote_peer_id is not None:
remote_peer_id = ID.from_base58(remote_peer_id)
if remote_peer_id is None:
logger.error("Unable to derive peer id from multiaddr")
raise QUICDialError("Unable to derive peer id from multiaddr")
quic_version = multiaddr_to_quic_version(maddr)
# Get appropriate QUIC client configuration
config_key = TProtocol(f"{quic_version}_client")
logger.debug("config_key", config_key, self._quic_configs.keys())
config = self._quic_configs.get(config_key)
if not config:
raise QUICDialError(f"Unsupported QUIC version: {quic_version}")
config.is_client = True
config.quic_logger = QuicLogger()
# Ensure client certificate is properly set for mutual authentication
if not config.certificate or not config.private_key:
logger.warning(
"Client config missing certificate - applying TLS config"
)
client_tls_config = self._security_manager.create_client_config()
self._apply_tls_configuration(config, client_tls_config)
# Log the dial attempt
logger.info(
f"Dialing QUIC connection to {host}:{port} (version: {quic_version})"
)
logger.debug("Starting QUIC Connection")
# Create QUIC connection using aioquic's sans-IO core
native_quic_connection = NativeQUICConnection(configuration=config)
# Create trio-based QUIC connection wrapper with security
connection = QUICConnection(
quic_connection=native_quic_connection,
remote_addr=(host, port),
remote_peer_id=remote_peer_id,
local_peer_id=self._peer_id,
is_initiator=True,
maddr=maddr,
transport=self,
security_manager=self._security_manager,
)
logger.debug("QUIC Connection Created")
if self._background_nursery is None:
logger.error("No nursery set to execute background tasks")
raise QUICDialError("No nursery found to execute tasks")
await connection.connect(self._background_nursery)
# Store connection for management
conn_id = f"{host}:{port}"
self._connections[conn_id] = connection
return connection
except Exception as e:
logger.error(f"Failed to dial QUIC connection to {maddr}: {e}")
raise QUICDialError(f"Dial failed: {e}") from e
async def _verify_peer_identity(
self, connection: QUICConnection, expected_peer_id: ID
) -> None:
"""
Verify remote peer identity after TLS handshake.
Args:
connection: The established QUIC connection
expected_peer_id: Expected peer ID
Raises:
QUICSecurityError: If peer verification fails
"""
try:
# Get peer certificate from the connection
peer_certificate = await connection.get_peer_certificate()
if not peer_certificate:
raise QUICSecurityError("No peer certificate available")
# Verify peer identity using security manager
verified_peer_id = self._security_manager.verify_peer_identity(
peer_certificate, expected_peer_id
)
if verified_peer_id != expected_peer_id:
raise QUICSecurityError(
"Peer ID verification failed: expected "
f"{expected_peer_id}, got {verified_peer_id}"
)
logger.debug(f"Peer identity verified: {verified_peer_id}")
logger.debug(f"Peer identity verified: {verified_peer_id}")
except Exception as e:
raise QUICSecurityError(f"Peer identity verification failed: {e}") from e
def create_listener(self, handler_function: TQUICConnHandlerFn) -> QUICListener:
"""
Create a QUIC listener with integrated security.
Args:
handler_function: Function to handle new connections
Returns:
QUIC listener instance
Raises:
QUICListenError: If transport is closed
"""
if self._closed:
raise QUICListenError("Transport is closed")
# Get server configurations for the listener
server_configs = {
version: config
for version, config in self._quic_configs.items()
if version.endswith("_server")
}
listener = QUICListener(
transport=self,
handler_function=handler_function,
quic_configs=server_configs,
config=self._config,
security_manager=self._security_manager,
)
self._listeners.append(listener)
logger.debug("Created QUIC listener with security")
return listener
def can_dial(self, maddr: multiaddr.Multiaddr) -> bool:
"""
Check if this transport can dial the given multiaddr.
Args:
maddr: Multiaddr to check
Returns:
True if this transport can dial the address
"""
return is_quic_multiaddr(maddr)
def protocols(self) -> list[TProtocol]:
"""
Get supported protocol identifiers.
Returns:
List of supported protocol strings
"""
protocols = [QUIC_V1_PROTOCOL]
if self._config.enable_draft29:
protocols.append(QUIC_DRAFT29_PROTOCOL)
return protocols
def listen_order(self) -> int:
"""
Get the listen order priority for this transport.
Matches go-libp2p's ListenOrder = 1 for QUIC.
Returns:
Priority order for listening (lower = higher priority)
"""
return 1
async def close(self) -> None:
"""Close the transport and cleanup resources."""
if self._closed:
return
self._closed = True
logger.debug("Closing QUIC transport")
# Close all active connections and listeners concurrently using trio nursery
async with trio.open_nursery() as nursery:
# Close all connections
for connection in self._connections.values():
nursery.start_soon(connection.close)
# Close all listeners
for listener in self._listeners:
nursery.start_soon(listener.close)
self._connections.clear()
self._listeners.clear()
logger.debug("QUIC transport closed")
async def _cleanup_terminated_connection(self, connection: QUICConnection) -> None:
"""Clean up a terminated connection from all listeners."""
try:
for listener in self._listeners:
await listener._remove_connection_by_object(connection)
logger.debug(
"✅ TRANSPORT: Cleaned up terminated connection from all listeners"
)
except Exception as e:
logger.error(f"❌ TRANSPORT: Error cleaning up terminated connection: {e}")
def get_stats(self) -> dict[str, int | list[str] | object]:
"""Get transport statistics including security info."""
return {
"active_connections": len(self._connections),
"active_listeners": len(self._listeners),
"supported_protocols": self.protocols(),
"local_peer_id": str(self._peer_id),
"security_enabled": True,
"tls_configured": True,
}
def get_security_manager(self) -> QUICTLSConfigManager:
"""
Get the security manager for this transport.
Returns:
The QUIC TLS configuration manager
"""
return self._security_manager
def get_listener_socket(self) -> trio.socket.SocketType | None:
"""Get the socket from the first active listener."""
for listener in self._listeners:
if listener.is_listening() and listener._socket:
return listener._socket
return None
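# --- Hedged usage sketch (illustrative only, not part of the module API) ---
# A minimal example of how this transport could be driven directly, assuming
# `create_new_key_pair` from libp2p.crypto.secp256k1 and a dialable remote
# address; "<remote-peer-id>" is a placeholder that must be replaced with the
# remote peer's base58 ID, since dial() requires a /p2p/ component.
async def _example_dial_flow() -> None:
    from libp2p.crypto.secp256k1 import create_new_key_pair

    key_pair = create_new_key_pair()
    transport = QUICTransport(key_pair.private_key, QUICTransportConfig())
    async with trio.open_nursery() as nursery:
        # dial() needs a nursery for the connection's background tasks.
        transport.set_background_nursery(nursery)
        maddr = multiaddr.Multiaddr(
            "/ip4/127.0.0.1/udp/4001/quic-v1/p2p/<remote-peer-id>"
        )
        if transport.can_dial(maddr):
            connection = await transport.dial(maddr)
            await connection.close()
        await transport.close()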

View File

@ -0,0 +1,466 @@
"""
Multiaddr utilities for QUIC transport - Module 4.
Essential utilities required for QUIC transport implementation.
Based on go-libp2p and js-libp2p QUIC implementations.
"""
import ipaddress
import logging
import ssl
from aioquic.quic.configuration import QuicConfiguration
import multiaddr
from libp2p.custom_types import TProtocol
from libp2p.transport.quic.security import QUICTLSConfigManager
from .config import QUICTransportConfig
from .exceptions import QUICInvalidMultiaddrError, QUICUnsupportedVersionError
logger = logging.getLogger(__name__)
# Protocol constants
QUIC_V1_PROTOCOL = QUICTransportConfig.PROTOCOL_QUIC_V1
QUIC_DRAFT29_PROTOCOL = QUICTransportConfig.PROTOCOL_QUIC_DRAFT29
UDP_PROTOCOL = "udp"
IP4_PROTOCOL = "ip4"
IP6_PROTOCOL = "ip6"
SERVER_CONFIG_PROTOCOL_V1 = f"{QUIC_V1_PROTOCOL}_server"
CLIENT_CONFIG_PROTOCOL_V1 = f"{QUIC_V1_PROTOCOL}_client"
SERVER_CONFIG_PROTOCOL_DRAFT_29 = f"{QUIC_DRAFT29_PROTOCOL}_server"
CLIENT_CONFIG_PROTOCOL_DRAFT_29 = f"{QUIC_DRAFT29_PROTOCOL}_client"
CUSTOM_QUIC_VERSION_MAPPING: dict[str, int] = {
SERVER_CONFIG_PROTOCOL_V1: 0x00000001, # RFC 9000
CLIENT_CONFIG_PROTOCOL_V1: 0x00000001, # RFC 9000
SERVER_CONFIG_PROTOCOL_DRAFT_29: 0xFF00001D, # draft-29
CLIENT_CONFIG_PROTOCOL_DRAFT_29: 0xFF00001D, # draft-29
}
# QUIC version to wire format mappings (required for aioquic)
QUIC_VERSION_MAPPINGS: dict[TProtocol, int] = {
QUIC_V1_PROTOCOL: 0x00000001, # RFC 9000
QUIC_DRAFT29_PROTOCOL: 0xFF00001D, # draft-29
}
# ALPN protocols for libp2p over QUIC
LIBP2P_ALPN_PROTOCOLS: list[str] = ["libp2p"]
def is_quic_multiaddr(maddr: multiaddr.Multiaddr) -> bool:
"""
Check if a multiaddr represents a QUIC address.
Valid QUIC multiaddrs:
- /ip4/127.0.0.1/udp/4001/quic-v1
- /ip4/127.0.0.1/udp/4001/quic
- /ip6/::1/udp/4001/quic-v1
- /ip6/::1/udp/4001/quic
Args:
maddr: Multiaddr to check
Returns:
True if the multiaddr represents a QUIC address
"""
try:
addr_str = str(maddr)
# Check for required components
has_ip = f"/{IP4_PROTOCOL}/" in addr_str or f"/{IP6_PROTOCOL}/" in addr_str
has_udp = f"/{UDP_PROTOCOL}/" in addr_str
has_quic = (
f"/{QUIC_V1_PROTOCOL}" in addr_str
or f"/{QUIC_DRAFT29_PROTOCOL}" in addr_str
or "/quic" in addr_str
)
return has_ip and has_udp and has_quic
except Exception:
return False
def quic_multiaddr_to_endpoint(maddr: multiaddr.Multiaddr) -> tuple[str, int]:
"""
Extract host and port from a QUIC multiaddr.
Args:
maddr: QUIC multiaddr
Returns:
Tuple of (host, port)
Raises:
QUICInvalidMultiaddrError: If multiaddr is not a valid QUIC address
"""
if not is_quic_multiaddr(maddr):
raise QUICInvalidMultiaddrError(f"Not a valid QUIC multiaddr: {maddr}")
try:
host = None
port = None
# Try to get IPv4 address
try:
host = maddr.value_for_protocol(multiaddr.protocols.P_IP4) # type: ignore
except Exception:
pass
# Try to get IPv6 address if IPv4 not found
if host is None:
try:
host = maddr.value_for_protocol(multiaddr.protocols.P_IP6) # type: ignore
except Exception:
pass
# Get UDP port
try:
port_str = maddr.value_for_protocol(multiaddr.protocols.P_UDP) # type: ignore
port = int(port_str)
except Exception:
pass
if host is None or port is None:
raise QUICInvalidMultiaddrError(f"Could not extract host/port from {maddr}")
return host, port
except Exception as e:
raise QUICInvalidMultiaddrError(
f"Failed to parse QUIC multiaddr {maddr}: {e}"
) from e
def multiaddr_to_quic_version(maddr: multiaddr.Multiaddr) -> TProtocol:
"""
Determine QUIC version from multiaddr.
Args:
maddr: QUIC multiaddr
Returns:
QUIC version identifier ("quic-v1" or "quic")
Raises:
QUICInvalidMultiaddrError: If multiaddr doesn't contain QUIC protocol
"""
try:
addr_str = str(maddr)
if f"/{QUIC_V1_PROTOCOL}" in addr_str:
return QUIC_V1_PROTOCOL # RFC 9000
elif f"/{QUIC_DRAFT29_PROTOCOL}" in addr_str:
return QUIC_DRAFT29_PROTOCOL # draft-29
else:
raise QUICInvalidMultiaddrError(f"No QUIC protocol found in {maddr}")
except Exception as e:
raise QUICInvalidMultiaddrError(
f"Failed to determine QUIC version from {maddr}: {e}"
) from e
def create_quic_multiaddr(
host: str, port: int, version: str = "quic-v1"
) -> multiaddr.Multiaddr:
"""
Create a QUIC multiaddr from host, port, and version.
Args:
host: IP address (IPv4 or IPv6)
port: UDP port number
version: QUIC version ("quic-v1" or "quic")
Returns:
QUIC multiaddr
Raises:
QUICInvalidMultiaddrError: If invalid parameters provided
"""
try:
# Determine IP version
try:
ip = ipaddress.ip_address(host)
if isinstance(ip, ipaddress.IPv4Address):
ip_proto = IP4_PROTOCOL
else:
ip_proto = IP6_PROTOCOL
except ValueError:
raise QUICInvalidMultiaddrError(f"Invalid IP address: {host}")
# Validate port
if not (0 <= port <= 65535):
raise QUICInvalidMultiaddrError(f"Invalid port: {port}")
# Validate and normalize QUIC version
if version == "quic-v1" or version == "/quic-v1":
quic_proto = QUIC_V1_PROTOCOL
elif version == "quic" or version == "/quic":
quic_proto = QUIC_DRAFT29_PROTOCOL
else:
raise QUICInvalidMultiaddrError(f"Invalid QUIC version: {version}")
# Construct multiaddr
addr_str = f"/{ip_proto}/{host}/{UDP_PROTOCOL}/{port}/{quic_proto}"
return multiaddr.Multiaddr(addr_str)
except Exception as e:
raise QUICInvalidMultiaddrError(f"Failed to create QUIC multiaddr: {e}") from e
def quic_version_to_wire_format(version: TProtocol) -> int:
"""
Convert QUIC version string to wire format integer for aioquic.
Args:
version: QUIC version string ("quic-v1" or "quic")
Returns:
Wire format version number
Raises:
QUICUnsupportedVersionError: If version is not supported
"""
wire_version = QUIC_VERSION_MAPPINGS.get(version)
if wire_version is None:
raise QUICUnsupportedVersionError(f"Unsupported QUIC version: {version}")
return wire_version
def custom_quic_version_to_wire_format(version: TProtocol) -> int:
"""
Convert a role-qualified QUIC config key to the wire format integer for aioquic.
Args:
version: Config key such as "quic-v1_server" or "quic-v1_client"
(or the draft-29 equivalents)
Returns:
Wire format version number
Raises:
QUICUnsupportedVersionError: If version is not supported
"""
wire_version = CUSTOM_QUIC_VERSION_MAPPING.get(version)
if wire_version is None:
raise QUICUnsupportedVersionError(f"Unsupported QUIC version: {version}")
return wire_version
def get_alpn_protocols() -> list[str]:
"""
Get ALPN protocols for libp2p over QUIC.
Returns:
List of ALPN protocol identifiers
"""
return LIBP2P_ALPN_PROTOCOLS.copy()
def normalize_quic_multiaddr(maddr: multiaddr.Multiaddr) -> multiaddr.Multiaddr:
"""
Normalize a QUIC multiaddr to canonical form.
Args:
maddr: Input QUIC multiaddr
Returns:
Normalized multiaddr
Raises:
QUICInvalidMultiaddrError: If not a valid QUIC multiaddr
"""
if not is_quic_multiaddr(maddr):
raise QUICInvalidMultiaddrError(f"Not a QUIC multiaddr: {maddr}")
host, port = quic_multiaddr_to_endpoint(maddr)
version = multiaddr_to_quic_version(maddr)
return create_quic_multiaddr(host, port, version)
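# A small, hedged round-trip sketch of the helpers above; the address and
# port are illustrative values only.
def _example_multiaddr_roundtrip() -> None:
    maddr = create_quic_multiaddr("127.0.0.1", 4001, "quic-v1")
    assert is_quic_multiaddr(maddr)
    host, port = quic_multiaddr_to_endpoint(maddr)  # ("127.0.0.1", 4001)
    version = multiaddr_to_quic_version(maddr)  # "quic-v1"
    wire = quic_version_to_wire_format(version)  # 0x00000001 (RFC 9000)
    normalized = normalize_quic_multiaddr(maddr)
    logger.debug(
        "roundtrip host=%s port=%d version=%s wire=%#x normalized=%s",
        host, port, version, wire, normalized,
    )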
def create_server_config_from_base(
base_config: QuicConfiguration,
security_manager: QUICTLSConfigManager | None = None,
transport_config: QUICTransportConfig | None = None,
) -> QuicConfiguration:
"""
Create a server configuration without using deepcopy.
Manually copies attributes while handling cryptography objects properly.
"""
try:
# Create new server configuration from scratch
server_config = QuicConfiguration(is_client=False)
server_config.verify_mode = ssl.CERT_NONE
# Copy basic configuration attributes (these are safe to copy)
copyable_attrs = [
"alpn_protocols",
"verify_mode",
"max_datagram_frame_size",
"idle_timeout",
"max_concurrent_streams",
"supported_versions",
"max_data",
"max_stream_data",
"stateless_retry",
"quantum_readiness_test",
]
for attr in copyable_attrs:
if hasattr(base_config, attr):
value = getattr(base_config, attr)
if value is not None:
setattr(server_config, attr, value)
# Handle cryptography objects - these need direct reference, not copying
crypto_attrs = [
"certificate",
"private_key",
"certificate_chain",
"ca_certs",
]
for attr in crypto_attrs:
if hasattr(base_config, attr):
value = getattr(base_config, attr)
if value is not None:
setattr(server_config, attr, value)
# Apply security manager configuration if available
if security_manager:
try:
server_tls_config = security_manager.create_server_config()
# Override with security manager's TLS configuration
if server_tls_config.certificate:
server_config.certificate = server_tls_config.certificate
if server_tls_config.private_key:
server_config.private_key = server_tls_config.private_key
if server_tls_config.certificate_chain:
server_config.certificate_chain = (
server_tls_config.certificate_chain
)
if server_tls_config.alpn_protocols:
server_config.alpn_protocols = server_tls_config.alpn_protocols
server_tls_config.request_client_certificate = True
if getattr(server_tls_config, "request_client_certificate", False):
server_config._libp2p_request_client_cert = True # type: ignore
else:
logger.error(
"🔧 Failed to set request_client_certificate in server config"
)
except Exception as e:
logger.warning(f"Failed to apply security manager config: {e}")
# Set transport-specific defaults if provided
if transport_config:
if server_config.idle_timeout == 0:
server_config.idle_timeout = getattr(
transport_config, "idle_timeout", 30.0
)
if server_config.max_datagram_frame_size is None:
server_config.max_datagram_frame_size = getattr(
transport_config, "max_datagram_size", 1200
)
# Ensure we have ALPN protocols
if not server_config.alpn_protocols:
server_config.alpn_protocols = ["libp2p"]
logger.debug("Successfully created server config without deepcopy")
return server_config
except Exception as e:
logger.error(f"Failed to create server config: {e}")
raise
def create_client_config_from_base(
base_config: QuicConfiguration,
security_manager: QUICTLSConfigManager | None = None,
transport_config: QUICTransportConfig | None = None,
) -> QuicConfiguration:
"""
Create a client configuration without using deepcopy.
"""
try:
# Create new client configuration from scratch
client_config = QuicConfiguration(is_client=True)
client_config.verify_mode = ssl.CERT_NONE
# Copy basic configuration attributes
copyable_attrs = [
"alpn_protocols",
"verify_mode",
"max_datagram_frame_size",
"idle_timeout",
"max_concurrent_streams",
"supported_versions",
"max_data",
"max_stream_data",
"quantum_readiness_test",
]
for attr in copyable_attrs:
if hasattr(base_config, attr):
value = getattr(base_config, attr)
if value is not None:
setattr(client_config, attr, value)
# Handle cryptography objects - these need direct reference, not copying
crypto_attrs = [
"certificate",
"private_key",
"certificate_chain",
"ca_certs",
]
for attr in crypto_attrs:
if hasattr(base_config, attr):
value = getattr(base_config, attr)
if value is not None:
setattr(client_config, attr, value)
# Apply security manager configuration if available
if security_manager:
try:
client_tls_config = security_manager.create_client_config()
# Override with security manager's TLS configuration
if client_tls_config.certificate:
client_config.certificate = client_tls_config.certificate
if client_tls_config.private_key:
client_config.private_key = client_tls_config.private_key
if client_tls_config.certificate_chain:
client_config.certificate_chain = (
client_tls_config.certificate_chain
)
if client_tls_config.alpn_protocols:
client_config.alpn_protocols = client_tls_config.alpn_protocols
except Exception as e:
logger.warning(f"Failed to apply security manager config: {e}")
# Ensure we have ALPN protocols
if not client_config.alpn_protocols:
client_config.alpn_protocols = ["libp2p"]
logger.debug("Successfully created client config without deepcopy")
return client_config
except Exception as e:
logger.error(f"Failed to create client config: {e}")
raise
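# Hedged sketch of driving the two factories above from a plain aioquic base
# configuration; omitting security_manager and transport_config is an
# assumption here and simply skips the TLS-override and transport-default
# branches.
def _example_config_factories() -> None:
    base_server = QuicConfiguration(
        is_client=False, alpn_protocols=get_alpn_protocols()
    )
    base_client = QuicConfiguration(
        is_client=True, alpn_protocols=get_alpn_protocols()
    )
    server_config = create_server_config_from_base(base_server)
    client_config = create_client_config_from_base(base_client)
    logger.debug(
        "server ALPN=%s client ALPN=%s",
        server_config.alpn_protocols,
        client_config.alpn_protocols,
    )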

View File

@ -14,6 +14,9 @@ from libp2p.protocol_muxer.exceptions import (
MultiselectClientError,
MultiselectError,
)
from libp2p.protocol_muxer.multiselect import (
DEFAULT_NEGOTIATE_TIMEOUT,
)
from libp2p.security.exceptions import (
HandshakeFailure,
)
@ -37,9 +40,12 @@ class TransportUpgrader:
self,
secure_transports_by_protocol: TSecurityOptions,
muxer_transports_by_protocol: TMuxerOptions,
negotiate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
):
self.security_multistream = SecurityMultistream(secure_transports_by_protocol)
self.muxer_multistream = MuxerMultistream(muxer_transports_by_protocol)
self.muxer_multistream = MuxerMultistream(
muxer_transports_by_protocol, negotiate_timeout
)
async def upgrade_security(
self,