mirror of
https://github.com/varun-r-mallya/py-libp2p.git
synced 2025-12-31 20:36:24 +00:00
* feat: Replace mplex with yamux as default multiplexer in py-libp2p
* Retain Mplex alongside Yamux in new_swarm with messaging that Yamux is preferred
* moved !BBHII to a constant YAMUX_HEADER_FORMAT at the top of yamux.py with a comment explaining its structure
* renamed the news fragment to 534.feature.rst and updated the description
* added a docstring to clarify that Yamux does not support deadlines natively
* Remove the __main__ block entirely from test_yamux.py
* Replaced the print statements in test_yamux.py with logging.debug
* Added a comment linking to the spec for clarity
* Raise NotImplementedError in YamuxStream.set_deadline per review
* Add muxed_conn to YamuxStream and test deadline NotImplementedError
* Fix Yamux implementation to meet libp2p spec
* Fix None handling in YamuxStream.read and Yamux.read_stream
* Fix test_connected_peers.py to correctly handle peer connections
* fix: Ensure StreamReset is raised on read after local reset in yamux
* fix: Map MuxedStreamError to StreamClosed in NetStream.write for Yamux
* fix: Raise MuxedStreamReset in Yamux.read_stream for closed streams
* fix: Correct Yamux stream read behavior for NetStream tests. Fixed test_net_stream_read_after_remote_closed by updating NetStream.read to raise StreamEOF when the stream is remotely closed and no data is available, aligning with test expectations, and fixed test_net_stream_read_until_eof by modifying YamuxStream.read to block until the stream is closed (recv_closed=True) for n=-1 reads, ensuring data is only returned after remote closure.
* fix: raise StreamEOF when reading from closed stream with empty buffer
* fix: prioritize returning buffered data even after stream reset
* fix: Ensure test_net_stream_read_after_remote_closed_and_reset passes in full suite
* fix: Add __init__.py to yamux module to fix documentation build
* fix: Add libp2p.stream_muxer.yamux to libp2p.stream_muxer.rst toctree
* fix: Correct title underline length in libp2p.stream_muxer.yamux.rst
* fix: Add a "=" so that it matches the length of libp2p.stream_muxer.yamux
* fix(tests): Resolve race condition in network notification test
* fix: fix failing tests and examples with yamux and noise
* refactor: remove debug logging and improve x25519 tests
* fix: Add functionality for users to choose between Yamux and Mplex
* fix: increased trio sleep to 0.1 sec for slow environments
* feat: Add test for switching between Yamux and Mplex
* refactor: move host fixtures to interop tests
* chore: Update __init__.py, removing the unused `import os` and `import logging`
* lint: fix import order
* fix: Resolve conftest.py conflict by removing trio test support
* fix: Resolve test skipping by keeping trio test support
* Fix: add a newline at end of the file

---------

Co-authored-by: acul71 <luca.pisani@birdo.net>
Co-authored-by: acul71 <34693171+acul71@users.noreply.github.com>
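For reference, the `!BBHII` string mentioned above is a `struct` format describing the fixed 12-byte yamux frame header (version, type, flags, stream ID, length, in network byte order). Below is a minimal sketch of such a constant and how it might be used; the helper names are illustrative and the exact code in `yamux.py` may differ:

```python
import struct

# yamux frame header, per the yamux spec:
#   version (uint8) | type (uint8) | flags (uint16) | stream ID (uint32) | length (uint32)
# "!" selects network (big-endian) byte order, giving a fixed 12-byte header.
YAMUX_HEADER_FORMAT = "!BBHII"
HEADER_SIZE = struct.calcsize(YAMUX_HEADER_FORMAT)  # 12 bytes


def pack_header(version: int, typ: int, flags: int, stream_id: int, length: int) -> bytes:
    return struct.pack(YAMUX_HEADER_FORMAT, version, typ, flags, stream_id, length)


def unpack_header(header: bytes) -> tuple[int, int, int, int, int]:
    return struct.unpack(YAMUX_HEADER_FORMAT, header[:HEADER_SIZE])
```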
155 lines
4.1 KiB
Python
from collections.abc import (
    Awaitable,
)
import logging
from typing import (
    Callable,
)

import trio

from libp2p.abc import (
    IHost,
    INetStream,
)
from libp2p.network.stream.exceptions import (
    StreamError,
)
from libp2p.network.swarm import (
    Swarm,
)
from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)

from .constants import (
    MAX_READ_LEN,
)


async def connect_swarm(swarm_0: Swarm, swarm_1: Swarm) -> None:
    """Connect swarm_0 to swarm_1, retrying until the link is visible on both sides."""
    peer_id = swarm_1.get_peer_id()
    addrs = tuple(
        addr
        for transport in swarm_1.listeners.values()
        for addr in transport.get_addrs()
    )
    swarm_0.peerstore.add_addrs(peer_id, addrs, 10000)

    # Add retry logic for more robust connection
    max_retries = 3
    retry_delay = 0.2
    last_error = None

    for attempt in range(max_retries):
        try:
            await swarm_0.dial_peer(peer_id)

            # Verify connection is established in both directions
            if (
                swarm_0.get_peer_id() in swarm_1.connections
                and swarm_1.get_peer_id() in swarm_0.connections
            ):
                return

            # Connection partially established, wait a bit for it to complete
            await trio.sleep(0.1)

            if (
                swarm_0.get_peer_id() in swarm_1.connections
                and swarm_1.get_peer_id() in swarm_0.connections
            ):
                return

            logging.debug(
                "Swarm connection verification failed on attempt"
                + f" {attempt+1}, retrying..."
            )

        except Exception as e:
            last_error = e
            logging.debug(f"Swarm connection attempt {attempt+1} failed: {e}")
            await trio.sleep(retry_delay)

    # If we got here, all retries failed
    if last_error:
        raise RuntimeError(
            f"Failed to connect swarms after {max_retries} attempts"
        ) from last_error
    else:
        err_msg = (
            "Failed to establish bidirectional swarm connection"
            + f" after {max_retries} attempts"
        )
        raise RuntimeError(err_msg)


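# Hypothetical usage sketch (not part of this module): assumes `swarm_a` and
# `swarm_b` are two already-listening Swarm instances, e.g. provided by test
# fixtures. connect_swarm() only returns once the connection is visible on
# both sides, so the assertions below hold after it completes.
async def _example_link_swarms(swarm_a: Swarm, swarm_b: Swarm) -> None:
    await connect_swarm(swarm_a, swarm_b)
    assert swarm_b.get_peer_id() in swarm_a.connections
    assert swarm_a.get_peer_id() in swarm_b.connections

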
async def connect(node1: IHost, node2: IHost) -> None:
    """Connect node1 to node2."""
    addr = node2.get_addrs()[0]
    info = info_from_p2p_addr(addr)

    # Add retry logic for more robust connection
    max_retries = 3
    retry_delay = 0.2
    last_error = None

    for attempt in range(max_retries):
        try:
            await node1.connect(info)

            # Verify connection is established in both directions
            if (
                node2.get_id() in node1.get_network().connections
                and node1.get_id() in node2.get_network().connections
            ):
                return

            # Connection partially established, wait a bit for it to complete
            await trio.sleep(0.1)

            if (
                node2.get_id() in node1.get_network().connections
                and node1.get_id() in node2.get_network().connections
            ):
                return

            logging.debug(
                f"Connection verification failed on attempt {attempt+1}, retrying..."
            )

        except Exception as e:
            last_error = e
            logging.debug(f"Connection attempt {attempt+1} failed: {e}")
            await trio.sleep(retry_delay)

    # If we got here, all retries failed
    if last_error:
        raise RuntimeError(
            f"Failed to connect after {max_retries} attempts"
        ) from last_error
    else:
        err_msg = (
            f"Failed to establish bidirectional connection after {max_retries} attempts"
        )
        raise RuntimeError(err_msg)


def create_echo_stream_handler(
    ack_prefix: str,
) -> Callable[[INetStream], Awaitable[None]]:
    """Build a stream handler that echoes each message back, prefixed with ack_prefix."""

    async def echo_stream_handler(stream: INetStream) -> None:
        while True:
            try:
                read_string = (await stream.read(MAX_READ_LEN)).decode()
            except StreamError:
                break

            resp = ack_prefix + read_string
            try:
                await stream.write(resp.encode())
            except StreamError:
                break

    return echo_stream_handler


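# Hypothetical usage sketch (not part of this module): assumes `host_a` and
# `host_b` are two already-listening IHost instances, e.g. provided by test
# fixtures; the protocol ID below is made up for illustration.
async def _example_echo_roundtrip(host_a: IHost, host_b: IHost) -> None:
    echo_protocol = "/test/echo/1.0.0"

    # Serve the echo protocol on host_b with the handler built above.
    host_b.set_stream_handler(echo_protocol, create_echo_stream_handler("ack:"))

    # Dial host_b and wait until the connection is visible on both sides.
    await connect(host_a, host_b)

    # Exercise the handler once over a fresh stream.
    stream = await host_a.new_stream(host_b.get_id(), [echo_protocol])
    await stream.write(b"hello")
    assert (await stream.read(MAX_READ_LEN)) == b"ack:hello"
    await stream.close()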