Feat: Adding Yamux as default multiplexer, keeping Mplex as fallback (#538)

* feat: Replace mplex with yamux as default multiplexer in py-libp2p

* Retain Mplex alongside Yamux in new_swarm with messaging that Yamux is preferred

* moved !BBHII to a constant YAMUX_HEADER_FORMAT at the top of yamux.py with a comment explaining its structure

* renamed the news fragment to 534.feature.rst and updated the description

* renamed the news fragment to 534.feature.rst and updated the description

* added a docstring to clarify that Yamux does not support deadlines natively

* Remove the __main__ block entirely from test_yamux.py

* Replaced the print statements in test_yamux.py with logging.debug

* Added a comment linking to the spec for clarity

* Raise NotImplementedError in YamuxStream.set_deadline per review

* Add muxed_conn to YamuxStream and test deadline NotImplementedError

* Fix Yamux implementation to meet libp2p spec

* Fix None handling in YamuxStream.read and Yamux.read_stream

* Fix test_connected_peers.py to correctly handle peer connections

* fix: Ensure StreamReset is raised on read after local reset in yamux

* fix: Map MuxedStreamError to StreamClosed in NetStream.write for Yamux

* fix: Raise MuxedStreamReset in Yamux.read_stream for closed streams

* fix: Correct Yamux stream read behavior for NetStream tests

Fixed test_net_stream_read_after_remote_closed by updating NetStream.read to raise StreamEOF when the stream is remotely closed and no data is available, aligning with test expectations, and fixed test_net_stream_read_until_eof by modifying YamuxStream.read to block until the stream is closed (recv_closed=True) for n=-1 reads, ensuring data is only returned after remote closure.

* fix: Correct Yamux stream read behavior for NetStream tests

Fixed test_net_stream_read_after_remote_closed by updating NetStream.read to raise StreamEOF when the stream is remotely closed and no data is available, aligning with test expectations, and fixed test_net_stream_read_until_eof by modifying YamuxStream.read to block until the stream is closed (recv_closed=True) for n=-1 reads, ensuring data is only returned after remote closure.

* fix: raise StreamEOF when reading from closed stream with empty buffer

* fix: prioritize returning buffered data even after stream reset

* fix: prioritize returning buffered data even after stream reset

* fix: Ensure test_net_stream_read_after_remote_closed_and_reset passes in full suite

* fix: Add __init__.py to yamux module to fix documentation build

* fix: Add __init__.py to yamux module to fix documentation build

* fix: Add libp2p.stream_muxer.yamux to libp2p.stream_muxer.rst toctree

* fix: Correct title underline length in libp2p.stream_muxer.yamux.rst

* fix: Add a = so that it matches the libp2p.stream\_muxer.yamux length

* fix(tests): Resolve race condition in network notification test

* fix: fixing failing tests and examples with yamux and noise

* refactor: remove debug logging and improve x25519 tests

* fix: Add functionality for users to choose between Yamux and Mplex

* fix: increased trio sleep to 0.1 sec for slow environment

* feat: Add test for switching between Yamux and mplex

* refactor: move host fixtures to interop tests

* chore: Update __init__.py removing unused import

removed unused
```python
import os
import logging
```

* lint: fix import order

* fix: Resolve conftest.py conflict by removing trio test support

* fix: Resolve test skipping by keeping trio test support

* Fix: add a newline at end of the file

---------

Co-authored-by: acul71 <luca.pisani@birdo.net>
Co-authored-by: acul71 <34693171+acul71@users.noreply.github.com>
This commit is contained in:
Paschal
2025-05-22 21:01:51 +01:00
committed by GitHub
parent 18c6f529c6
commit 4b1860766d
29 changed files with 2215 additions and 101 deletions

View File

@ -1,10 +1,13 @@
from collections.abc import (
Awaitable,
)
import logging
from typing import (
Callable,
)
import trio
from libp2p.abc import (
IHost,
INetStream,
@ -32,16 +35,104 @@ async def connect_swarm(swarm_0: Swarm, swarm_1: Swarm) -> None:
for addr in transport.get_addrs()
)
swarm_0.peerstore.add_addrs(peer_id, addrs, 10000)
await swarm_0.dial_peer(peer_id)
assert swarm_0.get_peer_id() in swarm_1.connections
assert swarm_1.get_peer_id() in swarm_0.connections
# Add retry logic for more robust connection
max_retries = 3
retry_delay = 0.2
last_error = None
for attempt in range(max_retries):
try:
await swarm_0.dial_peer(peer_id)
# Verify connection is established in both directions
if (
swarm_0.get_peer_id() in swarm_1.connections
and swarm_1.get_peer_id() in swarm_0.connections
):
return
# Connection partially established, wait a bit for it to complete
await trio.sleep(0.1)
if (
swarm_0.get_peer_id() in swarm_1.connections
and swarm_1.get_peer_id() in swarm_0.connections
):
return
logging.debug(
"Swarm connection verification failed on attempt"
+ f" {attempt+1}, retrying..."
)
except Exception as e:
last_error = e
logging.debug(f"Swarm connection attempt {attempt+1} failed: {e}")
await trio.sleep(retry_delay)
# If we got here, all retries failed
if last_error:
raise RuntimeError(
f"Failed to connect swarms after {max_retries} attempts"
) from last_error
else:
err_msg = (
"Failed to establish bidirectional swarm connection"
+ f" after {max_retries} attempts"
)
raise RuntimeError(err_msg)
async def connect(node1: IHost, node2: IHost) -> None:
    """
    Connect node1 to node2 and verify the connection from both sides.

    Dials node2's first listen address, then checks that each peer shows
    up in the other's connection table. The dial is retried up to
    ``max_retries`` times to tolerate slow environments; a short sleep
    between checks gives the inbound side time to register the
    connection.

    :param node1: host initiating the connection.
    :param node2: host being dialed; must expose at least one address.
    :raises RuntimeError: if no bidirectional connection could be
        established after all retries (chained from the last dial error,
        if any).
    """
    addr = node2.get_addrs()[0]
    info = info_from_p2p_addr(addr)
    # Retry logic for a more robust connection. All dialing happens inside
    # the loop; a single un-retried dial before it would raise on the first
    # transient failure and defeat the retries.
    max_retries = 3
    retry_delay = 0.2
    last_error = None
    for attempt in range(max_retries):
        try:
            await node1.connect(info)
            # Verify connection is established in both directions.
            if (
                node2.get_id() in node1.get_network().connections
                and node1.get_id() in node2.get_network().connections
            ):
                return
            # Connection partially established; wait a bit for it to complete.
            await trio.sleep(0.1)
            if (
                node2.get_id() in node1.get_network().connections
                and node1.get_id() in node2.get_network().connections
            ):
                return
            logging.debug(
                f"Connection verification failed on attempt {attempt+1}, retrying..."
            )
        except Exception as e:
            last_error = e
            logging.debug(f"Connection attempt {attempt+1} failed: {e}")
        await trio.sleep(retry_delay)
    # If we got here, all retries failed.
    if last_error:
        raise RuntimeError(
            f"Failed to connect after {max_retries} attempts"
        ) from last_error
    err_msg = (
        f"Failed to establish bidirectional connection after {max_retries} attempts"
    )
    raise RuntimeError(err_msg)
def create_echo_stream_handler(