Mirror of https://github.com/varun-r-mallya/py-libp2p.git, synced 2025-12-31 20:36:24 +00:00
Feat: Adding Yamux as default multiplexer, keeping Mplex as fallback (#538)
* feat: Replace Mplex with Yamux as the default multiplexer in py-libp2p
* Retain Mplex alongside Yamux in new_swarm, with messaging that Yamux is preferred
* Moved "!BBHII" to a constant YAMUX_HEADER_FORMAT at the top of yamux.py, with a comment explaining its structure (see the header-format sketch below)
* Renamed the news fragment to 534.feature.rst and updated the description
* Renamed the news fragment to 534.feature.rst and updated the description
* Added a docstring to clarify that Yamux does not support deadlines natively
* Removed the __main__ block entirely from test_yamux.py
* Replaced the print statements in test_yamux.py with logging.debug
* Added a comment linking to the spec for clarity
* Raise NotImplementedError in YamuxStream.set_deadline per review
* Add muxed_conn to YamuxStream and a test that set_deadline raises NotImplementedError
* Fix the Yamux implementation to meet the libp2p spec
* Fix None handling in YamuxStream.read and Yamux.read_stream
* Fix test_connected_peers.py to correctly handle peer connections
* fix: Ensure StreamReset is raised on read after local reset in Yamux
* fix: Map MuxedStreamError to StreamClosed in NetStream.write for Yamux
* fix: Raise MuxedStreamReset in Yamux.read_stream for closed streams
* fix: Correct Yamux stream read behavior for NetStream tests. Fixed test_net_stream_read_after_remote_closed by updating NetStream.read to raise StreamEOF when the stream is remotely closed and no data is available, aligning with test expectations, and fixed test_net_stream_read_until_eof by modifying YamuxStream.read to block until the stream is closed (recv_closed=True) for n=-1 reads, ensuring data is only returned after remote closure
* fix: Correct Yamux stream read behavior for NetStream tests. Fixed test_net_stream_read_after_remote_closed by updating NetStream.read to raise StreamEOF when the stream is remotely closed and no data is available, aligning with test expectations, and fixed test_net_stream_read_until_eof by modifying YamuxStream.read to block until the stream is closed (recv_closed=True) for n=-1 reads, ensuring data is only returned after remote closure
* fix: Raise StreamEOF when reading from a closed stream with an empty buffer
* fix: Prioritize returning buffered data even after stream reset
* fix: Prioritize returning buffered data even after stream reset
* fix: Ensure test_net_stream_read_after_remote_closed_and_reset passes in the full suite
* fix: Add __init__.py to the yamux module to fix the documentation build
* fix: Add __init__.py to the yamux module to fix the documentation build
* fix: Add libp2p.stream_muxer.yamux to the libp2p.stream_muxer.rst toctree
* fix: Correct title underline length in libp2p.stream_muxer.yamux.rst
* fix: Add a "=" so that the underline matches the length of the libp2p.stream_muxer.yamux title
* fix(tests): Resolve race condition in network notification test
* fix: Fix failing tests and examples with Yamux and Noise
* refactor: Remove debug logging and improve x25519 tests
* fix: Add functionality for users to choose between Yamux and Mplex (see the muxer-selection sketch below)
* fix: Increase trio sleep to 0.1 s for slow environments
* feat: Add a test for switching between Yamux and Mplex
* refactor: Move host fixtures to interop tests
* chore: Update __init__.py, removing the unused imports

  ```python
  import os
  import logging
  ```

* lint: Fix import order
* fix: Resolve the conftest.py conflict by removing trio test support
* fix: Resolve test skipping by keeping trio test support
* Fix: Add a newline at the end of the file

---------

Co-authored-by: acul71 <luca.pisani@birdo.net>
Co-authored-by: acul71 <34693171+acul71@users.noreply.github.com>
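The `YAMUX_HEADER_FORMAT` constant mentioned above maps directly onto the 12-byte Yamux frame header. The snippet below is an illustrative sketch only: the constant name and format string come from the commit message, while the type/flag values and the packing example are reconstructed from the Yamux spec rather than copied from yamux.py.

```python
import struct

# Yamux frame header layout (per the hashicorp/yamux spec):
#   !  network (big-endian) byte order
#   B  version    (8 bits)
#   B  type       (8 bits: 0x0 Data, 0x1 Window Update, 0x2 Ping, 0x3 Go Away)
#   H  flags      (16 bits: 0x1 SYN, 0x2 ACK, 0x4 FIN, 0x8 RST)
#   I  stream ID  (32 bits)
#   I  length     (32 bits)
YAMUX_HEADER_FORMAT = "!BBHII"

# Illustrative values; these names are not taken from yamux.py.
TYPE_DATA = 0x0
FLAG_SYN = 0x1

# A Data frame opening stream 1 and announcing a 5-byte payload.
header = struct.pack(YAMUX_HEADER_FORMAT, 0, TYPE_DATA, FLAG_SYN, 1, 5)
assert len(header) == 12
version, typ, flags, stream_id, length = struct.unpack(YAMUX_HEADER_FORMAT, header)
```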
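For the "choose between Yamux and Mplex" change, here is a minimal sketch of how a caller might express the preference. It assumes `new_host` still accepts a `muxer_opt` mapping and that the Yamux class and its protocol ID are exported from `libp2p.stream_muxer.yamux.yamux`, mirroring the Mplex layout; check the exact import paths and symbol names against the tree.

```python
# Hypothetical sketch: the Yamux import path and symbol names are assumptions
# based on the Mplex layout, not confirmed from this diff.
from libp2p import new_host
from libp2p.stream_muxer.mplex.mplex import MPLEX_PROTOCOL_ID, Mplex
from libp2p.stream_muxer.yamux.yamux import PROTOCOL_ID as YAMUX_PROTOCOL_ID, Yamux

# Yamux listed first so it is offered first during muxer negotiation;
# Mplex remains available as the fallback.
muxer_opt = {
    YAMUX_PROTOCOL_ID: Yamux,
    MPLEX_PROTOCOL_ID: Mplex,
}

host = new_host(muxer_opt=muxer_opt)
```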
```diff
@@ -242,45 +242,50 @@ class Pubsub(Service, IPubsub):
         """
         peer_id = stream.muxed_conn.peer_id
 
-        while self.manager.is_running:
-            incoming: bytes = await read_varint_prefixed_bytes(stream)
-            rpc_incoming: rpc_pb2.RPC = rpc_pb2.RPC()
-            rpc_incoming.ParseFromString(incoming)
-            if rpc_incoming.publish:
-                # deal with RPC.publish
-                for msg in rpc_incoming.publish:
-                    if not self._is_subscribed_to_msg(msg):
-                        continue
-                    logger.debug(
-                        "received `publish` message %s from peer %s", msg, peer_id
-                    )
-                    self.manager.run_task(self.push_msg, peer_id, msg)
+        try:
+            while self.manager.is_running:
+                incoming: bytes = await read_varint_prefixed_bytes(stream)
+                rpc_incoming: rpc_pb2.RPC = rpc_pb2.RPC()
+                rpc_incoming.ParseFromString(incoming)
+                if rpc_incoming.publish:
+                    # deal with RPC.publish
+                    for msg in rpc_incoming.publish:
+                        if not self._is_subscribed_to_msg(msg):
+                            continue
+                        logger.debug(
+                            "received `publish` message %s from peer %s", msg, peer_id
+                        )
+                        self.manager.run_task(self.push_msg, peer_id, msg)
 
-            if rpc_incoming.subscriptions:
-                # deal with RPC.subscriptions
-                # We don't need to relay the subscription to our
-                # peers because a given node only needs its peers
-                # to know that it is subscribed to the topic (doesn't
-                # need everyone to know)
-                for message in rpc_incoming.subscriptions:
-                    logger.debug(
-                        "received `subscriptions` message %s from peer %s",
-                        message,
-                        peer_id,
-                    )
-                    self.handle_subscription(peer_id, message)
+                if rpc_incoming.subscriptions:
+                    # deal with RPC.subscriptions
+                    # We don't need to relay the subscription to our
+                    # peers because a given node only needs its peers
+                    # to know that it is subscribed to the topic (doesn't
+                    # need everyone to know)
+                    for message in rpc_incoming.subscriptions:
+                        logger.debug(
+                            "received `subscriptions` message %s from peer %s",
+                            message,
+                            peer_id,
+                        )
+                        self.handle_subscription(peer_id, message)
 
-            # NOTE: Check if `rpc_incoming.control` is set through `HasField`.
-            # This is necessary because `control` is an optional field in pb2.
-            # Ref: https://developers.google.com/protocol-buffers/docs/reference/python-generated#singular-fields-proto2  # noqa: E501
-            if rpc_incoming.HasField("control"):
-                # Pass rpc to router so router could perform custom logic
-                logger.debug(
-                    "received `control` message %s from peer %s",
-                    rpc_incoming.control,
-                    peer_id,
-                )
-                await self.router.handle_rpc(rpc_incoming, peer_id)
+                # NOTE: Check if `rpc_incoming.control` is set through `HasField`.
+                # This is necessary because `control` is an optional field in pb2.
+                # Ref: https://developers.google.com/protocol-buffers/docs/reference/python-generated#singular-fields-proto2  # noqa: E501
+                if rpc_incoming.HasField("control"):
+                    # Pass rpc to router so router could perform custom logic
+                    logger.debug(
+                        "received `control` message %s from peer %s",
+                        rpc_incoming.control,
+                        peer_id,
+                    )
+                    await self.router.handle_rpc(rpc_incoming, peer_id)
+        except StreamEOF:
+            logger.debug(
+                f"Stream closed for peer {peer_id}, exiting read loop cleanly."
+            )
 
     def set_topic_validator(
         self, topic: str, validator: ValidatorFn, is_async_validator: bool
```
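The hunk above only catches `StreamEOF`; the commit message describes where that exception now comes from: buffered data is still delivered after a close or reset, `StreamEOF` is raised only when the stream is closed with an empty buffer, and `n=-1` reads block until the remote side has closed. The toy model below is a minimal sketch of those semantics; it is not py-libp2p's `NetStream` or `YamuxStream` code, and the `StreamEOF` import path is assumed from the py-libp2p tree.

```python
import trio

from libp2p.network.stream.exceptions import StreamEOF  # path assumed from the py-libp2p tree


class ToyReadEnd:
    """Toy model (not py-libp2p code) of the read semantics in the commit message."""

    def __init__(self) -> None:
        self.buffer = bytearray()
        self.closed = False  # remote half closed, or stream reset

    async def read(self, n: int = -1) -> bytes:
        if n == -1:
            # Unbounded reads block until the remote side has closed, then drain.
            while not self.closed:
                await trio.sleep(0.01)
        else:
            # Bounded reads wait until data arrives or the stream closes.
            while not self.buffer and not self.closed:
                await trio.sleep(0.01)
        if not self.buffer:
            # Closed (or reset) with nothing buffered: signal end of stream.
            raise StreamEOF()
        # Deliver buffered data even though the stream may already be closed or reset.
        if n == -1 or n >= len(self.buffer):
            data = bytes(self.buffer)
            self.buffer.clear()
        else:
            data = bytes(self.buffer[:n])
            del self.buffer[:n]
        return data
```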