10 Commits

SHA1 Message Date
5983c08379 feat: Add py-libp2p to rust-libp2p interoperability tests 2025-06-09 01:20:43 +01:00
d020bbc066 Add time_since_last_publish (#642)
Added the `time_since_last_publish` field to gossipsub, following the reference
implementation at
https://github.com/libp2p/go-libp2p-pubsub/blob/master/gossipsub.go#L1224

Issue https://github.com/libp2p/py-libp2p/issues/636

## How was it fixed?
Whenever someone publishes a message to a topic (or set of topics),
`time_since_last_publish` is updated; whenever we clear fanout peers, or the
time since the last publish exceeds the TTL, the topic's entry is removed from
the dict.
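
For illustration, a minimal standalone sketch of the mechanism (mirroring the
diff below; `time_to_live` stands in for GossipSub's fanout TTL, and the topic
name is hypothetical):

```python
import time

# topic -> unix timestamp of the last publish to that topic
time_since_last_publish: dict[str, int] = {}
fanout: dict[str, set] = {"news": set()}
time_to_live = 60  # seconds

def on_publish(topics: list[str]) -> None:
    # Record the publish time for every topic of the outgoing message.
    for topic in topics:
        time_since_last_publish[topic] = int(time.time())

def fanout_heartbeat() -> None:
    # Drop fanout topics whose last publish is older than the TTL,
    # clearing their timestamp entry along with the fanout peers.
    for topic in list(fanout):
        if time_since_last_publish.get(topic, 0) + time_to_live < int(time.time()):
            del fanout[topic]
            time_since_last_publish.pop(topic, None)
```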

### To-Do

Creating a draft PR for now. Tests and type bindings are left for this
issue.
#### Cute Animal Picture

![put a cute animal picture link inside the
parentheses](https://i.etsystatic.com/27171676/r/il/eedb08/5303109239/il_570xN.5303109239_4o61.jpg)
2025-06-09 00:53:36 +05:30
00f10dbec3 Merge branch 'main' into add-last-publish 2025-06-08 19:19:30 +05:30
d75886b180 renamed newsfragment file causing docs ci failure 2025-06-06 17:55:40 +05:30
5ca6f26933 feat: Add blacklisting of peers (#651)
* init

* remove blacklist validation after hello packet

* add docs and newsfragment
2025-06-05 09:10:04 -06:00
a3c9ac61e6 Improve performance of read from daemon test (#646)
Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>
2025-06-05 07:25:59 -06:00
d4785b9e26 Add newsfragments to the PR
Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>
2025-06-05 14:00:23 +05:30
cef217358f fixed fanout_heartbeat bug and gossipsub join test 2025-06-05 13:39:07 +05:30
338672214c Add test for time_since_last_publish
Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>
2025-06-04 14:15:07 +05:30
c2046e6aa4 Add time_since_last_publish 2025-06-01 01:47:47 +05:30
14 changed files with 1339 additions and 22 deletions

View File

@@ -10,6 +10,7 @@ from collections.abc import (
)
import logging
import random
import time
from typing import (
Any,
DefaultDict,
@@ -80,8 +81,7 @@ class GossipSub(IPubsubRouter, Service):
# The protocol peer supports
peer_protocol: dict[ID, TProtocol]
# TODO: Add `time_since_last_publish`
# Create topic --> time since last publish map.
time_since_last_publish: dict[str, int]
mcache: MessageCache
@@ -138,6 +138,7 @@ class GossipSub(IPubsubRouter, Service):
self.direct_peers[direct_peer.peer_id] = direct_peer
self.direct_connect_interval = direct_connect_interval
self.direct_connect_initial_delay = direct_connect_initial_delay
self.time_since_last_publish = {}
async def run(self) -> None:
if self.pubsub is None:
@@ -253,6 +254,8 @@ class GossipSub(IPubsubRouter, Service):
except StreamClosed:
logger.debug("Fail to publish message to %s: stream closed", peer_id)
self.pubsub._handle_dead_peer(peer_id)
for topic in pubsub_msg.topicIDs:
self.time_since_last_publish[topic] = int(time.time())
def _get_peers_to_send(
self, topic_ids: Iterable[str], msg_forwarder: ID, origin: ID
@@ -342,6 +345,7 @@ class GossipSub(IPubsubRouter, Service):
await self.emit_graft(topic, peer)
self.fanout.pop(topic, None)
self.time_since_last_publish.pop(topic, None)
async def leave(self, topic: str) -> None:
# Note: the comments here are the near-exact algorithm description from the spec
@@ -514,10 +518,12 @@
def fanout_heartbeat(self) -> None:
# Note: the comments here are the exact pseudocode from the spec
for topic in self.fanout:
# Delete topic entry if it's not in `pubsub.peer_topics`
# or (TODO) if it's time-since-last-published > ttl
if topic not in self.pubsub.peer_topics:
for topic in list(self.fanout):
if (
topic not in self.pubsub.peer_topics
and self.time_since_last_publish.get(topic, 0) + self.time_to_live
< int(time.time())
):
# Remove topic from fanout
del self.fanout[topic]
else:

View File

@@ -122,6 +122,9 @@ class Pubsub(Service, IPubsub):
strict_signing: bool
sign_key: PrivateKey
# Set of blacklisted peer IDs
blacklisted_peers: set[ID]
event_handle_peer_queue_started: trio.Event
event_handle_dead_peer_queue_started: trio.Event
@@ -201,6 +204,9 @@ class Pubsub(Service, IPubsub):
self.counter = int(time.time())
# Set of blacklisted peer IDs
self.blacklisted_peers = set()
self.event_handle_peer_queue_started = trio.Event()
self.event_handle_dead_peer_queue_started = trio.Event()
@@ -320,6 +326,82 @@
if topic in self.topic_validators
)
def add_to_blacklist(self, peer_id: ID) -> None:
"""
Add a peer to the blacklist.
When a peer is blacklisted:
- Any existing connection to that peer is immediately closed and removed
- The peer is removed from all topic subscription mappings
- Future connection attempts from this peer will be rejected
- Messages forwarded by or originating from this peer will be dropped
- The peer will not be able to participate in pubsub communication
:param peer_id: the peer ID to blacklist
"""
self.blacklisted_peers.add(peer_id)
logger.debug("Added peer %s to blacklist", peer_id)
self.manager.run_task(self._teardown_if_connected, peer_id)
async def _teardown_if_connected(self, peer_id: ID) -> None:
"""Close their stream and remove them if connected"""
stream = self.peers.get(peer_id)
if stream is not None:
try:
await stream.reset()
except Exception:
pass
del self.peers[peer_id]
# Also remove from any subscription maps:
for _topic, peerset in self.peer_topics.items():
if peer_id in peerset:
peerset.discard(peer_id)
def remove_from_blacklist(self, peer_id: ID) -> None:
"""
Remove a peer from the blacklist.
Once removed from the blacklist:
- The peer can establish new connections to this node
- Messages from this peer will be processed normally
- The peer can participate in topic subscriptions and message forwarding
:param peer_id: the peer ID to remove from blacklist
"""
self.blacklisted_peers.discard(peer_id)
logger.debug("Removed peer %s from blacklist", peer_id)
def is_peer_blacklisted(self, peer_id: ID) -> bool:
"""
Check if a peer is blacklisted.
:param peer_id: the peer ID to check
:return: True if peer is blacklisted, False otherwise
"""
return peer_id in self.blacklisted_peers
def clear_blacklist(self) -> None:
"""
Clear all peers from the blacklist.
This removes all blacklist restrictions, allowing previously blacklisted
peers to:
- Establish new connections
- Send and forward messages
- Participate in topic subscriptions
"""
self.blacklisted_peers.clear()
logger.debug("Cleared all peers from blacklist")
def get_blacklisted_peers(self) -> set[ID]:
"""
Get a copy of the current blacklisted peers.
Returns a snapshot of all currently blacklisted peer IDs. These peers
are completely isolated from pubsub communication - their connections
are rejected and their messages are dropped.
:return: a set containing all blacklisted peer IDs
"""
return self.blacklisted_peers.copy()
async def stream_handler(self, stream: INetStream) -> None:
"""
Stream handler for pubsub. Gets invoked whenever a new stream is
@@ -346,6 +428,10 @@ class Pubsub(Service, IPubsub):
await self.event_handle_dead_peer_queue_started.wait()
async def _handle_new_peer(self, peer_id: ID) -> None:
if self.is_peer_blacklisted(peer_id):
logger.debug("Rejecting blacklisted peer %s", peer_id)
return
try:
stream: INetStream = await self.host.new_stream(peer_id, self.protocols)
except SwarmException as error:
@@ -359,7 +445,6 @@
except StreamClosed:
logger.debug("Fail to add new peer %s: stream closed", peer_id)
return
# TODO: Check if the peer in black list.
try:
self.router.add_peer(peer_id, stream.get_protocol())
except Exception as error:
@@ -609,9 +694,20 @@
"""
logger.debug("attempting to publish message %s", msg)
# TODO: Check if the `source` is in the blacklist. If yes, reject.
# Check if the message forwarder (source) is in the blacklist. If yes, reject.
if self.is_peer_blacklisted(msg_forwarder):
logger.debug(
"Rejecting message from blacklisted source peer %s", msg_forwarder
)
return
# TODO: Check if the `from` is in the blacklist. If yes, reject.
# Check if the message originator (from) is in the blacklist. If yes, reject.
msg_from_peer = ID(msg.from_id)
if self.is_peer_blacklisted(msg_from_peer):
logger.debug(
"Rejecting message from blacklisted originator peer %s", msg_from_peer
)
return
# If the message has been processed before, return (i.e., don't process it further) # noqa: E501
if self._is_msg_seen(msg):

View File

@@ -0,0 +1 @@
feat: track the time since the last message was published to each topic and drop fanout topics whose TTL has expired.

View File

@@ -0,0 +1 @@
implement blacklist management for `pubsub.Pubsub` with methods to get, add, remove, check, and clear blacklisted peer IDs.
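
For reference, a short usage sketch of the new API (assuming an already-constructed `Pubsub` instance and a peer `ID`; the method names match the diff above, while the helper itself is illustrative):

```python
from libp2p.peer.id import ID
from libp2p.pubsub.pubsub import Pubsub

def demo_blacklist(pubsub: Pubsub, peer_id: ID) -> None:
    """Illustrative walkthrough of the blacklist methods."""
    pubsub.add_to_blacklist(peer_id)        # connections/messages now rejected
    assert pubsub.is_peer_blacklisted(peer_id)
    assert peer_id in pubsub.get_blacklisted_peers()  # returns a copy

    pubsub.remove_from_blacklist(peer_id)   # peer may participate again
    pubsub.clear_blacklist()                # drop all restrictions at once
```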

View File

@@ -22,13 +22,14 @@ from tests.utils.pubsub.utils import (
@pytest.mark.trio
async def test_join():
async with PubsubFactory.create_batch_with_gossipsub(
4, degree=4, degree_low=3, degree_high=5
4, degree=4, degree_low=3, degree_high=5, heartbeat_interval=1, time_to_live=1
) as pubsubs_gsub:
gossipsubs = [pubsub.router for pubsub in pubsubs_gsub]
hosts = [pubsub.host for pubsub in pubsubs_gsub]
hosts_indices = list(range(len(pubsubs_gsub)))
topic = "test_join"
to_drop_topic = "test_drop_topic"
central_node_index = 0
# Remove index of central host from the indices
hosts_indices.remove(central_node_index)
@@ -42,23 +43,31 @@ async def test_join():
# Connect central host to all other hosts
await one_to_all_connect(hosts, central_node_index)
# Wait 2 seconds for heartbeat to allow mesh to connect
await trio.sleep(2)
# Wait 1 second for heartbeat to allow the mesh to connect
await trio.sleep(1)
# Central node publishes to the topic so that this topic
# is added to the central node's fanout
await pubsubs_gsub[central_node_index].publish(topic, b"data")
await pubsubs_gsub[central_node_index].publish(to_drop_topic, b"data")
await trio.sleep(0.5)
# Check that the gossipsub of central node has fanout for the topics
assert topic, to_drop_topic in gossipsubs[central_node_index].fanout
# Check that the gossipsub of central node does not have a mesh for the topics
assert topic, to_drop_topic not in gossipsubs[central_node_index].mesh
# Check that the gossipsub of central node
# has a time_since_last_publish for the topics
assert topic in gossipsubs[central_node_index].time_since_last_publish
assert to_drop_topic in gossipsubs[central_node_index].time_since_last_publish
# Check that the gossipsub of central node has fanout for the topic
assert topic in gossipsubs[central_node_index].fanout
# Check that the gossipsub of central node does not have a mesh for the topic
assert topic not in gossipsubs[central_node_index].mesh
await trio.sleep(1)
# Check that after the TTL expires, to_drop_topic is no longer in the central node's fanout
assert to_drop_topic not in gossipsubs[central_node_index].fanout
# Central node subscribes to the topic
await pubsubs_gsub[central_node_index].subscribe(topic)
await trio.sleep(2)
await trio.sleep(1)
# Check that the gossipsub of central node no longer has fanout for the topic
assert topic not in gossipsubs[central_node_index].fanout

View File

@@ -702,3 +702,369 @@ async def test_strict_signing_failed_validation(monkeypatch):
await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg)
await trio.sleep(0.01)
assert event.is_set()
@pytest.mark.trio
async def test_blacklist_basic_operations():
"""Test basic blacklist operations: add, remove, check, clear."""
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
pubsub = pubsubs_fsub[0]
# Create test peer IDs
peer1 = IDFactory()
peer2 = IDFactory()
peer3 = IDFactory()
# Initially no peers should be blacklisted
assert len(pubsub.get_blacklisted_peers()) == 0
assert not pubsub.is_peer_blacklisted(peer1)
assert not pubsub.is_peer_blacklisted(peer2)
assert not pubsub.is_peer_blacklisted(peer3)
# Add peers to blacklist
pubsub.add_to_blacklist(peer1)
pubsub.add_to_blacklist(peer2)
# Check blacklist state
assert len(pubsub.get_blacklisted_peers()) == 2
assert pubsub.is_peer_blacklisted(peer1)
assert pubsub.is_peer_blacklisted(peer2)
assert not pubsub.is_peer_blacklisted(peer3)
# Remove one peer from blacklist
pubsub.remove_from_blacklist(peer1)
# Check state after removal
assert len(pubsub.get_blacklisted_peers()) == 1
assert not pubsub.is_peer_blacklisted(peer1)
assert pubsub.is_peer_blacklisted(peer2)
assert not pubsub.is_peer_blacklisted(peer3)
# Add peer3 and then clear all
pubsub.add_to_blacklist(peer3)
assert len(pubsub.get_blacklisted_peers()) == 2
pubsub.clear_blacklist()
assert len(pubsub.get_blacklisted_peers()) == 0
assert not pubsub.is_peer_blacklisted(peer1)
assert not pubsub.is_peer_blacklisted(peer2)
assert not pubsub.is_peer_blacklisted(peer3)
# Test duplicate additions (should not increase size)
pubsub.add_to_blacklist(peer1)
pubsub.add_to_blacklist(peer1)
assert len(pubsub.get_blacklisted_peers()) == 1
# Test removing non-blacklisted peer (should not cause errors)
pubsub.remove_from_blacklist(peer2)
assert len(pubsub.get_blacklisted_peers()) == 1
@pytest.mark.trio
async def test_blacklist_blocks_new_peer_connections(monkeypatch):
"""Test that blacklisted peers are rejected when trying to connect."""
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
pubsub = pubsubs_fsub[0]
# Create a blacklisted peer ID
blacklisted_peer = IDFactory()
# Add peer to blacklist
pubsub.add_to_blacklist(blacklisted_peer)
new_stream_called = False
async def mock_new_stream(*args, **kwargs):
nonlocal new_stream_called
new_stream_called = True
# Create a mock stream
from unittest.mock import (
AsyncMock,
Mock,
)
mock_stream = Mock()
mock_stream.write = AsyncMock()
mock_stream.reset = AsyncMock()
mock_stream.get_protocol = Mock(return_value="test_protocol")
return mock_stream
router_add_peer_called = False
def mock_add_peer(*args, **kwargs):
nonlocal router_add_peer_called
router_add_peer_called = True
with monkeypatch.context() as m:
m.setattr(pubsub.host, "new_stream", mock_new_stream)
m.setattr(pubsub.router, "add_peer", mock_add_peer)
# Attempt to handle the blacklisted peer
await pubsub._handle_new_peer(blacklisted_peer)
# Verify that neither new_stream nor router.add_peer was called
assert (
not new_stream_called
), "new_stream should not be called to send the hello packet"
assert (
not router_add_peer_called
), "Router.add_peer should not be called for blacklisted peer"
assert (
blacklisted_peer not in pubsub.peers
), "Blacklisted peer should not be in peers dict"
@pytest.mark.trio
async def test_blacklist_blocks_messages_from_blacklisted_originator():
"""Test that messages from blacklisted originator (from field) are rejected."""
async with PubsubFactory.create_batch_with_floodsub(2) as pubsubs_fsub:
pubsub = pubsubs_fsub[0]
blacklisted_originator = pubsubs_fsub[1].my_id # Use existing peer ID
# Add the originator to blacklist
pubsub.add_to_blacklist(blacklisted_originator)
# Create a message with blacklisted originator
msg = make_pubsub_msg(
origin_id=blacklisted_originator,
topic_ids=[TESTING_TOPIC],
data=TESTING_DATA,
seqno=b"\x00" * 8,
)
# Subscribe to the topic
await pubsub.subscribe(TESTING_TOPIC)
# Track if router.publish is called
router_publish_called = False
async def mock_router_publish(*args, **kwargs):
nonlocal router_publish_called
router_publish_called = True
await trio.lowlevel.checkpoint()
original_router_publish = pubsub.router.publish
pubsub.router.publish = mock_router_publish
try:
# Attempt to push message from blacklisted originator
await pubsub.push_msg(blacklisted_originator, msg)
# Verify message was rejected
assert (
not router_publish_called
), "Router.publish should not be called for blacklisted originator"
assert not pubsub._is_msg_seen(
msg
), "Message from blacklisted originator should not be marked as seen"
finally:
pubsub.router.publish = original_router_publish
@pytest.mark.trio
async def test_blacklist_allows_non_blacklisted_peers():
"""Test that non-blacklisted peers can send messages normally."""
async with PubsubFactory.create_batch_with_floodsub(3) as pubsubs_fsub:
pubsub = pubsubs_fsub[0]
allowed_peer = pubsubs_fsub[1].my_id
blacklisted_peer = pubsubs_fsub[2].my_id
# Blacklist one peer but not the other
pubsub.add_to_blacklist(blacklisted_peer)
# Create messages from both peers
msg_from_allowed = make_pubsub_msg(
origin_id=allowed_peer,
topic_ids=[TESTING_TOPIC],
data=b"allowed_data",
seqno=b"\x00" * 8,
)
msg_from_blacklisted = make_pubsub_msg(
origin_id=blacklisted_peer,
topic_ids=[TESTING_TOPIC],
data=b"blacklisted_data",
seqno=b"\x11" * 8,
)
# Subscribe to the topic
sub = await pubsub.subscribe(TESTING_TOPIC)
# Track router.publish calls
router_publish_calls = []
async def mock_router_publish(*args, **kwargs):
router_publish_calls.append(args)
await trio.lowlevel.checkpoint()
original_router_publish = pubsub.router.publish
pubsub.router.publish = mock_router_publish
try:
# Send message from allowed peer (should succeed)
await pubsub.push_msg(allowed_peer, msg_from_allowed)
# Send message from blacklisted peer (should be rejected)
await pubsub.push_msg(allowed_peer, msg_from_blacklisted)
# Verify only allowed message was processed
assert (
len(router_publish_calls) == 1
), "Only one message should be processed"
assert pubsub._is_msg_seen(
msg_from_allowed
), "Allowed message should be marked as seen"
assert not pubsub._is_msg_seen(
msg_from_blacklisted
), "Blacklisted message should not be marked as seen"
# Verify subscription received the allowed message
received_msg = await sub.get()
assert received_msg.data == b"allowed_data"
finally:
pubsub.router.publish = original_router_publish
@pytest.mark.trio
async def test_blacklist_integration_with_existing_functionality():
"""Test that blacklisting works correctly with existing pubsub functionality."""
async with PubsubFactory.create_batch_with_floodsub(2) as pubsubs_fsub:
pubsub = pubsubs_fsub[0]
other_peer = pubsubs_fsub[1].my_id
# Test that seen messages cache still works with blacklisting
pubsub.add_to_blacklist(other_peer)
msg = make_pubsub_msg(
origin_id=other_peer,
topic_ids=[TESTING_TOPIC],
data=TESTING_DATA,
seqno=b"\x00" * 8,
)
# First attempt - should be rejected due to blacklist
await pubsub.push_msg(other_peer, msg)
assert not pubsub._is_msg_seen(msg)
# Remove from blacklist
pubsub.remove_from_blacklist(other_peer)
# Now the message should be processed
await pubsub.subscribe(TESTING_TOPIC)
await pubsub.push_msg(other_peer, msg)
assert pubsub._is_msg_seen(msg)
# If we try to send the same message again, it should be rejected
# due to seen cache (not blacklist)
router_publish_called = False
async def mock_router_publish(*args, **kwargs):
nonlocal router_publish_called
router_publish_called = True
await trio.lowlevel.checkpoint()
original_router_publish = pubsub.router.publish
pubsub.router.publish = mock_router_publish
try:
await pubsub.push_msg(other_peer, msg)
assert (
not router_publish_called
), "Duplicate message should be rejected by seen cache"
finally:
pubsub.router.publish = original_router_publish
@pytest.mark.trio
async def test_blacklist_blocks_messages_from_blacklisted_source():
"""Test that messages from blacklisted source (forwarder) are rejected."""
async with PubsubFactory.create_batch_with_floodsub(2) as pubsubs_fsub:
pubsub = pubsubs_fsub[0]
blacklisted_forwarder = pubsubs_fsub[1].my_id
# Add the forwarder to blacklist
pubsub.add_to_blacklist(blacklisted_forwarder)
# Create a message
msg = make_pubsub_msg(
origin_id=pubsubs_fsub[1].my_id,
topic_ids=[TESTING_TOPIC],
data=TESTING_DATA,
seqno=b"\x00" * 8,
)
# Subscribe to the topic so we can check if message is processed
await pubsub.subscribe(TESTING_TOPIC)
# Track if router.publish is called (it shouldn't be for blacklisted forwarder)
router_publish_called = False
async def mock_router_publish(*args, **kwargs):
nonlocal router_publish_called
router_publish_called = True
await trio.lowlevel.checkpoint()
original_router_publish = pubsub.router.publish
pubsub.router.publish = mock_router_publish
try:
# Attempt to push message from blacklisted forwarder
await pubsub.push_msg(blacklisted_forwarder, msg)
# Verify message was rejected
assert (
not router_publish_called
), "Router.publish should not be called for blacklisted forwarder"
assert not pubsub._is_msg_seen(
msg
), "Message from blacklisted forwarder should not be marked as seen"
finally:
pubsub.router.publish = original_router_publish
@pytest.mark.trio
async def test_blacklist_tears_down_existing_connection():
"""
Verify that if a peer is already in pubsub.peers and pubsub.peer_topics,
calling add_to_blacklist(peer_id) immediately resets its stream and
removes it from both places.
"""
# Create two pubsub instances (floodsub), so they can connect to each other
async with PubsubFactory.create_batch_with_floodsub(2) as pubsubs_fsub:
pubsub0, pubsub1 = pubsubs_fsub
# 1) Connect peer1 to peer0
await connect(pubsub0.host, pubsub1.host)
# Give handle_peer_queue some time to run
await trio.sleep(0.1)
# After connect, pubsub0.peers should contain pubsub1.my_id
assert pubsub1.my_id in pubsub0.peers
# 2) Manually record a subscription from peer1 under TESTING_TOPIC,
# so that peer1 shows up in pubsub0.peer_topics[TESTING_TOPIC].
sub_msg = rpc_pb2.RPC.SubOpts(subscribe=True, topicid=TESTING_TOPIC)
pubsub0.handle_subscription(pubsub1.my_id, sub_msg)
assert TESTING_TOPIC in pubsub0.peer_topics
assert pubsub1.my_id in pubsub0.peer_topics[TESTING_TOPIC]
# 3) Now blacklist peer1
pubsub0.add_to_blacklist(pubsub1.my_id)
# Allow the asynchronous teardown task (_teardown_if_connected) to run
await trio.sleep(0.1)
# 4a) pubsub0.peers should no longer contain peer1
assert pubsub1.my_id not in pubsub0.peers
# 4b) pubsub0.peer_topics[TESTING_TOPIC] should no longer contain peer1
# (or TESTING_TOPIC may have been removed entirely if no other peers remain)
if TESTING_TOPIC in pubsub0.peer_topics:
assert pubsub1.my_id not in pubsub0.peer_topics[TESTING_TOPIC]
else:
# It's also fine if the entire topic entry was pruned
assert TESTING_TOPIC not in pubsub0.peer_topics

View File

@@ -0,0 +1,166 @@
# libp2p Interoperability Tests
This directory contains interoperability tests between py-libp2p and rust-libp2p implementations, focusing on the ping protocol to verify core compatibility.
## Overview
The tests verify the following libp2p components work correctly between implementations:
- **Transport Layer**: TCP connection establishment
- **Security Layer**: Noise encryption protocol
- **Stream Multiplexing**: Yamux multiplexer compatibility
- **Protocol Negotiation**: Multistream-select protocol selection
- **Application Protocol**: Ping protocol (`/ipfs/ping/1.0.0`)
## Test Structure
```
├── py_node/
│   └── ping.py                  # Python libp2p ping client/server
├── rust_node/
│   ├── src/main.rs              # Rust libp2p ping client/server
│   └── Cargo.toml
└── scripts/
    ├── run_py_to_rust_test.ps1  # Test: Python client → Rust server
    └── run_rust_to_py_test.ps1  # Test: Rust client → Python server
```
## Prerequisites
### Python Environment
```bash
# Install py-libp2p and dependencies
pip install .
```
### Rust Environment
```bash
# Ensure Rust is installed
rustc --version
cargo --version
# Dependencies are defined in rust_node/Cargo.toml
```
## Running Tests
### Test 1: Rust Client → Python Server
This test starts a Python server and connects with a Rust client:
```powershell
# Run the automated test
.\scripts\run_rust_to_py_test.ps1
# Or with custom parameters
.\scripts\run_rust_to_py_test.ps1 -Port 9000 -PingCount 10
```
**Manual steps:**
1. Start Python server: `python py_node/ping.py server --port 8000`
2. Note the Peer ID from server output
3. Run Rust client: `cargo run --manifest-path rust_node/Cargo.toml -- /ip4/127.0.0.1/tcp/8000/p2p/<PEER_ID>`
### Test 2: Python Client → Rust Server
This test starts a Rust server and connects with a Python client:
```powershell
# Run the automated test (requires manual intervention)
.\scripts\run_py_to_rust_test.ps1
# Follow the on-screen instructions to complete the test
```
**Manual steps:**
1. Start Rust server: `cargo run --manifest-path rust_node/Cargo.toml`
2. Note the Peer ID and port from server output
3. Run Python client: `python py_node/ping.py client /ip4/127.0.0.1/tcp/<PORT>/p2p/<PEER_ID> --count 5`
## Expected Behavior
### Successful Test Output
**Python Server Logs:**
```
[INFO] Starting py-libp2p ping server...
[INFO] Peer ID: QmYourPeerIdHere
[INFO] Listening: /ip4/0.0.0.0/tcp/8000
[INFO] New ping stream opened by 12D3KooW...
[PING 1] Received ping from 12D3KooW...: 32 bytes
[PING 1] Echoed ping back to 12D3KooW...
```
**Rust Client Logs:**
```
Local peer ID: 12D3KooW...
Listening on "/ip4/0.0.0.0/tcp/54321"
Dialed /ip4/127.0.0.1/tcp/8000/p2p/QmYourPeerIdHere
Behaviour(Event { peer: QmYourPeerIdHere, result: Ok(Pong) })
```
### Performance Metrics
The tests measure:
- **Connection Establishment Time**: Time to establish secure connection
- **Round-Trip Time (RTT)**: Latency for ping/pong exchanges
- **Success Rate**: Percentage of successful ping attempts
- **Protocol Negotiation**: Successful selection of `/ipfs/ping/1.0.0`
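
For a rough sense of what these numbers look like, the statistics reported by `ping.py` (shown later in this diff) reduce to a computation like this illustrative sketch:

```python
def ping_stats(rtts_ms: list[float], attempts: int) -> str:
    """Summarize ping RTTs the way the bundled ping.py does (sketch)."""
    if not rtts_ms:
        return f"All pings failed ({attempts} attempts)"
    received = len(rtts_ms)
    loss_rate = (attempts - received) / attempts * 100
    return (
        f"Sent={attempts}, Received={received}, Lost={attempts - received}; "
        f"loss={loss_rate:.1f}%; RTT min={min(rtts_ms):.2f}ms "
        f"avg={sum(rtts_ms) / received:.2f}ms max={max(rtts_ms):.2f}ms"
    )

print(ping_stats([1.8, 2.1, 2.4], attempts=5))  # 40% loss, avg 2.10 ms
```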
## Troubleshooting
### Common Issues
1. **Protocol Mismatch**: Ensure both implementations use the same protocol ID
- Python: `/ipfs/ping/1.0.0`
- Rust: `/ipfs/ping/1.0.0` (default ping protocol)
2. **Connection Timeout**:
- Check firewall settings
- Verify correct IP addresses and ports
- Ensure both peers are running
3. **Noise Encryption Errors**:
- Verify cryptography library versions
- Check that both implementations support the same Noise variants
4. **Yamux Multiplexing Issues**:
- Confirm Yamux protocol versions match
- Check stream handling implementation
### Debug Logging
Enable detailed logging:
**Python:**
```bash
# Logs are automatically written to ping_debug.log
tail -f ping_debug.log
```
**Rust:**
```powershell
# Set environment variable for detailed logs
$env:RUST_LOG="debug"
cargo run --manifest-path rust_node/Cargo.toml
```
## Interoperability Checklist
- [ ] TCP transport connection establishment
- [ ] Noise encryption handshake
- [ ] Yamux stream multiplexing
- [ ] Multistream protocol negotiation
- [ ] Ping protocol payload exchange (32 bytes)
- [ ] Proper connection cleanup
- [ ] Error handling and timeouts
- [ ] Performance metrics collection
## Contributing
When adding new tests:
1. Follow the existing pattern for client/server implementations
2. Add appropriate error handling and logging
3. Update this README with new test procedures
4. Ensure tests clean up resources properly

View File

@@ -0,0 +1,427 @@
import argparse
import logging
from cryptography.hazmat.primitives.asymmetric import (
x25519,
)
import multiaddr
import trio
from libp2p import (
generate_new_rsa_identity,
new_host,
)
from libp2p.custom_types import (
TProtocol,
)
from libp2p.network.stream.net_stream import (
INetStream,
)
from libp2p.peer.peerinfo import (
info_from_p2p_addr,
)
from libp2p.security.noise.transport import Transport as NoiseTransport
from libp2p.stream_muxer.yamux.yamux import (
Yamux,
)
from libp2p.stream_muxer.yamux.yamux import PROTOCOL_ID as YAMUX_PROTOCOL_ID
# Configure detailed logging
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
handlers=[
logging.StreamHandler(),
logging.FileHandler("ping_debug.log", mode="w", encoding="utf-8"),
],
)
# Standard libp2p ping protocol - this is what rust-libp2p uses by default
PING_PROTOCOL_ID = TProtocol("/ipfs/ping/1.0.0")
PING_LENGTH = 32
RESP_TIMEOUT = 60
async def handle_ping(stream: INetStream) -> None:
"""Handle incoming ping requests from rust-libp2p clients"""
peer_id = stream.muxed_conn.peer_id
print(f"[INFO] New ping stream opened by {peer_id}")
logging.info(f"Ping handler called for peer {peer_id}")
ping_count = 0
try:
while True:
try:
print(f"[INFO] Waiting for ping data from {peer_id}...")
logging.debug(f"Stream state: {stream}")
data = await stream.read(PING_LENGTH)
if not data:
print(
f"[INFO] No data received,"
f" connection likely closed by {peer_id}"
)
logging.debug("No data received, stream closed")
break
if len(data) == 0:
print(f"[INFO] Empty data received, connection closed by {peer_id}")
logging.debug("Empty data received")
break
ping_count += 1
print(
f"[PING {ping_count}] Received ping from {peer_id}:"
f" {len(data)} bytes"
)
logging.debug(f"Ping data: {data.hex()}")
# Echo the data back (this is what ping protocol does)
await stream.write(data)
print(f"[PING {ping_count}] Echoed ping back to {peer_id}")
except Exception as e:
print(f"[ERROR] Error in ping loop with {peer_id}: {e}")
logging.exception("Ping loop error")
break
except Exception as e:
print(f"[ERROR] Error handling ping from {peer_id}: {e}")
logging.exception("Ping handler error")
finally:
try:
print(f"[INFO] Closing ping stream with {peer_id}")
await stream.close()
except Exception as e:
logging.debug(f"Error closing stream: {e}")
print(f"[INFO] Ping session completed with {peer_id} ({ping_count} pings)")
async def send_ping_sequence(stream: INetStream, count: int = 5) -> None:
"""Send a sequence of pings compatible with rust-libp2p."""
peer_id = stream.muxed_conn.peer_id
print(f"[INFO] Starting ping sequence to {peer_id} ({count} pings)")
import os
import time
rtts = []
for i in range(1, count + 1):
try:
# Generate random 32-byte payload as per ping protocol spec
payload = os.urandom(PING_LENGTH)
print(f"[PING {i}/{count}] Sending ping to {peer_id}")
logging.debug(f"Sending payload: {payload.hex()}")
start_time = time.time()
await stream.write(payload)
with trio.fail_after(RESP_TIMEOUT):
response = await stream.read(PING_LENGTH)
end_time = time.time()
rtt = (end_time - start_time) * 1000
if (
response
and len(response) >= PING_LENGTH
and response[:PING_LENGTH] == payload
):
rtts.append(rtt)
print(f"[PING {i}] Successful! RTT: {rtt:.2f}ms")
else:
print(f"[ERROR] Ping {i} failed: response mismatch or incomplete")
if response:
logging.debug(f"Expected: {payload.hex()}")
logging.debug(f"Received: {response.hex()}")
if i < count:
await trio.sleep(1)
except trio.TooSlowError:
print(f"[ERROR] Ping {i} timed out after {RESP_TIMEOUT}s")
except Exception as e:
print(f"[ERROR] Ping {i} failed: {e}")
logging.exception(f"Ping {i} error")
# Print statistics
if rtts:
avg_rtt = sum(rtts) / len(rtts)
min_rtt = min(rtts)
max_rtt = max(rtts) # Fixed typo: was max_rtts
success_count = len(rtts)
loss_rate = ((count - success_count) / count) * 100
print(f"\n[STATS] Ping Statistics:")
print(
f" Packets: Sent={count}, Received={success_count},"
f" Lost={count - success_count}"
)
print(f" Loss rate: {loss_rate:.1f}%")
print(
f" RTT: min={min_rtt:.2f}ms, avg={avg_rtt:.2f}ms,"
f" max={max_rtt:.2f}ms"
)
else:
print(f"\n[STATS] All pings failed ({count} attempts)")
def create_noise_keypair():
"""Create a Noise protocol keypair for secure communication"""
try:
x25519_private_key = x25519.X25519PrivateKey.generate()
class NoisePrivateKey:
def __init__(self, key):
self._key = key
def to_bytes(self):
return self._key.private_bytes_raw()
def public_key(self):
return NoisePublicKey(self._key.public_key())
def get_public_key(self):
return NoisePublicKey(self._key.public_key())
class NoisePublicKey:
def __init__(self, key):
self._key = key
def to_bytes(self):
return self._key.public_bytes_raw()
return NoisePrivateKey(x25519_private_key)
except Exception as e:
logging.error(f"Failed to create Noise keypair: {e}")
return None
async def run_server(port: int) -> None:
"""Run ping server that accepts connections from rust-libp2p clients."""
listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
key_pair = generate_new_rsa_identity()
logging.debug("Generated RSA keypair")
noise_privkey = create_noise_keypair()
if not noise_privkey:
print("[ERROR] Failed to create Noise keypair")
return
logging.debug("Generated Noise keypair")
noise_transport = NoiseTransport(key_pair, noise_privkey=noise_privkey)
logging.debug(f"Noise transport initialized: {noise_transport}")
sec_opt = {TProtocol("/noise"): noise_transport}
muxer_opt = {TProtocol(YAMUX_PROTOCOL_ID): Yamux}
logging.info(f"Using muxer: {muxer_opt}")
host = new_host(key_pair=key_pair, sec_opt=sec_opt, muxer_opt=muxer_opt)
print("[INFO] Starting py-libp2p ping server...")
async with host.run(listen_addrs=[listen_addr]):
print(f"[INFO] Registering ping handler for protocol: {PING_PROTOCOL_ID}")
host.set_stream_handler(PING_PROTOCOL_ID, handle_ping)
# Also register alternative protocol IDs for better compatibility
alt_protocols = [
TProtocol("/ping/1.0.0"),
TProtocol("/libp2p/ping/1.0.0"),
]
for alt_proto in alt_protocols:
print(f"[INFO] Also registering handler for: {alt_proto}")
host.set_stream_handler(alt_proto, handle_ping)
print("[INFO] Server started successfully!")
print(f"[INFO] Peer ID: {host.get_id()}")
print(f"[INFO] Listening: /ip4/0.0.0.0/tcp/{port}")
print(f"[INFO] Primary Protocol: {PING_PROTOCOL_ID}")
print(f"[INFO] Security: Noise encryption")
print(f"[INFO] Muxer: Yamux stream multiplexing")
print("\n[INFO] Registered protocols:")
print(f" - {PING_PROTOCOL_ID}")
for proto in alt_protocols:
print(f" - {proto}")
peer_id = host.get_id()
print("\n[TEST] Test with rust-libp2p:")
print(f" cargo run -- /ip4/127.0.0.1/tcp/{port}/p2p/{peer_id}")
print("\n[TEST] Test with py-libp2p:")
print(f" python ping.py client /ip4/127.0.0.1/tcp/{port}/p2p/{peer_id}")
print("\n[INFO] Waiting for connections...")
print("Press Ctrl+C to exit")
await trio.sleep_forever()
async def run_client(destination: str, count: int = 5) -> int:
"""Run ping client to test connectivity with another peer."""
listen_addr = multiaddr.Multiaddr("/ip4/0.0.0.0/tcp/0")
key_pair = generate_new_rsa_identity()
logging.debug("Generated RSA keypair")
noise_privkey = create_noise_keypair()
if not noise_privkey:
print("[ERROR] Failed to create Noise keypair")
return 1
logging.debug("Generated Noise keypair")
noise_transport = NoiseTransport(key_pair, noise_privkey=noise_privkey)
logging.debug(f"Noise transport initialized: {noise_transport}")
sec_opt = {TProtocol("/noise"): noise_transport}
muxer_opt = {TProtocol(YAMUX_PROTOCOL_ID): Yamux}
logging.info(f"Using muxer: {muxer_opt}")
host = new_host(key_pair=key_pair, sec_opt=sec_opt, muxer_opt=muxer_opt)
print("[INFO] Starting py-libp2p ping client...")
async with host.run(listen_addrs=[listen_addr]):
print(f"[INFO] Our Peer ID: {host.get_id()}")
print(f"[INFO] Target: {destination}")
print("[INFO] Security: Noise encryption")
print("[INFO] Muxer: Yamux stream multiplexing")
try:
maddr = multiaddr.Multiaddr(destination)
info = info_from_p2p_addr(maddr)
target_peer_id = info.peer_id
print(f"[INFO] Target Peer ID: {target_peer_id}")
print("[INFO] Connecting to peer...")
await host.connect(info)
print("[INFO] Connection established!")
# Try protocols in order of preference
# Start with the standard libp2p ping protocol
protocols_to_try = [
PING_PROTOCOL_ID, # /ipfs/ping/1.0.0 - standard protocol
TProtocol("/ping/1.0.0"), # Alternative
TProtocol("/libp2p/ping/1.0.0"), # Another alternative
]
stream = None
for proto in protocols_to_try:
try:
print(f"[INFO] Trying to open stream with protocol: {proto}")
stream = await host.new_stream(target_peer_id, [proto])
print(f"[INFO] Stream opened with protocol: {proto}")
break
except Exception as e:
print(f"[ERROR] Failed to open stream with {proto}: {e}")
logging.debug(f"Protocol {proto} failed: {e}")
continue
if not stream:
print("[ERROR] Failed to open stream with any ping protocol")
print("[ERROR] Ensure the target peer supports one of these protocols:")
for proto in protocols_to_try:
print(f"[ERROR] - {proto}")
return 1
await send_ping_sequence(stream, count)
await stream.close()
print("[INFO] Stream closed successfully")
except Exception as e:
print(f"[ERROR] Client error: {e}")
logging.exception("Client error")
import traceback
traceback.print_exc()
return 1
print("\n[INFO] Client stopped")
return 0
def main() -> int:
"""Main function with argument parsing."""
description = """
py-libp2p ping tool for interoperability testing with rust-libp2p.
Uses Noise encryption and Yamux multiplexing for compatibility.
Server mode: Listens for ping requests from rust-libp2p or py-libp2p clients.
Client mode: Sends ping requests to rust-libp2p or py-libp2p servers.
The tool implements the standard libp2p ping protocol (/ipfs/ping/1.0.0)
which exchanges 32-byte random payloads and measures round-trip time.
"""
example_maddr = (
"/ip4/127.0.0.1/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
)
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=f"""
Examples:
python ping.py server # Start server on port 8000
python ping.py server --port 9000 # Start server on port 9000
python ping.py client {example_maddr}
python ping.py client {example_maddr} --count 10
Protocols supported:
- /ipfs/ping/1.0.0 (primary, rust-libp2p default)
- /ping/1.0.0 (alternative)
- /libp2p/ping/1.0.0 (alternative)
""",
)
subparsers = parser.add_subparsers(dest="mode", help="Operation mode")
server_parser = subparsers.add_parser("server", help="Run as ping server")
server_parser.add_argument(
"--port", "-p", type=int, default=8000, help="Port to listen on (default: 8000)"
)
client_parser = subparsers.add_parser("client", help="Run as ping client")
client_parser.add_argument("destination", help="Target peer multiaddr")
client_parser.add_argument(
"--count",
"-c",
type=int,
default=5,
help="Number of pings to send (default: 5)",
)
args = parser.parse_args()
if not args.mode:
parser.print_help()
return 1
try:
if args.mode == "server":
trio.run(run_server, args.port)
elif args.mode == "client":
return trio.run(run_client, args.destination, args.count)
except KeyboardInterrupt:
print("\n[INFO] Goodbye!")
return 0
except Exception as e:
print(f"[ERROR] Fatal error: {e}")
logging.exception("Fatal error")
import traceback
traceback.print_exc()
return 1
return 0
if __name__ == "__main__":
exit(main())

View File

@@ -0,0 +1,18 @@
[package]
name = "ping-example"
version = "0.1.0"
edition.workspace = true
publish = false
license = "MIT"
[package.metadata.release]
release = false
[dependencies]
futures = { workspace = true }
libp2p = { path = "../../libp2p", features = ["noise", "ping", "tcp", "tokio", "yamux", "rsa"] }
tokio = { workspace = true, features = ["full"] }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[lints]
workspace = true

View File

@@ -0,0 +1,30 @@
## Description
The ping example showcases how to create a network of nodes that establish connections, negotiate the ping protocol, and ping each other.
## Usage
To run the example, follow these steps:
1. In a first terminal window, run the following command:
```sh
cargo run
```
This command starts a node and prints the `PeerId` and the listening addresses, such as `Listening on "/ip4/0.0.0.0/tcp/24915"`.
2. In a second terminal window, start a new instance of the example with the following command:
```sh
cargo run -- /ip4/127.0.0.1/tcp/24915
```
Replace `/ip4/127.0.0.1/tcp/24915` with the listen address of the first node obtained from the first terminal window.
3. The two nodes will establish a connection, negotiate the ping protocol, and begin pinging each other.
## Conclusion
The ping example demonstrates the basic usage of **libp2p** to create a simple p2p network and implement a ping protocol.
By running multiple nodes and observing the ping behavior, users can gain insights into how **libp2p** facilitates communication and interaction between peers.

View File

@@ -0,0 +1,68 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
#![doc = include_str!("../README.md")]
use std::{error::Error, time::Duration};
use futures::prelude::*;
use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr};
use tracing_subscriber::EnvFilter;
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let _ = tracing_subscriber::fmt()
.with_env_filter(EnvFilter::from_default_env())
.try_init();
let mut swarm = libp2p::SwarmBuilder::with_new_identity()
.with_tokio()
.with_tcp(
tcp::Config::default(),
noise::Config::new,
yamux::Config::default,
)?
.with_behaviour(|_| ping::Behaviour::default())?
.with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)))
.build();
// Print the peer ID
println!("Local peer ID: {}", swarm.local_peer_id());
// Tell the swarm to listen on all interfaces and a random, OS-assigned
// port.
swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
// Dial the peer identified by the multi-address given as the second
// command-line argument, if any.
if let Some(addr) = std::env::args().nth(1) {
let remote: Multiaddr = addr.parse()?;
swarm.dial(remote)?;
println!("Dialed {addr}")
}
loop {
match swarm.select_next_some().await {
SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {address:?}"),
SwarmEvent::Behaviour(event) => println!("{event:?}"),
_ => {}
}
}
}

View File

@@ -0,0 +1,44 @@
# scripts/run_py_to_rust_test.ps1
# Test script for py-libp2p client connecting to rust-libp2p server
param(
[int]$PingCount = 5,
[int]$TimeoutSeconds = 30
)
Write-Host "=== py-libp2p to rust-libp2p Interop Test ===" -ForegroundColor Cyan
Write-Host "Starting rust-libp2p server..." -ForegroundColor Yellow
# Start rust server in background
$rustProcess = Start-Process -FilePath "cargo" -ArgumentList "run" -WorkingDirectory "rust_node" -PassThru -WindowStyle Hidden
# Wait a moment for server to start
Start-Sleep -Seconds 3
try {
# Get the rust server's listening address from its output
# For now, we'll assume it's listening on a predictable port
# In a real scenario, you'd parse the server output to get the actual address
Write-Host "Waiting for rust server to start..." -ForegroundColor Yellow
Start-Sleep -Seconds 5
# Try to find the server's peer ID and port from netstat or process output
# For this test, we'll need to manually check the rust server output
Write-Host "Please check the rust server output for its Peer ID and port" -ForegroundColor Red
Write-Host "Then run the Python client manually with:" -ForegroundColor Yellow
Write-Host "python py_node/ping.py client /ip4/127.0.0.1/tcp/<PORT>/p2p/<PEER_ID> --count $PingCount" -ForegroundColor Green
# Keep the server running
Write-Host "Press any key to stop the test..." -ForegroundColor Cyan
$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown")
} finally {
# Clean up
Write-Host "Stopping rust server..." -ForegroundColor Yellow
if ($rustProcess -and !$rustProcess.HasExited) {
$rustProcess.Kill()
$rustProcess.WaitForExit(5000)
}
Write-Host "Test completed." -ForegroundColor Green
}

View File

@@ -0,0 +1,78 @@
# scripts/run_rust_to_py_test.ps1
# Test script for rust-libp2p client connecting to py-libp2p server
param(
[int]$Port = 8000,
[int]$PingCount = 5,
[int]$TimeoutSeconds = 30
)
Write-Host "=== rust-libp2p to py-libp2p Interop Test ===" -ForegroundColor Cyan
Write-Host "Starting py-libp2p server on port $Port..." -ForegroundColor Yellow
# Start Python server in background
$pyProcess = Start-Process -FilePath "python" -ArgumentList "py_node/ping.py", "server", "--port", $Port -PassThru -RedirectStandardOutput "py_server_output.txt" -RedirectStandardError "py_server_error.txt"
# Wait for server to start
Start-Sleep -Seconds 5
try {
# Read the server output to get peer ID
$maxWaitTime = 10
$waited = 0
$peerID = $null
while ($waited -lt $maxWaitTime -and !$peerID) {
if (Test-Path "py_server_output.txt") {
$output = Get-Content "py_server_output.txt" -Raw
if ($output -match "Peer ID: ([\w\d]+)") {
$peerID = $matches[1]
break
}
}
Start-Sleep -Seconds 1
$waited++
}
if (!$peerID) {
Write-Host "Could not extract Peer ID from Python server output" -ForegroundColor Red
Write-Host "Server output:" -ForegroundColor Yellow
if (Test-Path "py_server_output.txt") {
Get-Content "py_server_output.txt"
}
if (Test-Path "py_server_error.txt") {
Write-Host "Server errors:" -ForegroundColor Red
Get-Content "py_server_error.txt"
}
return
}
$multiaddr = "/ip4/127.0.0.1/tcp/$Port/p2p/$peerID"
Write-Host "Python server started with Peer ID: $peerID" -ForegroundColor Green
Write-Host "Full address: $multiaddr" -ForegroundColor Green
Write-Host "Starting rust client..." -ForegroundColor Yellow
# Run rust client
$rustResult = Start-Process -FilePath "cargo" -ArgumentList "run", "--", $multiaddr -WorkingDirectory "rust_node" -Wait -PassThru -NoNewWindow
if ($rustResult.ExitCode -eq 0) {
Write-Host "Rust client completed successfully!" -ForegroundColor Green
} else {
Write-Host "Rust client failed with exit code: $($rustResult.ExitCode)" -ForegroundColor Red
}
} finally {
# Clean up
Write-Host "Stopping Python server..." -ForegroundColor Yellow
if ($pyProcess -and !$pyProcess.HasExited) {
$pyProcess.Kill()
$pyProcess.WaitForExit(5000)
}
# Clean up output files
if (Test-Path "py_server_output.txt") { Remove-Item "py_server_output.txt" }
if (Test-Path "py_server_error.txt") { Remove-Item "py_server_error.txt" }
Write-Host "Test completed." -ForegroundColor Green
}

View File

@@ -32,18 +32,25 @@ class BaseInteractiveProcess(AbstractInterativeProcess):
async def wait_until_ready(self) -> None:
patterns_occurred = {pat: False for pat in self.patterns}
buffers = {pat: bytearray() for pat in self.patterns}
async def read_from_daemon_and_check() -> None:
async for data in self.proc.stdout:
# TODO: It takes O(n^2), which is quite bad.
# But it should succeed in a few seconds.
self.bytes_read.extend(data)
for pat, occurred in patterns_occurred.items():
if occurred:
continue
if pat in self.bytes_read:
# Check if pattern is in new data or spans across chunks
buf = buffers[pat]
buf.extend(data)
if pat in buf:
patterns_occurred[pat] = True
if all([value for value in patterns_occurred.values()]):
else:
keep = min(len(pat) - 1, len(buf))
buffers[pat] = buf[-keep:] if keep > 0 else bytearray()
if all(patterns_occurred.values()):
return
with trio.fail_after(TIMEOUT_DURATION):
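
For context, this fix replaces repeated scans of the ever-growing `bytes_read` with a per-pattern rolling buffer: after each chunk, only the last `len(pat) - 1` bytes need to be retained to still catch a pattern that straddles a chunk boundary. A minimal standalone sketch of the idea (hypothetical helper, not part of the test harness):

```python
def find_across_chunks(pattern: bytes, chunks: list[bytes]) -> bool:
    """Detect `pattern` in a chunked stream without keeping the whole stream."""
    buf = bytearray()
    for chunk in chunks:
        buf.extend(chunk)
        if pattern in buf:
            return True
        # Keep just enough tail bytes to match a pattern split across chunks.
        keep = min(len(pattern) - 1, len(buf))
        buf = buf[-keep:] if keep > 0 else bytearray()
    return False

assert find_across_chunks(b"ready", [b"daemon re", b"ady\n"])  # spans the boundary
```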