ft. modernise py-libp2p (#618)

* fix pyproject.toml , add ruff

* rm lock

* make progress

* add poetry lock ignore

* fix type issues

* fix tcp type errors

* fix text example - type error - wrong args

* add setuptools to dev

* test ci

* fix docs build

* fix type issues for new_swarm & new_host

* fix types in gossipsub

* fix type issues in noise

* wip: factories

* revert factories

* fix more type issues

* more type fixes

* fix: add null checks for noise protocol initialization and key handling

* corrected argument-errors in peerId and Multiaddr in peer tests

* fix: Noice - remove redundant type casts in BaseNoiseMsgReadWriter

* fix: update test_notify.py to use SwarmFactory.create_batch_and_listen, fix type hints, and comment out ClosedStream assertions

* Fix type checks for pubsub module

Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>

* Fix type checks for pubsub module-tests

Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>

* noise: add checks for uninitialized protocol and key states in PatternXX

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* pubsub: add None checks for optional fields in FloodSub and Pubsub

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* Fix type hints and improve testing

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* remove redundant checks

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* fix build issues

* add optional to trio service

* fix types

* fix type errors

* Fix type errors

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* fixed more-type checks in crypto and peer_data files

* wip: factories

* replaced union with optional

* fix: type-error in interp-utils and peerinfo

* replace pyright with pyrefly

* add pyrefly.toml

* wip: fix multiselect issues

* try typecheck

* base check

* mcache test fixes , typecheck ci update

* fix ci

* will this work

* minor fix

* use poetry

* fix wokflow

* use cache,fix err

* fix pyrefly.toml

* fix pyrefly.toml

* fix cache in ci

* deploy commit

* add main baseline

* update to v5

* improve typecheck ci (#14)

* fix typo

* remove holepunching code (#16)

* fix gossipsub typeerrors (#17)

* fix: ensure initiator user includes remote peer id in handshake (#15)

* fix ci (#19)

* typefix: custom_types | core/peerinfo/test_peer_info | io/abc | pubsub/floodsub | protocol_muxer/multiselect (#18)

* fix: Typefixes in PeerInfo  (#21)

* fix minor type issue (#22)

* fix type errors in pubsub (#24)

* fix: Minor typefixes in tests (#23)

* Fix failing tests for type-fixed test/pubsub (#8)

* move pyrefly & ruff to pyproject.toml & rm .project-template (#28)

* move the async_context file to tests/core

* move crypto test to crypto folder

* fix: some typefixes (#25)

* fix type errors

* fix type issues

* fix: update gRPC API usage in autonat_pb2_grpc.py (#31)

* md: typecheck ci

* rm comments

* clean up : from review suggestions

* use | None over Optional as per new python standards

* drop supporto for py3.9

* newsfragments

---------

Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>
Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>
Co-authored-by: acul71 <luca.pisani@birdo.net>
Co-authored-by: kaneki003 <sakshamchauhan707@gmail.com>
Co-authored-by: sukhman <sukhmansinghsaluja@gmail.com>
Co-authored-by: varun-r-mallya <varunrmallya@gmail.com>
Co-authored-by: varunrmallya <100590632+varun-r-mallya@users.noreply.github.com>
Co-authored-by: lla-dane <abhinavagarwalla6@gmail.com>
Co-authored-by: Collins <ArtemisfowlX@protonmail.com>
Co-authored-by: Abhinav Agarwalla <120122716+lla-dane@users.noreply.github.com>
Co-authored-by: guha-rahul <52607971+guha-rahul@users.noreply.github.com>
Co-authored-by: Sukhman Singh <63765293+sukhman-sukh@users.noreply.github.com>
Co-authored-by: acul71 <34693171+acul71@users.noreply.github.com>
Co-authored-by: pacrob <5199899+pacrob@users.noreply.github.com>
This commit is contained in:
Arush Kurundodi
2025-06-09 23:09:59 +05:30
committed by GitHub
parent d020bbc066
commit bdadec7519
111 changed files with 1537 additions and 1401 deletions

View File

@ -0,0 +1,102 @@
import pytest
from libp2p.crypto.keys import (
KeyType,
)
from libp2p.crypto.x25519 import (
X25519PrivateKey,
X25519PublicKey,
create_new_key_pair,
)
def test_x25519_public_key_creation():
# Create a new X25519 key pair
key_pair = create_new_key_pair()
public_key = key_pair.public_key
# Test that it's an instance of X25519PublicKey
assert isinstance(public_key, X25519PublicKey)
# Test key type
assert public_key.get_type() == KeyType.X25519
# Test to_bytes and from_bytes roundtrip
key_bytes = public_key.to_bytes()
reconstructed_key = X25519PublicKey.from_bytes(key_bytes)
assert isinstance(reconstructed_key, X25519PublicKey)
assert reconstructed_key.to_bytes() == key_bytes
def test_x25519_private_key_creation():
# Create a new private key
private_key = X25519PrivateKey.new()
# Test that it's an instance of X25519PrivateKey
assert isinstance(private_key, X25519PrivateKey)
# Test key type
assert private_key.get_type() == KeyType.X25519
# Test to_bytes and from_bytes roundtrip
key_bytes = private_key.to_bytes()
reconstructed_key = X25519PrivateKey.from_bytes(key_bytes)
assert isinstance(reconstructed_key, X25519PrivateKey)
assert reconstructed_key.to_bytes() == key_bytes
def test_x25519_key_pair_creation():
# Create a new key pair
key_pair = create_new_key_pair()
# Test that both private and public keys are of correct types
assert isinstance(key_pair.private_key, X25519PrivateKey)
assert isinstance(key_pair.public_key, X25519PublicKey)
# Test that public key matches private key
assert (
key_pair.private_key.get_public_key().to_bytes()
== key_pair.public_key.to_bytes()
)
def test_x25519_unsupported_operations():
# Test that signature operations are not supported
key_pair = create_new_key_pair()
# Test that public key verify raises NotImplementedError
with pytest.raises(NotImplementedError, match="X25519 does not support signatures"):
key_pair.public_key.verify(b"data", b"signature")
# Test that private key sign raises NotImplementedError
with pytest.raises(NotImplementedError, match="X25519 does not support signatures"):
key_pair.private_key.sign(b"data")
def test_x25519_invalid_key_bytes():
# Test that invalid key bytes raise appropriate exceptions
with pytest.raises(ValueError, match="An X25519 public key is 32 bytes long"):
X25519PublicKey.from_bytes(b"invalid_key_bytes")
with pytest.raises(ValueError, match="An X25519 private key is 32 bytes long"):
X25519PrivateKey.from_bytes(b"invalid_key_bytes")
def test_x25519_key_serialization():
# Test key serialization and deserialization
key_pair = create_new_key_pair()
# Serialize both keys
private_bytes = key_pair.private_key.to_bytes()
public_bytes = key_pair.public_key.to_bytes()
# Deserialize and verify
reconstructed_private = X25519PrivateKey.from_bytes(private_bytes)
reconstructed_public = X25519PublicKey.from_bytes(public_bytes)
# Verify the reconstructed keys match the original
assert reconstructed_private.to_bytes() == private_bytes
assert reconstructed_public.to_bytes() == public_bytes
# Verify the public key derived from reconstructed private key matches
assert reconstructed_private.get_public_key().to_bytes() == public_bytes

View File

@ -209,6 +209,18 @@ async def ping_demo(host_a, host_b):
async def pubsub_demo(host_a, host_b):
gossipsub_a = GossipSub(
[GOSSIPSUB_PROTOCOL_ID],
3,
2,
4,
)
gossipsub_b = GossipSub(
[GOSSIPSUB_PROTOCOL_ID],
3,
2,
4,
)
gossipsub_a = GossipSub([GOSSIPSUB_PROTOCOL_ID], 3, 2, 4, None, 1, 1)
gossipsub_b = GossipSub([GOSSIPSUB_PROTOCOL_ID], 3, 2, 4, None, 1, 1)
pubsub_a = Pubsub(host_a, gossipsub_a)

View File

@ -76,18 +76,18 @@ async def test_update_status():
# Less than 2 successful dials should result in PRIVATE status
service.dial_results = {
ID("peer1"): True,
ID("peer2"): False,
ID("peer3"): False,
ID(b"peer1"): True,
ID(b"peer2"): False,
ID(b"peer3"): False,
}
service.update_status()
assert service.status == AutoNATStatus.PRIVATE
# 2 or more successful dials should result in PUBLIC status
service.dial_results = {
ID("peer1"): True,
ID("peer2"): True,
ID("peer3"): False,
ID(b"peer1"): True,
ID(b"peer2"): True,
ID(b"peer3"): False,
}
service.update_status()
assert service.status == AutoNATStatus.PUBLIC

View File

@ -22,9 +22,10 @@ async def test_host_routing_success():
@pytest.mark.trio
async def test_host_routing_fail():
async with RoutedHostFactory.create_batch_and_listen(
2
) as routed_hosts, HostFactory.create_batch_and_listen(1) as basic_hosts:
async with (
RoutedHostFactory.create_batch_and_listen(2) as routed_hosts,
HostFactory.create_batch_and_listen(1) as basic_hosts,
):
# routing fails because host_c does not use routing
with pytest.raises(ConnectionFailure):
await routed_hosts[0].connect(PeerInfo(basic_hosts[0].get_id(), []))

View File

@ -218,7 +218,6 @@ async def test_push_identify_to_peers_with_explicit_params(security_protocol):
This test ensures all parameters of push_identify_to_peers are properly tested.
"""
# Create four hosts to thoroughly test selective pushing
async with host_pair_factory(security_protocol=security_protocol) as (
host_a,

View File

@ -8,23 +8,20 @@ into network after network has already started listening
TODO: Add tests for closed_stream, listen_close when those
features are implemented in swarm
"""
import enum
import pytest
from multiaddr import Multiaddr
import trio
from libp2p.abc import (
INetConn,
INetStream,
INetwork,
INotifee,
)
from libp2p.tools.async_service import (
background_trio_service,
)
from libp2p.tools.constants import (
LISTEN_MADDR,
)
from libp2p.tools.utils import (
connect_swarm,
)
from libp2p.tools.utils import connect_swarm
from tests.utils.factories import (
SwarmFactory,
)
@ -40,169 +37,94 @@ class Event(enum.Enum):
class MyNotifee(INotifee):
def __init__(self, events):
def __init__(self, events: list[Event]):
self.events = events
async def opened_stream(self, network, stream):
async def opened_stream(self, network: INetwork, stream: INetStream) -> None:
self.events.append(Event.OpenedStream)
async def closed_stream(self, network, stream):
async def closed_stream(self, network: INetwork, stream: INetStream) -> None:
# TODO: It is not implemented yet.
pass
async def connected(self, network, conn):
async def connected(self, network: INetwork, conn: INetConn) -> None:
self.events.append(Event.Connected)
async def disconnected(self, network, conn):
async def disconnected(self, network: INetwork, conn: INetConn) -> None:
self.events.append(Event.Disconnected)
async def listen(self, network, _multiaddr):
async def listen(self, network: INetwork, multiaddr: Multiaddr) -> None:
self.events.append(Event.Listen)
async def listen_close(self, network, _multiaddr):
async def listen_close(self, network: INetwork, multiaddr: Multiaddr) -> None:
# TODO: It is not implemented yet.
pass
@pytest.mark.trio
async def test_notify(security_protocol):
swarms = [SwarmFactory(security_protocol=security_protocol) for _ in range(2)]
events_0_0 = []
events_1_0 = []
events_0_without_listen = []
# Helper to wait for specific event
async def wait_for_event(events_list, expected_event, timeout=1.0):
start_time = trio.current_time()
while trio.current_time() - start_time < timeout:
if expected_event in events_list:
return True
await trio.sleep(0.01)
async def wait_for_event(events_list, event, timeout=1.0):
with trio.move_on_after(timeout):
while event not in events_list:
await trio.sleep(0.01)
return True
return False
# Run swarms.
async with background_trio_service(swarms[0]), background_trio_service(swarms[1]):
# Register events before listening
swarms[0].register_notifee(MyNotifee(events_0_0))
swarms[1].register_notifee(MyNotifee(events_1_0))
# Event lists for notifees
events_0_0 = []
events_0_1 = []
events_1_0 = []
events_1_1 = []
# Listen
async with trio.open_nursery() as nursery:
nursery.start_soon(swarms[0].listen, LISTEN_MADDR)
nursery.start_soon(swarms[1].listen, LISTEN_MADDR)
# Create two swarms, but do not listen yet
async with SwarmFactory.create_batch_and_listen(2) as swarms:
# Register notifees before listening
notifee_0_0 = MyNotifee(events_0_0)
notifee_0_1 = MyNotifee(events_0_1)
notifee_1_0 = MyNotifee(events_1_0)
notifee_1_1 = MyNotifee(events_1_1)
# Wait for Listen events
assert await wait_for_event(events_0_0, Event.Listen)
assert await wait_for_event(events_1_0, Event.Listen)
swarms[0].register_notifee(notifee_0_0)
swarms[0].register_notifee(notifee_0_1)
swarms[1].register_notifee(notifee_1_0)
swarms[1].register_notifee(notifee_1_1)
swarms[0].register_notifee(MyNotifee(events_0_without_listen))
# Connected
# Connect swarms
await connect_swarm(swarms[0], swarms[1])
assert await wait_for_event(events_0_0, Event.Connected)
assert await wait_for_event(events_1_0, Event.Connected)
assert await wait_for_event(events_0_without_listen, Event.Connected)
# OpenedStream: first
await swarms[0].new_stream(swarms[1].get_peer_id())
# OpenedStream: second
await swarms[0].new_stream(swarms[1].get_peer_id())
# OpenedStream: third, but different direction.
await swarms[1].new_stream(swarms[0].get_peer_id())
# Create a stream
stream = await swarms[0].new_stream(swarms[1].get_peer_id())
await stream.close()
# Clear any duplicate events that might have occurred
events_0_0.copy()
events_1_0.copy()
events_0_without_listen.copy()
# TODO: Check `ClosedStream` and `ListenClose` events after they are ready.
# Disconnected
# Close peer
await swarms[0].close_peer(swarms[1].get_peer_id())
assert await wait_for_event(events_0_0, Event.Disconnected)
assert await wait_for_event(events_1_0, Event.Disconnected)
assert await wait_for_event(events_0_without_listen, Event.Disconnected)
# Connected again, but different direction.
await connect_swarm(swarms[1], swarms[0])
# Wait for events
assert await wait_for_event(events_0_0, Event.Connected, 1.0)
assert await wait_for_event(events_0_0, Event.OpenedStream, 1.0)
# assert await wait_for_event(
# events_0_0, Event.ClosedStream, 1.0
# ) # Not implemented
assert await wait_for_event(events_0_0, Event.Disconnected, 1.0)
# Get the index of the first disconnected event
disconnect_idx_0_0 = events_0_0.index(Event.Disconnected)
disconnect_idx_1_0 = events_1_0.index(Event.Disconnected)
disconnect_idx_without_listen = events_0_without_listen.index(
Event.Disconnected
)
assert await wait_for_event(events_0_1, Event.Connected, 1.0)
assert await wait_for_event(events_0_1, Event.OpenedStream, 1.0)
# assert await wait_for_event(
# events_0_1, Event.ClosedStream, 1.0
# ) # Not implemented
assert await wait_for_event(events_0_1, Event.Disconnected, 1.0)
# Check for connected event after disconnect
assert await wait_for_event(
events_0_0[disconnect_idx_0_0 + 1 :], Event.Connected
)
assert await wait_for_event(
events_1_0[disconnect_idx_1_0 + 1 :], Event.Connected
)
assert await wait_for_event(
events_0_without_listen[disconnect_idx_without_listen + 1 :],
Event.Connected,
)
assert await wait_for_event(events_1_0, Event.Connected, 1.0)
assert await wait_for_event(events_1_0, Event.OpenedStream, 1.0)
# assert await wait_for_event(
# events_1_0, Event.ClosedStream, 1.0
# ) # Not implemented
assert await wait_for_event(events_1_0, Event.Disconnected, 1.0)
# Disconnected again, but different direction.
await swarms[1].close_peer(swarms[0].get_peer_id())
# Find index of the second connected event
second_connect_idx_0_0 = events_0_0.index(
Event.Connected, disconnect_idx_0_0 + 1
)
second_connect_idx_1_0 = events_1_0.index(
Event.Connected, disconnect_idx_1_0 + 1
)
second_connect_idx_without_listen = events_0_without_listen.index(
Event.Connected, disconnect_idx_without_listen + 1
)
# Check for second disconnected event
assert await wait_for_event(
events_0_0[second_connect_idx_0_0 + 1 :], Event.Disconnected
)
assert await wait_for_event(
events_1_0[second_connect_idx_1_0 + 1 :], Event.Disconnected
)
assert await wait_for_event(
events_0_without_listen[second_connect_idx_without_listen + 1 :],
Event.Disconnected,
)
# Verify the core sequence of events
expected_events_without_listen = [
Event.Connected,
Event.Disconnected,
Event.Connected,
Event.Disconnected,
]
# Filter events to check only pattern we care about
# (skipping OpenedStream which may vary)
filtered_events_0_0 = [
e
for e in events_0_0
if e in [Event.Listen, Event.Connected, Event.Disconnected]
]
filtered_events_1_0 = [
e
for e in events_1_0
if e in [Event.Listen, Event.Connected, Event.Disconnected]
]
filtered_events_without_listen = [
e
for e in events_0_without_listen
if e in [Event.Connected, Event.Disconnected]
]
# Check that the pattern matches
assert filtered_events_0_0[0] == Event.Listen, "First event should be Listen"
assert filtered_events_1_0[0] == Event.Listen, "First event should be Listen"
# Check pattern: Connected -> Disconnected -> Connected -> Disconnected
assert filtered_events_0_0[1:5] == expected_events_without_listen
assert filtered_events_1_0[1:5] == expected_events_without_listen
assert filtered_events_without_listen[:4] == expected_events_without_listen
assert await wait_for_event(events_1_1, Event.Connected, 1.0)
assert await wait_for_event(events_1_1, Event.OpenedStream, 1.0)
# assert await wait_for_event(
# events_1_1, Event.ClosedStream, 1.0
# ) # Not implemented
assert await wait_for_event(events_1_1, Event.Disconnected, 1.0)

View File

@ -13,6 +13,9 @@ from libp2p import (
from libp2p.network.exceptions import (
SwarmException,
)
from libp2p.network.swarm import (
Swarm,
)
from libp2p.tools.utils import (
connect_swarm,
)
@ -166,12 +169,14 @@ async def test_swarm_multiaddr(security_protocol):
def test_new_swarm_defaults_to_tcp():
swarm = new_swarm()
assert isinstance(swarm, Swarm)
assert isinstance(swarm.transport, TCP)
def test_new_swarm_tcp_multiaddr_supported():
addr = Multiaddr("/ip4/127.0.0.1/tcp/9999")
swarm = new_swarm(listen_addrs=[addr])
assert isinstance(swarm, Swarm)
assert isinstance(swarm.transport, TCP)

View File

@ -1,5 +1,9 @@
import pytest
from multiaddr import (
Multiaddr,
)
from libp2p.peer.id import ID
from libp2p.peer.peerstore import (
PeerStore,
PeerStoreError,
@ -11,51 +15,72 @@ from libp2p.peer.peerstore import (
def test_addrs_empty():
with pytest.raises(PeerStoreError):
store = PeerStore()
val = store.addrs("peer")
val = store.addrs(ID(b"peer"))
assert not val
def test_add_addr_single():
store = PeerStore()
store.add_addr("peer1", "/foo", 10)
store.add_addr("peer1", "/bar", 10)
store.add_addr("peer2", "/baz", 10)
store.add_addr(ID(b"peer1"), Multiaddr("/ip4/127.0.0.1/tcp/4001"), 10)
store.add_addr(ID(b"peer1"), Multiaddr("/ip4/127.0.0.1/tcp/4002"), 10)
store.add_addr(ID(b"peer2"), Multiaddr("/ip4/127.0.0.1/tcp/4003"), 10)
assert store.addrs("peer1") == ["/foo", "/bar"]
assert store.addrs("peer2") == ["/baz"]
assert store.addrs(ID(b"peer1")) == [
Multiaddr("/ip4/127.0.0.1/tcp/4001"),
Multiaddr("/ip4/127.0.0.1/tcp/4002"),
]
assert store.addrs(ID(b"peer2")) == [Multiaddr("/ip4/127.0.0.1/tcp/4003")]
def test_add_addrs_multiple():
store = PeerStore()
store.add_addrs("peer1", ["/foo1", "/bar1"], 10)
store.add_addrs("peer2", ["/foo2"], 10)
store.add_addrs(
ID(b"peer1"),
[Multiaddr("/ip4/127.0.0.1/tcp/40011"), Multiaddr("/ip4/127.0.0.1/tcp/40021")],
10,
)
store.add_addrs(ID(b"peer2"), [Multiaddr("/ip4/127.0.0.1/tcp/40012")], 10)
assert store.addrs("peer1") == ["/foo1", "/bar1"]
assert store.addrs("peer2") == ["/foo2"]
assert store.addrs(ID(b"peer1")) == [
Multiaddr("/ip4/127.0.0.1/tcp/40011"),
Multiaddr("/ip4/127.0.0.1/tcp/40021"),
]
assert store.addrs(ID(b"peer2")) == [Multiaddr("/ip4/127.0.0.1/tcp/40012")]
def test_clear_addrs():
store = PeerStore()
store.add_addrs("peer1", ["/foo1", "/bar1"], 10)
store.add_addrs("peer2", ["/foo2"], 10)
store.clear_addrs("peer1")
store.add_addrs(
ID(b"peer1"),
[Multiaddr("/ip4/127.0.0.1/tcp/40011"), Multiaddr("/ip4/127.0.0.1/tcp/40021")],
10,
)
store.add_addrs(ID(b"peer2"), [Multiaddr("/ip4/127.0.0.1/tcp/40012")], 10)
store.clear_addrs(ID(b"peer1"))
assert store.addrs("peer1") == []
assert store.addrs("peer2") == ["/foo2"]
assert store.addrs(ID(b"peer1")) == []
assert store.addrs(ID(b"peer2")) == [Multiaddr("/ip4/127.0.0.1/tcp/40012")]
store.add_addrs("peer1", ["/foo1", "/bar1"], 10)
store.add_addrs(
ID(b"peer1"),
[Multiaddr("/ip4/127.0.0.1/tcp/40011"), Multiaddr("/ip4/127.0.0.1/tcp/40021")],
10,
)
assert store.addrs("peer1") == ["/foo1", "/bar1"]
assert store.addrs(ID(b"peer1")) == [
Multiaddr("/ip4/127.0.0.1/tcp/40011"),
Multiaddr("/ip4/127.0.0.1/tcp/40021"),
]
def test_peers_with_addrs():
store = PeerStore()
store.add_addrs("peer1", [], 10)
store.add_addrs("peer2", ["/foo"], 10)
store.add_addrs("peer3", ["/bar"], 10)
store.add_addrs(ID(b"peer1"), [], 10)
store.add_addrs(ID(b"peer2"), [Multiaddr("/ip4/127.0.0.1/tcp/4001")], 10)
store.add_addrs(ID(b"peer3"), [Multiaddr("/ip4/127.0.0.1/tcp/4002")], 10)
assert set(store.peers_with_addrs()) == {"peer2", "peer3"}
assert set(store.peers_with_addrs()) == {ID(b"peer2"), ID(b"peer3")}
store.clear_addrs("peer2")
store.clear_addrs(ID(b"peer2"))
assert set(store.peers_with_addrs()) == {"peer3"}
assert set(store.peers_with_addrs()) == {ID(b"peer3")}

View File

@ -23,9 +23,7 @@ kBZ7WvkmPV3aPL6jnwp2pXepntdVnaTiSxJ1dkXShZ/VSSDNZMYKY306EtHrIu3NZHtXhdyHKcggDXr
qkBrdgErAkAlpGPojUwemOggr4FD8sLX1ot2hDJyyV7OK2FXfajWEYJyMRL1Gm9Uk1+Un53RAkJneqp
JGAzKpyttXBTIDO51AkEA98KTiROMnnU8Y6Mgcvr68/SMIsvCYMt9/mtwSBGgl80VaTQ5Hpaktl6Xbh
VUt5Wv0tRxlXZiViCGCD1EtrrwTw==
""".replace(
"\n", ""
)
""".replace("\n", "")
EXPECTED_PEER_ID = "QmRK3JgmVEGiewxWbhpXLJyjWuGuLeSTMTndA1coMHEy5o"

View File

@ -1,4 +1,7 @@
from collections.abc import Sequence
import pytest
from multiaddr import Multiaddr
from libp2p.crypto.secp256k1 import (
create_new_key_pair,
@ -8,7 +11,7 @@ from libp2p.peer.peerdata import (
PeerDataError,
)
MOCK_ADDR = "/peer"
MOCK_ADDR = Multiaddr("/ip4/127.0.0.1/tcp/4001")
MOCK_KEYPAIR = create_new_key_pair()
MOCK_PUBKEY = MOCK_KEYPAIR.public_key
MOCK_PRIVKEY = MOCK_KEYPAIR.private_key
@ -23,7 +26,7 @@ def test_get_protocols_empty():
# Test case when adding protocols
def test_add_protocols():
peer_data = PeerData()
protocols = ["protocol1", "protocol2"]
protocols: Sequence[str] = ["protocol1", "protocol2"]
peer_data.add_protocols(protocols)
assert peer_data.get_protocols() == protocols
@ -31,7 +34,7 @@ def test_add_protocols():
# Test case when setting protocols
def test_set_protocols():
peer_data = PeerData()
protocols = ["protocolA", "protocolB"]
protocols: Sequence[str] = ["protocol1", "protocol2"]
peer_data.set_protocols(protocols)
assert peer_data.get_protocols() == protocols
@ -39,7 +42,7 @@ def test_set_protocols():
# Test case when adding addresses
def test_add_addrs():
peer_data = PeerData()
addresses = [MOCK_ADDR]
addresses: Sequence[Multiaddr] = [MOCK_ADDR]
peer_data.add_addrs(addresses)
assert peer_data.get_addrs() == addresses
@ -47,7 +50,7 @@ def test_add_addrs():
# Test case when adding same address more than once
def test_add_dup_addrs():
peer_data = PeerData()
addresses = [MOCK_ADDR, MOCK_ADDR]
addresses: Sequence[Multiaddr] = [MOCK_ADDR, MOCK_ADDR]
peer_data.add_addrs(addresses)
peer_data.add_addrs(addresses)
assert peer_data.get_addrs() == [MOCK_ADDR]
@ -56,7 +59,7 @@ def test_add_dup_addrs():
# Test case for clearing addresses
def test_clear_addrs():
peer_data = PeerData()
addresses = [MOCK_ADDR]
addresses: Sequence[Multiaddr] = [MOCK_ADDR]
peer_data.add_addrs(addresses)
peer_data.clear_addrs()
assert peer_data.get_addrs() == []

View File

@ -6,16 +6,12 @@ import multihash
from libp2p.crypto.rsa import (
create_new_key_pair,
)
import libp2p.peer.id as PeerID
from libp2p.peer.id import (
ID,
)
ALPHABETS = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
# ensure we are not in "debug" mode for the following tests
PeerID.FRIENDLY_IDS = False
def test_eq_impl_for_bytes():
random_id_string = ""
@ -70,8 +66,8 @@ def test_eq_true():
def test_eq_false():
peer_id = ID("efgh")
other = ID("abcd")
peer_id = ID(b"efgh")
other = ID(b"abcd")
assert peer_id != other
@ -91,7 +87,7 @@ def test_id_from_base58():
for _ in range(10):
random_id_string += random.choice(ALPHABETS)
expected = ID(base58.b58decode(random_id_string))
actual = ID.from_base58(random_id_string.encode())
actual = ID.from_base58(random_id_string)
assert actual == expected

View File

@ -17,10 +17,14 @@ VALID_MULTI_ADDR_STR = "/ip4/127.0.0.1/tcp/8000/p2p/3YgLAeMKSAPcGqZkAt8mREqhQXmJ
def test_init_():
random_addrs = [random.randint(0, 255) for r in range(4)]
random_addrs = [
multiaddr.Multiaddr(f"/ip4/127.0.0.1/tcp/{1000 + i}") for i in range(4)
]
random_id_string = ""
for _ in range(10):
random_id_string += random.SystemRandom().choice(ALPHABETS)
peer_id = ID(random_id_string.encode())
peer_info = PeerInfo(peer_id, random_addrs)

View File

@ -1,5 +1,6 @@
import pytest
from libp2p.peer.id import ID
from libp2p.peer.peerstore import (
PeerStore,
PeerStoreError,
@ -11,36 +12,36 @@ from libp2p.peer.peerstore import (
def test_get_empty():
with pytest.raises(PeerStoreError):
store = PeerStore()
val = store.get("peer", "key")
val = store.get(ID(b"peer"), "key")
assert not val
def test_put_get_simple():
store = PeerStore()
store.put("peer", "key", "val")
assert store.get("peer", "key") == "val"
store.put(ID(b"peer"), "key", "val")
assert store.get(ID(b"peer"), "key") == "val"
def test_put_get_update():
store = PeerStore()
store.put("peer", "key1", "val1")
store.put("peer", "key2", "val2")
store.put("peer", "key2", "new val2")
store.put(ID(b"peer"), "key1", "val1")
store.put(ID(b"peer"), "key2", "val2")
store.put(ID(b"peer"), "key2", "new val2")
assert store.get("peer", "key1") == "val1"
assert store.get("peer", "key2") == "new val2"
assert store.get(ID(b"peer"), "key1") == "val1"
assert store.get(ID(b"peer"), "key2") == "new val2"
def test_put_get_two_peers():
store = PeerStore()
store.put("peer1", "key1", "val1")
store.put("peer2", "key1", "val1 prime")
store.put(ID(b"peer1"), "key1", "val1")
store.put(ID(b"peer2"), "key1", "val1 prime")
assert store.get("peer1", "key1") == "val1"
assert store.get("peer2", "key1") == "val1 prime"
assert store.get(ID(b"peer1"), "key1") == "val1"
assert store.get(ID(b"peer2"), "key1") == "val1 prime"
# Try update
store.put("peer2", "key1", "new val1")
store.put(ID(b"peer2"), "key1", "new val1")
assert store.get("peer1", "key1") == "val1"
assert store.get("peer2", "key1") == "new val1"
assert store.get(ID(b"peer1"), "key1") == "val1"
assert store.get(ID(b"peer2"), "key1") == "new val1"

View File

@ -1,5 +1,7 @@
import pytest
from multiaddr import Multiaddr
from libp2p.peer.id import ID
from libp2p.peer.peerstore import (
PeerStore,
PeerStoreError,
@ -11,52 +13,52 @@ from libp2p.peer.peerstore import (
def test_peer_info_empty():
store = PeerStore()
with pytest.raises(PeerStoreError):
store.peer_info("peer")
store.peer_info(ID(b"peer"))
def test_peer_info_basic():
store = PeerStore()
store.add_addr("peer", "/foo", 10)
info = store.peer_info("peer")
store.add_addr(ID(b"peer"), Multiaddr("/ip4/127.0.0.1/tcp/4001"), 10)
info = store.peer_info(ID(b"peer"))
assert info.peer_id == "peer"
assert info.addrs == ["/foo"]
assert info.peer_id == ID(b"peer")
assert info.addrs == [Multiaddr("/ip4/127.0.0.1/tcp/4001")]
def test_add_get_protocols_basic():
store = PeerStore()
store.add_protocols("peer1", ["p1", "p2"])
store.add_protocols("peer2", ["p3"])
store.add_protocols(ID(b"peer1"), ["p1", "p2"])
store.add_protocols(ID(b"peer2"), ["p3"])
assert set(store.get_protocols("peer1")) == {"p1", "p2"}
assert set(store.get_protocols("peer2")) == {"p3"}
assert set(store.get_protocols(ID(b"peer1"))) == {"p1", "p2"}
assert set(store.get_protocols(ID(b"peer2"))) == {"p3"}
def test_add_get_protocols_extend():
store = PeerStore()
store.add_protocols("peer1", ["p1", "p2"])
store.add_protocols("peer1", ["p3"])
store.add_protocols(ID(b"peer1"), ["p1", "p2"])
store.add_protocols(ID(b"peer1"), ["p3"])
assert set(store.get_protocols("peer1")) == {"p1", "p2", "p3"}
assert set(store.get_protocols(ID(b"peer1"))) == {"p1", "p2", "p3"}
def test_set_protocols():
store = PeerStore()
store.add_protocols("peer1", ["p1", "p2"])
store.add_protocols("peer2", ["p3"])
store.add_protocols(ID(b"peer1"), ["p1", "p2"])
store.add_protocols(ID(b"peer2"), ["p3"])
store.set_protocols("peer1", ["p4"])
store.set_protocols("peer2", [])
store.set_protocols(ID(b"peer1"), ["p4"])
store.set_protocols(ID(b"peer2"), [])
assert set(store.get_protocols("peer1")) == {"p4"}
assert set(store.get_protocols("peer2")) == set()
assert set(store.get_protocols(ID(b"peer1"))) == {"p4"}
assert set(store.get_protocols(ID(b"peer2"))) == set()
# Test with methods from other Peer interfaces.
def test_peers():
store = PeerStore()
store.add_protocols("peer1", [])
store.put("peer2", "key", "val")
store.add_addr("peer3", "/foo", 10)
store.add_protocols(ID(b"peer1"), [])
store.put(ID(b"peer2"), "key", "val")
store.add_addr(ID(b"peer3"), Multiaddr("/ip4/127.0.0.1/tcp/4001"), 10)
assert set(store.peer_ids()) == {"peer1", "peer2", "peer3"}
assert set(store.peer_ids()) == {ID(b"peer1"), ID(b"peer2"), ID(b"peer3")}

View File

@ -1,10 +1,7 @@
import pytest
from trio.testing import (
RaisesGroup,
)
from libp2p.host.exceptions import (
StreamFailure,
from libp2p.custom_types import (
TProtocol,
)
from libp2p.tools.utils import (
create_echo_stream_handler,
@ -13,10 +10,10 @@ from tests.utils.factories import (
HostFactory,
)
PROTOCOL_ECHO = "/echo/1.0.0"
PROTOCOL_POTATO = "/potato/1.0.0"
PROTOCOL_FOO = "/foo/1.0.0"
PROTOCOL_ROCK = "/rock/1.0.0"
PROTOCOL_ECHO = TProtocol("/echo/1.0.0")
PROTOCOL_POTATO = TProtocol("/potato/1.0.0")
PROTOCOL_FOO = TProtocol("/foo/1.0.0")
PROTOCOL_ROCK = TProtocol("/rock/1.0.0")
ACK_PREFIX = "ack:"
@ -61,19 +58,12 @@ async def test_single_protocol_succeeds(security_protocol):
@pytest.mark.trio
async def test_single_protocol_fails(security_protocol):
# using trio.testing.RaisesGroup b/c pytest.raises does not handle ExceptionGroups
# yet: https://github.com/pytest-dev/pytest/issues/11538
# but switch to that once they do
# the StreamFailure is within 2 nested ExceptionGroups, so we use strict=False
# to unwrap down to the core Exception
with RaisesGroup(StreamFailure, allow_unwrapped=True, flatten_subgroups=True):
# Expect that protocol negotiation fails when no common protocols exist
with pytest.raises(Exception):
await perform_simple_test(
"", [PROTOCOL_ECHO], [PROTOCOL_POTATO], security_protocol
)
# Cleanup not reached on error
@pytest.mark.trio
async def test_multiple_protocol_first_is_valid_succeeds(security_protocol):
@ -103,16 +93,16 @@ async def test_multiple_protocol_second_is_valid_succeeds(security_protocol):
@pytest.mark.trio
async def test_multiple_protocol_fails(security_protocol):
protocols_for_client = [PROTOCOL_ROCK, PROTOCOL_FOO, "/bar/1.0.0"]
protocols_for_listener = ["/aspyn/1.0.0", "/rob/1.0.0", "/zx/1.0.0", "/alex/1.0.0"]
protocols_for_client = [PROTOCOL_ROCK, PROTOCOL_FOO, TProtocol("/bar/1.0.0")]
protocols_for_listener = [
TProtocol("/aspyn/1.0.0"),
TProtocol("/rob/1.0.0"),
TProtocol("/zx/1.0.0"),
TProtocol("/alex/1.0.0"),
]
# using trio.testing.RaisesGroup b/c pytest.raises does not handle ExceptionGroups
# yet: https://github.com/pytest-dev/pytest/issues/11538
# but switch to that once they do
# the StreamFailure is within 2 nested ExceptionGroups, so we use strict=False
# to unwrap down to the core Exception
with RaisesGroup(StreamFailure, allow_unwrapped=True, flatten_subgroups=True):
# Expect that protocol negotiation fails when no common protocols exist
with pytest.raises(Exception):
await perform_simple_test(
"", protocols_for_client, protocols_for_listener, security_protocol
)
@ -142,8 +132,8 @@ async def test_multistream_command(security_protocol):
for protocol in supported_protocols:
assert protocol in response
assert "/does/not/exist" not in response
assert "/foo/bar/1.2.3" not in response
assert TProtocol("/does/not/exist") not in response
assert TProtocol("/foo/bar/1.2.3") not in response
# Dialer asks for unspoorted command
with pytest.raises(ValueError, match="Command not supported"):

View File

@ -20,7 +20,6 @@ async def perform_test(num_nodes, adjacency_map, action_func, assertion_func):
such as send crypto and set crypto
:param assertion_func: assertions for testing the results of the actions are correct
"""
async with DummyAccountNode.create(num_nodes) as dummy_nodes:
# Create connections between nodes according to `adjacency_map`
async with trio.open_nursery() as nursery:

View File

@ -46,7 +46,7 @@ async def test_simple_two_nodes():
async def test_timed_cache_two_nodes():
# Two nodes using LastSeenCache with a TTL of 120 seconds
def get_msg_id(msg):
return (msg.data, msg.from_id)
return msg.data + msg.from_id
async with PubsubFactory.create_batch_with_floodsub(
2, seen_ttl=120, msg_id_constructor=get_msg_id

View File

@ -5,6 +5,7 @@ import trio
from libp2p.pubsub.gossipsub import (
PROTOCOL_ID,
GossipSub,
)
from libp2p.tools.utils import (
connect,
@ -24,7 +25,10 @@ async def test_join():
async with PubsubFactory.create_batch_with_gossipsub(
4, degree=4, degree_low=3, degree_high=5, heartbeat_interval=1, time_to_live=1
) as pubsubs_gsub:
gossipsubs = [pubsub.router for pubsub in pubsubs_gsub]
gossipsubs = []
for pubsub in pubsubs_gsub:
if isinstance(pubsub.router, GossipSub):
gossipsubs.append(pubsub.router)
hosts = [pubsub.host for pubsub in pubsubs_gsub]
hosts_indices = list(range(len(pubsubs_gsub)))
@ -86,7 +90,9 @@ async def test_join():
@pytest.mark.trio
async def test_leave():
async with PubsubFactory.create_batch_with_gossipsub(1) as pubsubs_gsub:
gossipsub = pubsubs_gsub[0].router
router = pubsubs_gsub[0].router
assert isinstance(router, GossipSub)
gossipsub = router
topic = "test_leave"
assert topic not in gossipsub.mesh
@ -104,7 +110,11 @@ async def test_leave():
@pytest.mark.trio
async def test_handle_graft(monkeypatch):
async with PubsubFactory.create_batch_with_gossipsub(2) as pubsubs_gsub:
gossipsubs = tuple(pubsub.router for pubsub in pubsubs_gsub)
gossipsub_routers = []
for pubsub in pubsubs_gsub:
if isinstance(pubsub.router, GossipSub):
gossipsub_routers.append(pubsub.router)
gossipsubs = tuple(gossipsub_routers)
index_alice = 0
id_alice = pubsubs_gsub[index_alice].my_id
@ -156,7 +166,11 @@ async def test_handle_prune():
async with PubsubFactory.create_batch_with_gossipsub(
2, heartbeat_interval=3
) as pubsubs_gsub:
gossipsubs = tuple(pubsub.router for pubsub in pubsubs_gsub)
gossipsub_routers = []
for pubsub in pubsubs_gsub:
if isinstance(pubsub.router, GossipSub):
gossipsub_routers.append(pubsub.router)
gossipsubs = tuple(gossipsub_routers)
index_alice = 0
id_alice = pubsubs_gsub[index_alice].my_id
@ -382,7 +396,9 @@ async def test_mesh_heartbeat(initial_mesh_peer_count, monkeypatch):
fake_peer_ids = [IDFactory() for _ in range(total_peer_count)]
peer_protocol = {peer_id: PROTOCOL_ID for peer_id in fake_peer_ids}
monkeypatch.setattr(pubsubs_gsub[0].router, "peer_protocol", peer_protocol)
router = pubsubs_gsub[0].router
assert isinstance(router, GossipSub)
monkeypatch.setattr(router, "peer_protocol", peer_protocol)
peer_topics = {topic: set(fake_peer_ids)}
# Monkeypatch the peer subscriptions
@ -394,27 +410,21 @@ async def test_mesh_heartbeat(initial_mesh_peer_count, monkeypatch):
mesh_peers = [fake_peer_ids[i] for i in mesh_peer_indices]
router_mesh = {topic: set(mesh_peers)}
# Monkeypatch our mesh peers
monkeypatch.setattr(pubsubs_gsub[0].router, "mesh", router_mesh)
monkeypatch.setattr(router, "mesh", router_mesh)
peers_to_graft, peers_to_prune = pubsubs_gsub[0].router.mesh_heartbeat()
if initial_mesh_peer_count > pubsubs_gsub[0].router.degree:
peers_to_graft, peers_to_prune = router.mesh_heartbeat()
if initial_mesh_peer_count > router.degree:
# If number of initial mesh peers is more than `GossipSubDegree`,
# we should PRUNE mesh peers
assert len(peers_to_graft) == 0
assert (
len(peers_to_prune)
== initial_mesh_peer_count - pubsubs_gsub[0].router.degree
)
assert len(peers_to_prune) == initial_mesh_peer_count - router.degree
for peer in peers_to_prune:
assert peer in mesh_peers
elif initial_mesh_peer_count < pubsubs_gsub[0].router.degree:
elif initial_mesh_peer_count < router.degree:
# If number of initial mesh peers is less than `GossipSubDegree`,
# we should GRAFT more peers
assert len(peers_to_prune) == 0
assert (
len(peers_to_graft)
== pubsubs_gsub[0].router.degree - initial_mesh_peer_count
)
assert len(peers_to_graft) == router.degree - initial_mesh_peer_count
for peer in peers_to_graft:
assert peer not in mesh_peers
else:
@ -436,7 +446,10 @@ async def test_gossip_heartbeat(initial_peer_count, monkeypatch):
fake_peer_ids = [IDFactory() for _ in range(total_peer_count)]
peer_protocol = {peer_id: PROTOCOL_ID for peer_id in fake_peer_ids}
monkeypatch.setattr(pubsubs_gsub[0].router, "peer_protocol", peer_protocol)
router_obj = pubsubs_gsub[0].router
assert isinstance(router_obj, GossipSub)
router = router_obj
monkeypatch.setattr(router, "peer_protocol", peer_protocol)
topic_mesh_peer_count = 14
# Split into mesh peers and fanout peers
@ -453,14 +466,14 @@ async def test_gossip_heartbeat(initial_peer_count, monkeypatch):
mesh_peers = [fake_peer_ids[i] for i in mesh_peer_indices]
router_mesh = {topic_mesh: set(mesh_peers)}
# Monkeypatch our mesh peers
monkeypatch.setattr(pubsubs_gsub[0].router, "mesh", router_mesh)
monkeypatch.setattr(router, "mesh", router_mesh)
fanout_peer_indices = random.sample(
range(topic_mesh_peer_count, total_peer_count), initial_peer_count
)
fanout_peers = [fake_peer_ids[i] for i in fanout_peer_indices]
router_fanout = {topic_fanout: set(fanout_peers)}
# Monkeypatch our fanout peers
monkeypatch.setattr(pubsubs_gsub[0].router, "fanout", router_fanout)
monkeypatch.setattr(router, "fanout", router_fanout)
def window(topic):
if topic == topic_mesh:
@ -471,20 +484,18 @@ async def test_gossip_heartbeat(initial_peer_count, monkeypatch):
return []
# Monkeypatch the memory cache messages
monkeypatch.setattr(pubsubs_gsub[0].router.mcache, "window", window)
monkeypatch.setattr(router.mcache, "window", window)
peers_to_gossip = pubsubs_gsub[0].router.gossip_heartbeat()
peers_to_gossip = router.gossip_heartbeat()
# If our mesh peer count is less than `GossipSubDegree`, we should gossip to up
# to `GossipSubDegree` peers (exclude mesh peers).
if topic_mesh_peer_count - initial_peer_count < pubsubs_gsub[0].router.degree:
if topic_mesh_peer_count - initial_peer_count < router.degree:
# The same goes for fanout so it's two times the number of peers to gossip.
assert len(peers_to_gossip) == 2 * (
topic_mesh_peer_count - initial_peer_count
)
elif (
topic_mesh_peer_count - initial_peer_count >= pubsubs_gsub[0].router.degree
):
assert len(peers_to_gossip) == 2 * (pubsubs_gsub[0].router.degree)
elif topic_mesh_peer_count - initial_peer_count >= router.degree:
assert len(peers_to_gossip) == 2 * (router.degree)
for peer in peers_to_gossip:
if peer in peer_topics[topic_mesh]:

View File

@ -4,6 +4,9 @@ import trio
from libp2p.peer.peerinfo import (
info_from_p2p_addr,
)
from libp2p.pubsub.gossipsub import (
GossipSub,
)
from libp2p.tools.utils import (
connect,
)
@ -82,31 +85,33 @@ async def test_reject_graft():
await pubsubs_gsub_1[0].router.join(topic)
# Pre-Graft assertions
assert (
topic in pubsubs_gsub_0[0].router.mesh
), "topic not in mesh for gossipsub 0"
assert (
topic in pubsubs_gsub_1[0].router.mesh
), "topic not in mesh for gossipsub 1"
assert (
host_1.get_id() not in pubsubs_gsub_0[0].router.mesh[topic]
), "gossipsub 1 in mesh topic for gossipsub 0"
assert (
host_0.get_id() not in pubsubs_gsub_1[0].router.mesh[topic]
), "gossipsub 0 in mesh topic for gossipsub 1"
assert topic in pubsubs_gsub_0[0].router.mesh, (
"topic not in mesh for gossipsub 0"
)
assert topic in pubsubs_gsub_1[0].router.mesh, (
"topic not in mesh for gossipsub 1"
)
assert host_1.get_id() not in pubsubs_gsub_0[0].router.mesh[topic], (
"gossipsub 1 in mesh topic for gossipsub 0"
)
assert host_0.get_id() not in pubsubs_gsub_1[0].router.mesh[topic], (
"gossipsub 0 in mesh topic for gossipsub 1"
)
# Gossipsub 1 emits a graft request to Gossipsub 0
await pubsubs_gsub_0[0].router.emit_graft(topic, host_1.get_id())
router_obj = pubsubs_gsub_0[0].router
assert isinstance(router_obj, GossipSub)
await router_obj.emit_graft(topic, host_1.get_id())
await trio.sleep(1)
# Post-Graft assertions
assert (
host_1.get_id() not in pubsubs_gsub_0[0].router.mesh[topic]
), "gossipsub 1 in mesh topic for gossipsub 0"
assert (
host_0.get_id() not in pubsubs_gsub_1[0].router.mesh[topic]
), "gossipsub 0 in mesh topic for gossipsub 1"
assert host_1.get_id() not in pubsubs_gsub_0[0].router.mesh[topic], (
"gossipsub 1 in mesh topic for gossipsub 0"
)
assert host_0.get_id() not in pubsubs_gsub_1[0].router.mesh[topic], (
"gossipsub 0 in mesh topic for gossipsub 1"
)
except Exception as e:
print(f"Test failed with error: {e}")
@ -139,12 +144,12 @@ async def test_heartbeat_reconnect():
await trio.sleep(1)
# Verify initial connection
assert (
host_1.get_id() in pubsubs_gsub_0[0].peers
), "Initial connection not established for gossipsub 0"
assert (
host_0.get_id() in pubsubs_gsub_1[0].peers
), "Initial connection not established for gossipsub 0"
assert host_1.get_id() in pubsubs_gsub_0[0].peers, (
"Initial connection not established for gossipsub 0"
)
assert host_0.get_id() in pubsubs_gsub_1[0].peers, (
"Initial connection not established for gossipsub 0"
)
# Simulate disconnection
await host_0.disconnect(host_1.get_id())
@ -153,17 +158,17 @@ async def test_heartbeat_reconnect():
await trio.sleep(1)
# Verify that peers are removed after disconnection
assert (
host_0.get_id() not in pubsubs_gsub_1[0].peers
), "Peer 0 still in gossipsub 1 after disconnection"
assert host_0.get_id() not in pubsubs_gsub_1[0].peers, (
"Peer 0 still in gossipsub 1 after disconnection"
)
# Wait for heartbeat to reestablish connection
await trio.sleep(2)
# Verify connection reestablishment
assert (
host_0.get_id() in pubsubs_gsub_1[0].peers
), "Reconnection not established for gossipsub 0"
assert host_0.get_id() in pubsubs_gsub_1[0].peers, (
"Reconnection not established for gossipsub 0"
)
except Exception as e:
print(f"Test failed with error: {e}")

View File

@ -1,15 +1,26 @@
from collections.abc import (
Sequence,
)
from libp2p.peer.id import (
ID,
)
from libp2p.pubsub.mcache import (
MessageCache,
)
from libp2p.pubsub.pb import (
rpc_pb2,
)
class Msg:
__slots__ = ["topicIDs", "seqno", "from_id"]
def __init__(self, topicIDs, seqno, from_id):
self.topicIDs = topicIDs
self.seqno = seqno
self.from_id = from_id
def make_msg(
topic_ids: Sequence[str],
seqno: bytes,
from_id: ID,
) -> rpc_pb2.Message:
return rpc_pb2.Message(
from_id=from_id.to_bytes(), seqno=seqno, topicIDs=list(topic_ids)
)
def test_mcache():
@ -19,7 +30,7 @@ def test_mcache():
msgs = []
for i in range(60):
msgs.append(Msg(["test"], i, "test"))
msgs.append(make_msg(["test"], i.to_bytes(1, "big"), ID(b"test")))
for i in range(10):
mcache.put(msgs[i])

View File

@ -1,6 +1,7 @@
from contextlib import (
contextmanager,
)
import inspect
from typing import (
NamedTuple,
)
@ -14,6 +15,9 @@ from libp2p.exceptions import (
from libp2p.network.stream.exceptions import (
StreamEOF,
)
from libp2p.peer.id import (
ID,
)
from libp2p.pubsub.pb import (
rpc_pb2,
)
@ -121,16 +125,18 @@ async def test_set_and_remove_topic_validator():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
is_sync_validator_called = False
def sync_validator(peer_id, msg):
def sync_validator(peer_id: ID, msg: rpc_pb2.Message) -> bool:
nonlocal is_sync_validator_called
is_sync_validator_called = True
return True
is_async_validator_called = False
async def async_validator(peer_id, msg):
async def async_validator(peer_id: ID, msg: rpc_pb2.Message) -> bool:
nonlocal is_async_validator_called
is_async_validator_called = True
await trio.lowlevel.checkpoint()
return True
topic = "TEST_VALIDATOR"
@ -144,7 +150,13 @@ async def test_set_and_remove_topic_validator():
assert not topic_validator.is_async
# Validate with sync validator
topic_validator.validator(peer_id=IDFactory(), msg="msg")
test_msg = make_pubsub_msg(
origin_id=IDFactory(),
topic_ids=[topic],
data=b"test",
seqno=b"\x00" * 8,
)
topic_validator.validator(IDFactory(), test_msg)
assert is_sync_validator_called
assert not is_async_validator_called
@ -158,7 +170,20 @@ async def test_set_and_remove_topic_validator():
assert topic_validator.is_async
# Validate with async validator
await topic_validator.validator(peer_id=IDFactory(), msg="msg")
test_msg = make_pubsub_msg(
origin_id=IDFactory(),
topic_ids=[topic],
data=b"test",
seqno=b"\x00" * 8,
)
validator = topic_validator.validator
if topic_validator.is_async:
import inspect
if inspect.iscoroutinefunction(validator):
await validator(IDFactory(), test_msg)
else:
validator(IDFactory(), test_msg)
assert is_async_validator_called
assert not is_sync_validator_called
@ -170,20 +195,18 @@ async def test_set_and_remove_topic_validator():
@pytest.mark.trio
async def test_get_msg_validators():
calls = [0, 0] # [sync, async]
def sync_validator(peer_id: ID, msg: rpc_pb2.Message) -> bool:
calls[0] += 1
return True
async def async_validator(peer_id: ID, msg: rpc_pb2.Message) -> bool:
calls[1] += 1
await trio.lowlevel.checkpoint()
return True
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
times_sync_validator_called = 0
def sync_validator(peer_id, msg):
nonlocal times_sync_validator_called
times_sync_validator_called += 1
times_async_validator_called = 0
async def async_validator(peer_id, msg):
nonlocal times_async_validator_called
times_async_validator_called += 1
await trio.lowlevel.checkpoint()
topic_1 = "TEST_VALIDATOR_1"
topic_2 = "TEST_VALIDATOR_2"
topic_3 = "TEST_VALIDATOR_3"
@ -204,13 +227,15 @@ async def test_get_msg_validators():
topic_validators = pubsubs_fsub[0].get_msg_validators(msg)
for topic_validator in topic_validators:
validator = topic_validator.validator
if topic_validator.is_async:
await topic_validator.validator(peer_id=IDFactory(), msg="msg")
if inspect.iscoroutinefunction(validator):
await validator(IDFactory(), msg)
else:
topic_validator.validator(peer_id=IDFactory(), msg="msg")
validator(IDFactory(), msg)
assert times_sync_validator_called == 2
assert times_async_validator_called == 1
assert calls[0] == 2
assert calls[1] == 1
@pytest.mark.parametrize(
@ -221,17 +246,17 @@ async def test_get_msg_validators():
async def test_validate_msg(is_topic_1_val_passed, is_topic_2_val_passed):
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
def passed_sync_validator(peer_id, msg):
def passed_sync_validator(peer_id: ID, msg: rpc_pb2.Message) -> bool:
return True
def failed_sync_validator(peer_id, msg):
def failed_sync_validator(peer_id: ID, msg: rpc_pb2.Message) -> bool:
return False
async def passed_async_validator(peer_id, msg):
async def passed_async_validator(peer_id: ID, msg: rpc_pb2.Message) -> bool:
await trio.lowlevel.checkpoint()
return True
async def failed_async_validator(peer_id, msg):
async def failed_async_validator(peer_id: ID, msg: rpc_pb2.Message) -> bool:
await trio.lowlevel.checkpoint()
return False
@ -297,11 +322,12 @@ async def test_continuously_read_stream(monkeypatch, nursery, security_protocol)
m.setattr(pubsubs_fsub[0].router, "handle_rpc", mock_handle_rpc)
yield Events(event_push_msg, event_handle_subscription, event_handle_rpc)
async with PubsubFactory.create_batch_with_floodsub(
1, security_protocol=security_protocol
) as pubsubs_fsub, net_stream_pair_factory(
security_protocol=security_protocol
) as stream_pair:
async with (
PubsubFactory.create_batch_with_floodsub(
1, security_protocol=security_protocol
) as pubsubs_fsub,
net_stream_pair_factory(security_protocol=security_protocol) as stream_pair,
):
await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
# Kick off the task `continuously_read_stream`
nursery.start_soon(pubsubs_fsub[0].continuously_read_stream, stream_pair[0])
@ -429,11 +455,12 @@ async def test_handle_talk():
@pytest.mark.trio
async def test_message_all_peers(monkeypatch, security_protocol):
async with PubsubFactory.create_batch_with_floodsub(
1, security_protocol=security_protocol
) as pubsubs_fsub, net_stream_pair_factory(
security_protocol=security_protocol
) as stream_pair:
async with (
PubsubFactory.create_batch_with_floodsub(
1, security_protocol=security_protocol
) as pubsubs_fsub,
net_stream_pair_factory(security_protocol=security_protocol) as stream_pair,
):
peer_id = IDFactory()
mock_peers = {peer_id: stream_pair[0]}
with monkeypatch.context() as m:
@ -530,15 +557,15 @@ async def test_publish_push_msg_is_called(monkeypatch):
await pubsubs_fsub[0].publish(TESTING_TOPIC, TESTING_DATA)
await pubsubs_fsub[0].publish(TESTING_TOPIC, TESTING_DATA)
assert (
len(msgs) == 2
), "`push_msg` should be called every time `publish` is called"
assert len(msgs) == 2, (
"`push_msg` should be called every time `publish` is called"
)
assert (msg_forwarders[0] == msg_forwarders[1]) and (
msg_forwarders[1] == pubsubs_fsub[0].my_id
)
assert (
msgs[0].seqno != msgs[1].seqno
), "`seqno` should be different every time"
assert msgs[0].seqno != msgs[1].seqno, (
"`seqno` should be different every time"
)
@pytest.mark.trio
@ -611,7 +638,7 @@ async def test_push_msg(monkeypatch):
# Test: add a topic validator and `push_msg` the message that
# does not pass the validation.
# `router_publish` is not called then.
def failed_sync_validator(peer_id, msg):
def failed_sync_validator(peer_id: ID, msg: rpc_pb2.Message) -> bool:
return False
pubsubs_fsub[0].set_topic_validator(
@ -659,6 +686,9 @@ async def test_strict_signing_failed_validation(monkeypatch):
seqno=b"\x00" * 8,
)
priv_key = pubsubs_fsub[0].sign_key
assert priv_key is not None, (
"Private key should not be None when strict_signing=True"
)
signature = priv_key.sign(
PUBSUB_SIGNING_PREFIX.encode() + msg.SerializeToString()
)
@ -803,15 +833,15 @@ async def test_blacklist_blocks_new_peer_connections(monkeypatch):
await pubsub._handle_new_peer(blacklisted_peer)
# Verify that both new_stream and router.add_peer was not called
assert (
not new_stream_called
), "new_stream should be not be called to get hello packet"
assert (
not router_add_peer_called
), "Router.add_peer should not be called for blacklisted peer"
assert (
blacklisted_peer not in pubsub.peers
), "Blacklisted peer should not be in peers dict"
assert not new_stream_called, (
"new_stream should be not be called to get hello packet"
)
assert not router_add_peer_called, (
"Router.add_peer should not be called for blacklisted peer"
)
assert blacklisted_peer not in pubsub.peers, (
"Blacklisted peer should not be in peers dict"
)
@pytest.mark.trio
@ -838,7 +868,7 @@ async def test_blacklist_blocks_messages_from_blacklisted_originator():
# Track if router.publish is called
router_publish_called = False
async def mock_router_publish(*args, **kwargs):
async def mock_router_publish(msg_forwarder: ID, pubsub_msg: rpc_pb2.Message):
nonlocal router_publish_called
router_publish_called = True
await trio.lowlevel.checkpoint()
@ -851,12 +881,12 @@ async def test_blacklist_blocks_messages_from_blacklisted_originator():
await pubsub.push_msg(blacklisted_originator, msg)
# Verify message was rejected
assert (
not router_publish_called
), "Router.publish should not be called for blacklisted originator"
assert not pubsub._is_msg_seen(
msg
), "Message from blacklisted originator should not be marked as seen"
assert not router_publish_called, (
"Router.publish should not be called for blacklisted originator"
)
assert not pubsub._is_msg_seen(msg), (
"Message from blacklisted originator should not be marked as seen"
)
finally:
pubsub.router.publish = original_router_publish
@ -894,8 +924,8 @@ async def test_blacklist_allows_non_blacklisted_peers():
# Track router.publish calls
router_publish_calls = []
async def mock_router_publish(*args, **kwargs):
router_publish_calls.append(args)
async def mock_router_publish(msg_forwarder: ID, pubsub_msg: rpc_pb2.Message):
router_publish_calls.append((msg_forwarder, pubsub_msg))
await trio.lowlevel.checkpoint()
original_router_publish = pubsub.router.publish
@ -909,15 +939,15 @@ async def test_blacklist_allows_non_blacklisted_peers():
await pubsub.push_msg(allowed_peer, msg_from_blacklisted)
# Verify only allowed message was processed
assert (
len(router_publish_calls) == 1
), "Only one message should be processed"
assert pubsub._is_msg_seen(
msg_from_allowed
), "Allowed message should be marked as seen"
assert not pubsub._is_msg_seen(
msg_from_blacklisted
), "Blacklisted message should not be marked as seen"
assert len(router_publish_calls) == 1, (
"Only one message should be processed"
)
assert pubsub._is_msg_seen(msg_from_allowed), (
"Allowed message should be marked as seen"
)
assert not pubsub._is_msg_seen(msg_from_blacklisted), (
"Blacklisted message should not be marked as seen"
)
# Verify subscription received the allowed message
received_msg = await sub.get()
@ -960,7 +990,7 @@ async def test_blacklist_integration_with_existing_functionality():
# due to seen cache (not blacklist)
router_publish_called = False
async def mock_router_publish(*args, **kwargs):
async def mock_router_publish(msg_forwarder: ID, pubsub_msg: rpc_pb2.Message):
nonlocal router_publish_called
router_publish_called = True
await trio.lowlevel.checkpoint()
@ -970,9 +1000,9 @@ async def test_blacklist_integration_with_existing_functionality():
try:
await pubsub.push_msg(other_peer, msg)
assert (
not router_publish_called
), "Duplicate message should be rejected by seen cache"
assert not router_publish_called, (
"Duplicate message should be rejected by seen cache"
)
finally:
pubsub.router.publish = original_router_publish
@ -1001,7 +1031,7 @@ async def test_blacklist_blocks_messages_from_blacklisted_source():
# Track if router.publish is called (it shouldn't be for blacklisted forwarder)
router_publish_called = False
async def mock_router_publish(*args, **kwargs):
async def mock_router_publish(msg_forwarder: ID, pubsub_msg: rpc_pb2.Message):
nonlocal router_publish_called
router_publish_called = True
await trio.lowlevel.checkpoint()
@ -1014,12 +1044,12 @@ async def test_blacklist_blocks_messages_from_blacklisted_source():
await pubsub.push_msg(blacklisted_forwarder, msg)
# Verify message was rejected
assert (
not router_publish_called
), "Router.publish should not be called for blacklisted forwarder"
assert not pubsub._is_msg_seen(
msg
), "Message from blacklisted forwarder should not be marked as seen"
assert not router_publish_called, (
"Router.publish should not be called for blacklisted forwarder"
)
assert not pubsub._is_msg_seen(msg), (
"Message from blacklisted forwarder should not be marked as seen"
)
finally:
pubsub.router.publish = original_router_publish

View File

@ -1,6 +1,7 @@
import pytest
import trio
from libp2p.abc import ISecureConn
from libp2p.crypto.secp256k1 import (
create_new_key_pair,
)
@ -32,7 +33,8 @@ async def test_create_secure_session(nursery):
async with raw_conn_factory(nursery) as conns:
local_conn, remote_conn = conns
local_secure_conn, remote_secure_conn = None, None
local_secure_conn: ISecureConn | None = None
remote_secure_conn: ISecureConn | None = None
async def local_create_secure_session():
nonlocal local_secure_conn
@ -54,6 +56,9 @@ async def test_create_secure_session(nursery):
nursery_1.start_soon(local_create_secure_session)
nursery_1.start_soon(remote_create_secure_session)
if local_secure_conn is None or remote_secure_conn is None:
raise Exception("Failed to secure connection")
msg = b"abc"
await local_secure_conn.write(msg)
received_msg = await remote_secure_conn.read(MAX_READ_LEN)

View File

@ -0,0 +1,189 @@
import pytest
import trio
from libp2p.abc import ISecureConn
from libp2p.crypto.keys import PrivateKey, PublicKey
from libp2p.peer.id import ID
from libp2p.stream_muxer.exceptions import (
MuxedStreamClosed,
MuxedStreamError,
)
from libp2p.stream_muxer.mplex.datastructures import (
StreamID,
)
from libp2p.stream_muxer.mplex.mplex import Mplex
from libp2p.stream_muxer.mplex.mplex_stream import (
MplexStream,
)
from libp2p.stream_muxer.yamux.yamux import (
Yamux,
YamuxStream,
)
DUMMY_PEER_ID = ID(b"dummy_peer_id")
class DummySecuredConn(ISecureConn):
def __init__(self, is_initiator: bool = False):
self.is_initiator = is_initiator
async def write(self, data: bytes) -> None:
pass
async def read(self, n: int | None = -1) -> bytes:
return b""
async def close(self) -> None:
pass
def get_remote_address(self):
return None
def get_local_address(self):
return None
def get_local_peer(self) -> ID:
return ID(b"local")
def get_local_private_key(self) -> PrivateKey:
return PrivateKey() # Dummy key
def get_remote_peer(self) -> ID:
return ID(b"remote")
def get_remote_public_key(self) -> PublicKey:
return PublicKey() # Dummy key
class MockMuxedConn:
def __init__(self):
self.streams = {}
self.streams_lock = trio.Lock()
self.event_shutting_down = trio.Event()
self.event_closed = trio.Event()
self.event_started = trio.Event()
self.secured_conn = DummySecuredConn() # For YamuxStream
async def send_message(self, flag, data, stream_id):
pass
def get_remote_address(self):
return None
class MockMplexMuxedConn:
def __init__(self):
self.streams_lock = trio.Lock()
self.event_shutting_down = trio.Event()
self.event_closed = trio.Event()
self.event_started = trio.Event()
async def send_message(self, flag, data, stream_id):
pass
def get_remote_address(self):
return None
class MockYamuxMuxedConn:
def __init__(self):
self.secured_conn = DummySecuredConn()
self.event_shutting_down = trio.Event()
self.event_closed = trio.Event()
self.event_started = trio.Event()
async def send_message(self, flag, data, stream_id):
pass
def get_remote_address(self):
return None
@pytest.mark.trio
async def test_mplex_stream_async_context_manager():
muxed_conn = Mplex(DummySecuredConn(), DUMMY_PEER_ID)
stream_id = StreamID(1, True) # Use real StreamID
stream = MplexStream(
name="test_stream",
stream_id=stream_id,
muxed_conn=muxed_conn,
incoming_data_channel=trio.open_memory_channel(8)[1],
)
async with stream as s:
assert s is stream
assert not stream.event_local_closed.is_set()
assert not stream.event_remote_closed.is_set()
assert not stream.event_reset.is_set()
assert stream.event_local_closed.is_set()
@pytest.mark.trio
async def test_yamux_stream_async_context_manager():
muxed_conn = Yamux(DummySecuredConn(), DUMMY_PEER_ID)
stream = YamuxStream(stream_id=1, conn=muxed_conn, is_initiator=True)
async with stream as s:
assert s is stream
assert not stream.closed
assert not stream.send_closed
assert not stream.recv_closed
assert stream.send_closed
@pytest.mark.trio
async def test_mplex_stream_async_context_manager_with_error():
muxed_conn = Mplex(DummySecuredConn(), DUMMY_PEER_ID)
stream_id = StreamID(1, True)
stream = MplexStream(
name="test_stream",
stream_id=stream_id,
muxed_conn=muxed_conn,
incoming_data_channel=trio.open_memory_channel(8)[1],
)
with pytest.raises(ValueError):
async with stream as s:
assert s is stream
assert not stream.event_local_closed.is_set()
assert not stream.event_remote_closed.is_set()
assert not stream.event_reset.is_set()
raise ValueError("Test error")
assert stream.event_local_closed.is_set()
@pytest.mark.trio
async def test_yamux_stream_async_context_manager_with_error():
muxed_conn = Yamux(DummySecuredConn(), DUMMY_PEER_ID)
stream = YamuxStream(stream_id=1, conn=muxed_conn, is_initiator=True)
with pytest.raises(ValueError):
async with stream as s:
assert s is stream
assert not stream.closed
assert not stream.send_closed
assert not stream.recv_closed
raise ValueError("Test error")
assert stream.send_closed
@pytest.mark.trio
async def test_mplex_stream_async_context_manager_write_after_close():
muxed_conn = Mplex(DummySecuredConn(), DUMMY_PEER_ID)
stream_id = StreamID(1, True)
stream = MplexStream(
name="test_stream",
stream_id=stream_id,
muxed_conn=muxed_conn,
incoming_data_channel=trio.open_memory_channel(8)[1],
)
async with stream as s:
assert s is stream
with pytest.raises(MuxedStreamClosed):
await stream.write(b"test data")
@pytest.mark.trio
async def test_yamux_stream_async_context_manager_write_after_close():
muxed_conn = Yamux(DummySecuredConn(), DUMMY_PEER_ID)
stream = YamuxStream(stream_id=1, conn=muxed_conn, is_initiator=True)
async with stream as s:
assert s is stream
with pytest.raises(MuxedStreamError):
await stream.write(b"test data")

View File

@ -1,6 +1,7 @@
import logging
import pytest
from multiaddr.multiaddr import Multiaddr
import trio
from libp2p import (
@ -11,6 +12,8 @@ from libp2p import (
new_host,
set_default_muxer,
)
from libp2p.custom_types import TProtocol
from libp2p.peer.peerinfo import PeerInfo
# Enable logging for debugging
logging.basicConfig(level=logging.DEBUG)
@ -24,13 +27,14 @@ async def host_pair(muxer_preference=None, muxer_opt=None):
host_b = new_host(muxer_preference=muxer_preference, muxer_opt=muxer_opt)
# Start both hosts
await host_a.get_network().listen("/ip4/127.0.0.1/tcp/0")
await host_b.get_network().listen("/ip4/127.0.0.1/tcp/0")
await host_a.get_network().listen(Multiaddr("/ip4/127.0.0.1/tcp/0"))
await host_b.get_network().listen(Multiaddr("/ip4/127.0.0.1/tcp/0"))
# Connect hosts with a timeout
listen_addrs_a = host_a.get_addrs()
with trio.move_on_after(5): # 5 second timeout
await host_b.connect(host_a.get_id(), listen_addrs_a)
peer_info_a = PeerInfo(host_a.get_id(), listen_addrs_a)
await host_b.connect(peer_info_a)
yield host_a, host_b
@ -57,14 +61,14 @@ async def test_multiplexer_preference_parameter(muxer_preference):
try:
# Start both hosts
await host_a.get_network().listen("/ip4/127.0.0.1/tcp/0")
await host_b.get_network().listen("/ip4/127.0.0.1/tcp/0")
await host_a.get_network().listen(Multiaddr("/ip4/127.0.0.1/tcp/0"))
await host_b.get_network().listen(Multiaddr("/ip4/127.0.0.1/tcp/0"))
# Connect hosts with timeout
listen_addrs_a = host_a.get_addrs()
with trio.move_on_after(5): # 5 second timeout
await host_b.connect(host_a.get_id(), listen_addrs_a)
peer_info_a = PeerInfo(host_a.get_id(), listen_addrs_a)
await host_b.connect(peer_info_a)
# Check if connection was established
connections = host_b.get_network().connections
assert len(connections) > 0, "Connection not established"
@ -74,7 +78,7 @@ async def test_multiplexer_preference_parameter(muxer_preference):
muxed_conn = conn.muxed_conn
# Define a simple echo protocol
ECHO_PROTOCOL = "/echo/1.0.0"
ECHO_PROTOCOL = TProtocol("/echo/1.0.0")
# Setup echo handler on host_a
async def echo_handler(stream):
@ -89,7 +93,7 @@ async def test_multiplexer_preference_parameter(muxer_preference):
# Open a stream with timeout
with trio.move_on_after(5):
stream = await muxed_conn.open_stream(ECHO_PROTOCOL)
stream = await muxed_conn.open_stream()
# Check stream type
if muxer_preference == MUXER_YAMUX:
@ -132,13 +136,14 @@ async def test_explicit_muxer_options(muxer_option_func, expected_stream_class):
try:
# Start both hosts
await host_a.get_network().listen("/ip4/127.0.0.1/tcp/0")
await host_b.get_network().listen("/ip4/127.0.0.1/tcp/0")
await host_a.get_network().listen(Multiaddr("/ip4/127.0.0.1/tcp/0"))
await host_b.get_network().listen(Multiaddr("/ip4/127.0.0.1/tcp/0"))
# Connect hosts with timeout
listen_addrs_a = host_a.get_addrs()
with trio.move_on_after(5): # 5 second timeout
await host_b.connect(host_a.get_id(), listen_addrs_a)
peer_info_a = PeerInfo(host_a.get_id(), listen_addrs_a)
await host_b.connect(peer_info_a)
# Check if connection was established
connections = host_b.get_network().connections
@ -149,7 +154,7 @@ async def test_explicit_muxer_options(muxer_option_func, expected_stream_class):
muxed_conn = conn.muxed_conn
# Define a simple echo protocol
ECHO_PROTOCOL = "/echo/1.0.0"
ECHO_PROTOCOL = TProtocol("/echo/1.0.0")
# Setup echo handler on host_a
async def echo_handler(stream):
@ -164,7 +169,7 @@ async def test_explicit_muxer_options(muxer_option_func, expected_stream_class):
# Open a stream with timeout
with trio.move_on_after(5):
stream = await muxed_conn.open_stream(ECHO_PROTOCOL)
stream = await muxed_conn.open_stream()
# Check stream type
assert expected_stream_class in stream.__class__.__name__
@ -200,13 +205,14 @@ async def test_global_default_muxer(global_default):
try:
# Start both hosts
await host_a.get_network().listen("/ip4/127.0.0.1/tcp/0")
await host_b.get_network().listen("/ip4/127.0.0.1/tcp/0")
await host_a.get_network().listen(Multiaddr("/ip4/127.0.0.1/tcp/0"))
await host_b.get_network().listen(Multiaddr("/ip4/127.0.0.1/tcp/0"))
# Connect hosts with timeout
listen_addrs_a = host_a.get_addrs()
with trio.move_on_after(5): # 5 second timeout
await host_b.connect(host_a.get_id(), listen_addrs_a)
peer_info_a = PeerInfo(host_a.get_id(), listen_addrs_a)
await host_b.connect(peer_info_a)
# Check if connection was established
connections = host_b.get_network().connections
@ -217,7 +223,7 @@ async def test_global_default_muxer(global_default):
muxed_conn = conn.muxed_conn
# Define a simple echo protocol
ECHO_PROTOCOL = "/echo/1.0.0"
ECHO_PROTOCOL = TProtocol("/echo/1.0.0")
# Setup echo handler on host_a
async def echo_handler(stream):
@ -232,7 +238,7 @@ async def test_global_default_muxer(global_default):
# Open a stream with timeout
with trio.move_on_after(5):
stream = await muxed_conn.open_stream(ECHO_PROTOCOL)
stream = await muxed_conn.open_stream()
# Check stream type based on global default
if global_default == MUXER_YAMUX:

View File

@ -7,6 +7,9 @@ from trio.testing import (
memory_stream_pair,
)
from libp2p.abc import (
IRawConnection,
)
from libp2p.crypto.ed25519 import (
create_new_key_pair,
)
@ -29,18 +32,19 @@ from libp2p.stream_muxer.yamux.yamux import (
)
class TrioStreamAdapter:
def __init__(self, send_stream, receive_stream):
class TrioStreamAdapter(IRawConnection):
def __init__(self, send_stream, receive_stream, is_initiator: bool = False):
self.send_stream = send_stream
self.receive_stream = receive_stream
self.is_initiator = is_initiator
async def write(self, data):
async def write(self, data: bytes) -> None:
logging.debug(f"Writing {len(data)} bytes")
with trio.move_on_after(2):
await self.send_stream.send_all(data)
async def read(self, n=-1):
if n == -1:
async def read(self, n: int | None = None) -> bytes:
if n is None or n == -1:
raise ValueError("Reading unbounded not supported")
logging.debug(f"Attempting to read {n} bytes")
with trio.move_on_after(2):
@ -48,9 +52,13 @@ class TrioStreamAdapter:
logging.debug(f"Read {len(data)} bytes")
return data
async def close(self):
async def close(self) -> None:
logging.debug("Closing stream")
def get_remote_address(self) -> tuple[str, int] | None:
# Return None since this is a test adapter without real network info
return None
@pytest.fixture
def key_pair():
@ -68,8 +76,8 @@ async def secure_conn_pair(key_pair, peer_id):
client_send, server_receive = memory_stream_pair()
server_send, client_receive = memory_stream_pair()
client_rw = TrioStreamAdapter(client_send, client_receive)
server_rw = TrioStreamAdapter(server_send, server_receive)
client_rw = TrioStreamAdapter(client_send, client_receive, is_initiator=True)
server_rw = TrioStreamAdapter(server_send, server_receive, is_initiator=False)
insecure_transport = InsecureTransport(key_pair)
@ -196,9 +204,9 @@ async def test_yamux_stream_close(yamux_pair):
await trio.sleep(0.1)
# Now both directions are closed, so stream should be fully closed
assert (
client_stream.closed
), "Client stream should be fully closed after bidirectional close"
assert client_stream.closed, (
"Client stream should be fully closed after bidirectional close"
)
# Writing should still fail
with pytest.raises(MuxedStreamError):
@ -215,8 +223,12 @@ async def test_yamux_stream_reset(yamux_pair):
server_stream = await server_yamux.accept_stream()
await client_stream.reset()
# After reset, reading should raise MuxedStreamReset or MuxedStreamEOF
with pytest.raises((MuxedStreamEOF, MuxedStreamError)):
try:
await server_stream.read()
except (MuxedStreamEOF, MuxedStreamError):
pass
else:
pytest.fail("Expected MuxedStreamEOF or MuxedStreamError")
# Verify subsequent operations fail with StreamReset or EOF
with pytest.raises(MuxedStreamError):
await server_stream.read()
@ -269,9 +281,9 @@ async def test_yamux_flow_control(yamux_pair):
await client_stream.write(large_data)
# Check that window was reduced
assert (
client_stream.send_window < initial_window
), "Window should be reduced after sending"
assert client_stream.send_window < initial_window, (
"Window should be reduced after sending"
)
# Read the data on the server side
received = b""
@ -307,9 +319,9 @@ async def test_yamux_flow_control(yamux_pair):
f" {client_stream.send_window},"
f"initial half: {initial_window // 2}"
)
assert (
client_stream.send_window > initial_window // 2
), "Window should be increased after update"
assert client_stream.send_window > initial_window // 2, (
"Window should be increased after update"
)
await client_stream.close()
await server_stream.close()
@ -349,17 +361,17 @@ async def test_yamux_half_close(yamux_pair):
test_data = b"server response after client close"
# The server shouldn't be marked as send_closed yet
assert (
not server_stream.send_closed
), "Server stream shouldn't be marked as send_closed"
assert not server_stream.send_closed, (
"Server stream shouldn't be marked as send_closed"
)
await server_stream.write(test_data)
# Client can still read
received = await client_stream.read(len(test_data))
assert (
received == test_data
), "Client should still be able to read after sending FIN"
assert received == test_data, (
"Client should still be able to read after sending FIN"
)
# Now server closes its sending side
await server_stream.close()
@ -406,9 +418,9 @@ async def test_yamux_go_away_with_error(yamux_pair):
await trio.sleep(0.2)
# Verify server recognized shutdown
assert (
server_yamux.event_shutting_down.is_set()
), "Server should be shutting down after GO_AWAY"
assert server_yamux.event_shutting_down.is_set(), (
"Server should be shutting down after GO_AWAY"
)
logging.debug("test_yamux_go_away_with_error complete")

View File

@ -11,13 +11,8 @@ else:
import pytest
import trio
from trio.testing import (
Matcher,
RaisesGroup,
)
from libp2p.tools.async_service import (
DaemonTaskExit,
LifecycleError,
Service,
TrioManager,
@ -134,11 +129,7 @@ async def test_trio_service_lifecycle_run_and_exception():
manager = TrioManager(service)
async def do_service_run():
with RaisesGroup(
Matcher(RuntimeError, match="Service throwing error"),
allow_unwrapped=True,
flatten_subgroups=True,
):
with pytest.raises(ExceptionGroup):
await manager.run()
await do_service_lifecycle_check(
@ -165,11 +156,7 @@ async def test_trio_service_lifecycle_run_and_task_exception():
manager = TrioManager(service)
async def do_service_run():
with RaisesGroup(
Matcher(RuntimeError, match="Service throwing error"),
allow_unwrapped=True,
flatten_subgroups=True,
):
with pytest.raises(ExceptionGroup):
await manager.run()
await do_service_lifecycle_check(
@ -230,11 +217,7 @@ async def test_trio_service_lifecycle_run_and_daemon_task_exit():
manager = TrioManager(service)
async def do_service_run():
with RaisesGroup(
Matcher(DaemonTaskExit, match="Daemon task"),
allow_unwrapped=True,
flatten_subgroups=True,
):
with pytest.raises(ExceptionGroup):
await manager.run()
await do_service_lifecycle_check(
@ -395,11 +378,7 @@ async def test_trio_service_manager_run_task_reraises_exceptions():
with trio.fail_after(1):
await trio.sleep_forever()
with RaisesGroup(
Matcher(Exception, match="task exception in run_task"),
allow_unwrapped=True,
flatten_subgroups=True,
):
with pytest.raises(ExceptionGroup):
async with background_trio_service(RunTaskService()):
task_event.set()
with trio.fail_after(1):
@ -419,13 +398,7 @@ async def test_trio_service_manager_run_daemon_task_cancels_if_exits():
with trio.fail_after(1):
await trio.sleep_forever()
with RaisesGroup(
Matcher(
DaemonTaskExit, match=r"Daemon task daemon_task_fn\[daemon=True\] exited"
),
allow_unwrapped=True,
flatten_subgroups=True,
):
with pytest.raises(ExceptionGroup):
async with background_trio_service(RunTaskService()):
task_event.set()
with trio.fail_after(1):
@ -443,11 +416,7 @@ async def test_trio_service_manager_propogates_and_records_exceptions():
assert manager.did_error is False
with RaisesGroup(
Matcher(RuntimeError, match="this is the error"),
allow_unwrapped=True,
flatten_subgroups=True,
):
with pytest.raises(ExceptionGroup):
await manager.run()
assert manager.did_error is True
@ -641,7 +610,7 @@ async def test_trio_service_with_try_finally_cleanup_with_shielded_await():
ready_cancel.set()
await self.manager.wait_finished()
finally:
with trio.CancelScope(shield=True):
with trio.CancelScope(shield=True): # type: ignore[call-arg]
await trio.lowlevel.checkpoint()
self.cleanup_up = True
@ -660,7 +629,7 @@ async def test_error_in_service_run():
self.manager.run_daemon_task(self.manager.wait_finished)
raise ValueError("Exception inside run()")
with RaisesGroup(ValueError, allow_unwrapped=True, flatten_subgroups=True):
with pytest.raises(ExceptionGroup):
await TrioManager.run_service(ServiceTest())
@ -679,5 +648,5 @@ async def test_daemon_task_finishes_leaving_children():
async def run(self):
self.manager.run_daemon_task(self.buggy_daemon)
with RaisesGroup(DaemonTaskExit, allow_unwrapped=True, flatten_subgroups=True):
with pytest.raises(ExceptionGroup):
await TrioManager.run_service(ServiceTest())

View File

@ -1,9 +1,15 @@
# Copied from https://github.com/ethereum/async-service
import sys
import pytest
import trio
from trio.testing import (
RaisesGroup,
)
if sys.version_info >= (3, 11):
from builtins import (
ExceptionGroup,
)
else:
from exceptiongroup import ExceptionGroup
from libp2p.tools.async_service import (
LifecycleError,
@ -50,7 +56,7 @@ async def test_trio_service_external_api_raises_when_cancelled():
service = ExternalAPIService()
async with background_trio_service(service) as manager:
with RaisesGroup(LifecycleError, allow_unwrapped=True, flatten_subgroups=True):
with pytest.raises(ExceptionGroup):
async with trio.open_nursery() as nursery:
# an event to ensure that we are indeed within the body of the
is_within_fn = trio.Event()

View File

@ -3,8 +3,8 @@ import trio
from libp2p.tools.async_service import (
Service,
background_trio_service,
)
from libp2p.tools.async_service.trio_service import TrioManager
@pytest.mark.trio
@ -33,24 +33,31 @@ async def test_trio_manager_stats():
self.manager.run_task(trio.lowlevel.checkpoint)
service = StatsTest()
async with background_trio_service(service) as manager:
service.run_external_root()
assert len(manager._root_tasks) == 2
with trio.fail_after(1):
await ready.wait()
async with trio.open_nursery() as nursery:
manager = TrioManager(service)
nursery.start_soon(manager.run)
await manager.wait_started()
# we need to yield to the event loop a few times to allow the various
# tasks to schedule themselves and get running.
for _ in range(50):
await trio.lowlevel.checkpoint()
try:
service.run_external_root()
assert len(manager._root_tasks) == 2
with trio.fail_after(1):
await ready.wait()
assert manager.stats.tasks.total_count == 10
assert manager.stats.tasks.finished_count == 3
assert manager.stats.tasks.pending_count == 7
# we need to yield to the event loop a few times to allow the various
# tasks to schedule themselves and get running.
for _ in range(50):
await trio.lowlevel.checkpoint()
# This is a simple test to ensure that finished tasks are removed from
# tracking to prevent unbounded memory growth.
assert len(manager._root_tasks) == 1
assert manager.stats.tasks.total_count == 10
assert manager.stats.tasks.finished_count == 3
assert manager.stats.tasks.pending_count == 7
# This is a simple test to ensure that finished tasks are removed from
# tracking to prevent unbounded memory growth.
assert len(manager._root_tasks) == 1
finally:
await manager.stop()
# now check after exiting
assert manager.stats.tasks.total_count == 10
@ -67,18 +74,26 @@ async def test_trio_manager_stats_does_not_count_main_run_method():
self.manager.run_task(trio.sleep_forever)
ready.set()
async with background_trio_service(StatsTest()) as manager:
with trio.fail_after(1):
await ready.wait()
service = StatsTest()
async with trio.open_nursery() as nursery:
manager = TrioManager(service)
nursery.start_soon(manager.run)
await manager.wait_started()
# we need to yield to the event loop a few times to allow the various
# tasks to schedule themselves and get running.
for _ in range(10):
await trio.lowlevel.checkpoint()
try:
with trio.fail_after(1):
await ready.wait()
assert manager.stats.tasks.total_count == 1
assert manager.stats.tasks.finished_count == 0
assert manager.stats.tasks.pending_count == 1
# we need to yield to the event loop a few times to allow the various
# tasks to schedule themselves and get running.
for _ in range(10):
await trio.lowlevel.checkpoint()
assert manager.stats.tasks.total_count == 1
assert manager.stats.tasks.finished_count == 0
assert manager.stats.tasks.pending_count == 1
finally:
await manager.stop()
# now check after exiting
assert manager.stats.tasks.total_count == 1

View File

@ -36,7 +36,7 @@ async def test_tcp_listener(nursery):
@pytest.mark.trio
async def test_tcp_dial(nursery):
transport = TCP()
raw_conn_other_side = None
raw_conn_other_side: RawConnection | None = None
event = trio.Event()
async def handler(tcp_stream):
@ -59,5 +59,6 @@ async def test_tcp_dial(nursery):
await event.wait()
data = b"123"
assert raw_conn_other_side is not None
await raw_conn_other_side.write(data)
assert (await raw_conn.read(len(data))) == data