Merge branch 'main' into main

Manu Sheel Gupta, 2025-06-30 07:43:32 -07:00, committed by GitHub
44 changed files with 1685 additions and 109 deletions

View File

@@ -15,6 +15,7 @@ from tests.utils.factories import (
PubsubFactory,
)
from tests.utils.pubsub.utils import (
+ connect_some,
dense_connect,
one_to_all_connect,
sparse_connect,
@@ -134,7 +135,7 @@ async def test_handle_graft(monkeypatch):
# check if it is called in `handle_graft`
event_emit_prune = trio.Event()
- async def emit_prune(topic, sender_peer_id):
+ async def emit_prune(topic, sender_peer_id, do_px, is_unsubscribe):
event_emit_prune.set()
await trio.lowlevel.checkpoint()
@@ -193,7 +194,7 @@ async def test_handle_prune():
# alice emit prune message to bob, alice should be removed
# from bob's mesh peer
- await gossipsubs[index_alice].emit_prune(topic, id_bob)
+ await gossipsubs[index_alice].emit_prune(topic, id_bob, False, False)
# `emit_prune` does not remove bob from alice's mesh peers
assert id_bob in gossipsubs[index_alice].mesh[topic]
@@ -292,7 +293,9 @@ async def test_fanout():
@pytest.mark.trio
@pytest.mark.slow
async def test_fanout_maintenance():
- async with PubsubFactory.create_batch_with_gossipsub(10) as pubsubs_gsub:
+ async with PubsubFactory.create_batch_with_gossipsub(
+ 10, unsubscribe_back_off=1
+ ) as pubsubs_gsub:
hosts = [pubsub.host for pubsub in pubsubs_gsub]
num_msgs = 5
@@ -588,3 +591,166 @@ async def test_sparse_connect():
f"received the message. Ideally all nodes should receive it, but at "
f"minimum {min_required} required for sparse network scalability."
)
@pytest.mark.trio
async def test_connect_some_with_fewer_hosts_than_degree():
"""Test connect_some when there are fewer hosts than degree."""
# Create 3 hosts with degree=5
async with PubsubFactory.create_batch_with_floodsub(3) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = 5
await connect_some(hosts, degree)
await trio.sleep(0.1) # Allow connections to establish
# With only 2 other hosts available, each host can have at most len(hosts) - 1 peers
for i, pubsub in enumerate(pubsubs_fsub):
connected_peers = len(pubsub.peers)
expected_max_connections = len(hosts) - 1 # All others
assert connected_peers <= expected_max_connections, (
f"Host {i} has {connected_peers} connections, "
f"but can only connect to {expected_max_connections} others"
)
@pytest.mark.trio
async def test_connect_some_degree_limit_enforced():
"""Test that connect_some enforces degree limits and creates expected topology."""
# Test with small network where we can verify exact behavior
async with PubsubFactory.create_batch_with_floodsub(6) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = 2
await connect_some(hosts, degree)
await trio.sleep(0.1)
# With 6 hosts and degree=2, expected connections:
# Host 0 → connects to hosts 1,2 (2 peers total)
# Host 1 → connects to hosts 2,3 (3 peers: 0,2,3)
# Host 2 → connects to hosts 3,4 (4 peers: 0,1,3,4)
# Host 3 → connects to hosts 4,5 (4 peers: 1,2,4,5)
# Host 4 → connects to host 5 (3 peers: 2,3,5)
# Host 5 → connects to no one (2 peers: 3,4)
# (see the worked model after this test)
peer_counts = [len(pubsub.peers) for pubsub in pubsubs_fsub]
# First and last hosts should have exactly degree connections
assert peer_counts[0] == degree, (
f"Host 0 should have {degree} peers, got {peer_counts[0]}"
)
assert peer_counts[-1] <= degree, (
f"Last host should have ≤ {degree} peers, got {peer_counts[-1]}"
)
# Middle hosts may have more peers due to inbound connections from
# earlier hosts, but the totals should still reflect the degree limit
total_connections = sum(peer_counts)
# Should be less than full mesh (each host connected to all others)
full_mesh_connections = len(hosts) * (len(hosts) - 1)
assert total_connections < full_mesh_connections, (
f"Got {total_connections} total connections, "
f"but full mesh would be {full_mesh_connections}"
)
# Should be more than just a chain (each host connected to next only)
chain_connections = 2 * (len(hosts) - 1) # bidirectional chain
assert total_connections > chain_connections, (
f"Got {total_connections} total connections, which is too few "
f"(chain would be {chain_connections})"
)
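The comment's arithmetic can be double-checked with a small pure-Python model of connect_some's dialing pattern (illustrative only; expected_peer_counts is not a helper in this repo):

def expected_peer_counts(n: int, degree: int) -> list[int]:
    # Mirror connect_some: host i dials the next `degree` hosts, and each
    # dial also registers as an inbound peer on the callee.
    counts = [0] * n
    for i in range(n):
        for j in range(i + 1, min(i + 1 + max(degree, 0), n)):
            counts[i] += 1
            counts[j] += 1
    return counts

# Matches the per-host comment in the test above:
assert expected_peer_counts(6, 2) == [2, 3, 4, 4, 3, 2]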
@pytest.mark.trio
async def test_connect_some_degree_zero():
"""Test edge case: degree=0 should result in no connections."""
# Create 5 hosts with degree=0
async with PubsubFactory.create_batch_with_floodsub(5) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = 0
await connect_some(hosts, degree)
await trio.sleep(0.1) # Allow any potential connections to establish
# Verify no connections were made
for i, pubsub in enumerate(pubsubs_fsub):
connected_peers = len(pubsub.peers)
assert connected_peers == 0, (
f"Host {i} has {connected_peers} connections, "
f"but degree=0 should result in no connections"
)
@pytest.mark.trio
async def test_connect_some_negative_degree():
"""Test edge case: negative degree should be handled gracefully."""
# Create 5 hosts with degree=-1
async with PubsubFactory.create_batch_with_floodsub(5) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = -1
await connect_some(hosts, degree)
await trio.sleep(0.1) # Allow any potential connections to establish
# Verify no connections were made (negative degree should behave like 0)
for i, pubsub in enumerate(pubsubs_fsub):
connected_peers = len(pubsub.peers)
assert connected_peers == 0, (
f"Host {i} has {connected_peers} connections, "
f"but negative degree should result in no connections"
)
@pytest.mark.trio
async def test_sparse_connect_degree_zero():
"""Test sparse_connect with degree=0."""
async with PubsubFactory.create_batch_with_floodsub(8) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = 0
await sparse_connect(hosts, degree)
await trio.sleep(0.1) # Allow connections to establish
# With degree=0, sparse_connect should still create neighbor connections
# for connectivity (this is part of the algorithm design)
for i, pubsub in enumerate(pubsubs_fsub):
connected_peers = len(pubsub.peers)
# Should have some connections due to neighbor connectivity
# (each node connects to immediate neighbors)
expected_neighbors = 2 # previous and next in ring
assert connected_peers >= expected_neighbors, (
f"Host {i} has {connected_peers} connections, "
f"expected at least {expected_neighbors} neighbor connections"
)
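The neighbor guarantee asserted above can be pictured with a minimal ring sketch; this is an assumption about sparse_connect's baseline behavior inferred from the comments, not its actual code:

from collections.abc import Sequence

from libp2p.abc import IHost
from libp2p.tools.utils import connect


async def ring_baseline(hosts: Sequence[IHost]) -> None:
    # Hypothetical baseline: each host dials its successor, closing a ring,
    # so every node keeps two neighbor links even when degree=0 suppresses
    # any additional random links.
    for i, host in enumerate(hosts):
        await connect(host, hosts[(i + 1) % len(hosts)])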
@pytest.mark.trio
async def test_empty_host_list():
"""Test edge case: empty host list should be handled gracefully."""
hosts = []
# All functions should handle empty lists gracefully
await connect_some(hosts, 5)
await sparse_connect(hosts, 3)
await dense_connect(hosts)
# If we reach here without exceptions, the test passes
@pytest.mark.trio
async def test_single_host():
"""Test edge case: single host should be handled gracefully."""
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
# All functions should handle single host gracefully
await connect_some(hosts, 5)
await sparse_connect(hosts, 3)
await dense_connect(hosts)
# Single host should have no connections
connected_peers = len(pubsubs_fsub[0].peers)
assert connected_peers == 0, (
f"Single host has {connected_peers} connections, expected 0"
)

View File

@@ -0,0 +1,274 @@
import pytest
import trio
from libp2p.pubsub.gossipsub import (
GossipSub,
)
from libp2p.tools.utils import (
connect,
)
from tests.utils.factories import (
PubsubFactory,
)
@pytest.mark.trio
async def test_prune_backoff():
async with PubsubFactory.create_batch_with_gossipsub(
2, heartbeat_interval=0.5, prune_back_off=2
) as pubsubs:
gsub0 = pubsubs[0].router
gsub1 = pubsubs[1].router
assert isinstance(gsub0, GossipSub)
assert isinstance(gsub1, GossipSub)
host_0 = pubsubs[0].host
host_1 = pubsubs[1].host
topic = "test_prune_backoff"
# connect hosts
await connect(host_0, host_1)
await trio.sleep(0.5)
# both join the topic
await gsub0.join(topic)
await gsub1.join(topic)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(0.5)
# ensure peer is registered in mesh
assert host_0.get_id() in gsub1.mesh[topic]
# prune host_1 from gsub0's mesh
await gsub0.emit_prune(topic, host_1.get_id(), False, False)
await trio.sleep(0.5)
# host_0 should not be in gsub1's mesh
assert host_0.get_id() not in gsub1.mesh[topic]
# try to graft again immediately (should be rejected due to backoff)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(0.5)
assert host_0.get_id() not in gsub1.mesh[topic], (
"peer should be backoffed and not re-added"
)
# try to graft again (should succeed after backoff)
await trio.sleep(2)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(1)
assert host_0.get_id() in gsub1.mesh[topic], (
"peer should be able to rejoin after backoff"
)
@pytest.mark.trio
async def test_unsubscribe_backoff():
async with PubsubFactory.create_batch_with_gossipsub(
2, heartbeat_interval=1, prune_back_off=1, unsubscribe_back_off=2
) as pubsubs:
gsub0 = pubsubs[0].router
gsub1 = pubsubs[1].router
assert isinstance(gsub0, GossipSub)
assert isinstance(gsub1, GossipSub)
host_0 = pubsubs[0].host
host_1 = pubsubs[1].host
topic = "test_unsubscribe_backoff"
# connect hosts
await connect(host_0, host_1)
await trio.sleep(0.5)
# both join the topic
await gsub0.join(topic)
await gsub1.join(topic)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(0.5)
# ensure peer is registered in mesh
assert host_0.get_id() in gsub1.mesh[topic]
# host_1 unsubscribes from the topic
await gsub1.leave(topic)
await trio.sleep(0.5)
assert topic not in gsub1.mesh
# host_1 resubscribes to the topic
await gsub1.join(topic)
await trio.sleep(0.5)
assert topic in gsub1.mesh
# try to graft again immediately (should be rejected due to backoff)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(0.5)
assert host_0.get_id() not in gsub1.mesh[topic], (
"peer should be backoffed and not re-added"
)
# try to graft again (should succeed after backoff)
await trio.sleep(1)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(1)
assert host_0.get_id() in gsub1.mesh[topic], (
"peer should be able to rejoin after backoff"
)
@pytest.mark.trio
async def test_peer_exchange():
async with PubsubFactory.create_batch_with_gossipsub(
3,
heartbeat_interval=0.5,
do_px=True,
px_peers_count=1,
) as pubsubs:
gsub0 = pubsubs[0].router
gsub1 = pubsubs[1].router
gsub2 = pubsubs[2].router
assert isinstance(gsub0, GossipSub)
assert isinstance(gsub1, GossipSub)
assert isinstance(gsub2, GossipSub)
host_0 = pubsubs[0].host
host_1 = pubsubs[1].host
host_2 = pubsubs[2].host
topic = "test_peer_exchange"
# connect hosts
await connect(host_1, host_0)
await connect(host_1, host_2)
await trio.sleep(0.5)
# all join the topic and 0 <-> 1 and 1 <-> 2 graft
await pubsubs[1].subscribe(topic)
await pubsubs[0].subscribe(topic)
await pubsubs[2].subscribe(topic)
await gsub1.emit_graft(topic, host_0.get_id())
await gsub1.emit_graft(topic, host_2.get_id())
await gsub0.emit_graft(topic, host_1.get_id())
await gsub2.emit_graft(topic, host_1.get_id())
await trio.sleep(1)
# ensure peer is registered in mesh
assert host_0.get_id() in gsub1.mesh[topic]
assert host_2.get_id() in gsub1.mesh[topic]
assert host_2.get_id() not in gsub0.mesh[topic]
# host_1 unsubscribes from the topic
await gsub1.leave(topic)
await trio.sleep(1) # Wait for heartbeat to update mesh
assert topic not in gsub1.mesh
# Wait for gsub0 to graft host_2 into its mesh via PX
await trio.sleep(1)
assert host_2.get_id() in gsub0.mesh[topic]
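The final assertion works because gsub1's PRUNE messages carry peer-exchange records when do_px is enabled. A hedged sketch of the selection (px_record_for is hypothetical; the real logic lives inside GossipSub):

def px_record_for(recipient: str, mesh_peers: list[str], px_peers_count: int) -> list[str]:
    # Advertise up to px_peers_count mesh members other than the recipient;
    # with px_peers_count=1 above, gsub0 learns exactly one peer (host_2)
    # and grafts it even though the two were never connected directly.
    candidates = [p for p in mesh_peers if p != recipient]
    return candidates[:px_peers_count]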
@pytest.mark.trio
async def test_topics_are_isolated():
async with PubsubFactory.create_batch_with_gossipsub(
2, heartbeat_interval=0.5, prune_back_off=2
) as pubsubs:
gsub0 = pubsubs[0].router
gsub1 = pubsubs[1].router
assert isinstance(gsub0, GossipSub)
assert isinstance(gsub1, GossipSub)
host_0 = pubsubs[0].host
host_1 = pubsubs[1].host
topic1 = "test_prune_backoff"
topic2 = "test_prune_backoff2"
# connect hosts
await connect(host_0, host_1)
await trio.sleep(0.5)
# both peers join both the topics
await gsub0.join(topic1)
await gsub1.join(topic1)
await gsub0.join(topic2)
await gsub1.join(topic2)
await gsub0.emit_graft(topic1, host_1.get_id())
await trio.sleep(0.5)
# ensure topic1 for peer is registered in mesh
assert host_0.get_id() in gsub1.mesh[topic1]
# prune topic1 for host_1 from gsub0's mesh
await gsub0.emit_prune(topic1, host_1.get_id(), False, False)
await trio.sleep(0.5)
# topic1 for host_0 should not be in gsub1's mesh
assert host_0.get_id() not in gsub1.mesh[topic1]
# try to regraft topic1 and graft new topic2
await gsub0.emit_graft(topic1, host_1.get_id())
await gsub0.emit_graft(topic2, host_1.get_id())
await trio.sleep(0.5)
assert host_0.get_id() not in gsub1.mesh[topic1], (
"peer should be backoffed and not re-added"
)
assert host_0.get_id() in gsub1.mesh[topic2], (
"peer should be able to join a different topic"
)
@pytest.mark.trio
async def test_stress_churn():
NUM_PEERS = 5
CHURN_CYCLES = 30
TOPIC = "stress_churn_topic"
PRUNE_BACKOFF = 1
HEARTBEAT_INTERVAL = 0.2
async with PubsubFactory.create_batch_with_gossipsub(
NUM_PEERS,
heartbeat_interval=HEARTBEAT_INTERVAL,
prune_back_off=PRUNE_BACKOFF,
) as pubsubs:
routers: list[GossipSub] = []
for ps in pubsubs:
assert isinstance(ps.router, GossipSub)
routers.append(ps.router)
hosts = [ps.host for ps in pubsubs]
# fully connect all peers
for i in range(NUM_PEERS):
for j in range(i + 1, NUM_PEERS):
await connect(hosts[i], hosts[j])
await trio.sleep(1)
# all peers join the topic
for router in routers:
await router.join(TOPIC)
await trio.sleep(1)
# rapid join/prune cycles
for cycle in range(CHURN_CYCLES):
for i, router in enumerate(routers):
# prune all other peers from this router's mesh
for j, peer_host in enumerate(hosts):
if i != j:
await router.emit_prune(TOPIC, peer_host.get_id(), False, False)
await trio.sleep(0.1)
for i, router in enumerate(routers):
# graft all other peers back
for j, peer_host in enumerate(hosts):
if i != j:
await router.emit_graft(TOPIC, peer_host.get_id())
await trio.sleep(0.1)
# wait for backoff entries to expire and cleanup
await trio.sleep(PRUNE_BACKOFF * 2)
# check that the backoff table is not unbounded
for router in routers:
# backoff is a dict: topic -> peer -> expiry
backoff = getattr(router, "back_off", None)
assert backoff is not None, "router missing backoff table"
# only a small number of entries should remain (ideally 0)
total_entries = sum(len(peers) for peers in backoff.values())
assert total_entries < NUM_PEERS * 2, (
f"backoff table grew too large: {total_entries} entries"
)
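A minimal sketch of the backoff bookkeeping these tests rely on, matching the topic → peer → expiry shape noted above (peer IDs shown as str for brevity; the real checks live in GossipSub's GRAFT handling):

import time


def graft_allowed(back_off: dict[str, dict[str, float]], topic: str, peer: str) -> bool:
    # A GRAFT is honored only after the peer's backoff entry for the topic
    # expires; entries are written on PRUNE (prune_back_off) and on
    # unsubscribe (unsubscribe_back_off), and stale ones are purged by the
    # heartbeat, which is what keeps the table bounded in test_stress_churn.
    expiry = back_off.get(topic, {}).get(peer, 0.0)
    return time.time() >= expiry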

View File

@@ -0,0 +1,91 @@
"""
Unit tests for mDNS broadcaster component.
"""
from zeroconf import Zeroconf
from libp2p.discovery.mdns.broadcaster import PeerBroadcaster
from libp2p.peer.id import ID
class TestPeerBroadcaster:
"""Unit tests for PeerBroadcaster."""
def test_broadcaster_initialization(self):
"""Test that broadcaster initializes correctly."""
zeroconf = Zeroconf()
service_type = "_p2p._udp.local."
service_name = "test-peer._p2p._udp.local."
peer_id = (
"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" # String, not ID object
)
port = 8000
broadcaster = PeerBroadcaster(
zeroconf=zeroconf,
service_type=service_type,
service_name=service_name,
peer_id=peer_id,
port=port,
)
assert broadcaster.zeroconf == zeroconf
assert broadcaster.service_type == service_type
assert broadcaster.service_name == service_name
assert broadcaster.peer_id == peer_id
assert broadcaster.port == port
# Clean up
zeroconf.close()
def test_broadcaster_service_creation(self):
"""Test that broadcaster creates valid service info."""
zeroconf = Zeroconf()
service_type = "_p2p._udp.local."
service_name = "test-peer2._p2p._udp.local."
peer_id_obj = ID.from_base58("QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN")
peer_id = str(peer_id_obj) # Convert to string
port = 8000
broadcaster = PeerBroadcaster(
zeroconf=zeroconf,
service_type=service_type,
service_name=service_name,
peer_id=peer_id,
port=port,
)
# Verify service was created and registered
service_info = broadcaster.service_info
assert service_info is not None
assert service_info.type == service_type
assert service_info.name == service_name
assert service_info.port == port
assert b"id" in service_info.properties
assert service_info.properties[b"id"] == peer_id.encode()
# Clean up
zeroconf.close()
def test_broadcaster_start_stop(self):
"""Test that broadcaster can start and stop correctly."""
zeroconf = Zeroconf()
service_type = "_p2p._udp.local."
service_name = "test-start-stop._p2p._udp.local."
peer_id_obj = ID.from_base58("QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N")
peer_id = str(peer_id_obj) # Convert to string
port = 8001
broadcaster = PeerBroadcaster(
zeroconf=zeroconf,
service_type=service_type,
service_name=service_name,
peer_id=peer_id,
port=port,
)
# Service should be registered
assert broadcaster.service_info is not None
# Clean up
zeroconf.close()
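Taken together, these assertions pin down the record the broadcaster registers. A sketch of an equivalent ServiceInfo, mirroring the construction used in the listener tests later in this diff (make_service_info and the loopback address are assumptions, not repo code):

import socket

from zeroconf import ServiceInfo


def make_service_info(
    service_type: str, service_name: str, peer_id: str, port: int
) -> ServiceInfo:
    # The TXT record carries the base58 peer ID under the b"id" key,
    # which is exactly what test_broadcaster_service_creation asserts.
    return ServiceInfo(
        type_=service_type,
        name=service_name,
        port=port,
        properties={b"id": peer_id.encode()},
        server=f"{socket.gethostname()}.local.",
        addresses=[socket.inet_aton("127.0.0.1")],
    )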

View File

@@ -0,0 +1,114 @@
"""
Unit tests for mDNS listener component.
"""
import socket
from zeroconf import ServiceInfo, Zeroconf
from libp2p.abc import Multiaddr
from libp2p.discovery.mdns.listener import PeerListener
from libp2p.peer.id import ID
from libp2p.peer.peerstore import PeerStore
class TestPeerListener:
"""Unit tests for PeerListener."""
def test_listener_initialization(self):
"""Test that listener initializes correctly."""
peerstore = PeerStore()
zeroconf = Zeroconf()
service_type = "_p2p._udp.local."
service_name = "local-peer._p2p._udp.local."
listener = PeerListener(
peerstore=peerstore,
zeroconf=zeroconf,
service_type=service_type,
service_name=service_name,
)
assert listener.peerstore == peerstore
assert listener.zeroconf == zeroconf
assert listener.service_type == service_type
assert listener.service_name == service_name
assert listener.discovered_services == {}
# Clean up
listener.stop()
zeroconf.close()
def test_listener_extract_peer_info_success(self):
"""Test successful PeerInfo extraction from ServiceInfo."""
peerstore = PeerStore()
zeroconf = Zeroconf()
listener = PeerListener(
peerstore=peerstore,
zeroconf=zeroconf,
service_type="_p2p._udp.local.",
service_name="local._p2p._udp.local.",
)
# Create sample service info
sample_peer_id = ID.from_base58(
"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"
)
hostname = socket.gethostname()
local_ip = "192.168.1.100"
sample_service_info = ServiceInfo(
type_="_p2p._udp.local.",
name="test-peer._p2p._udp.local.",
port=8000,
properties={b"id": str(sample_peer_id).encode()},
server=f"{hostname}.local.",
addresses=[socket.inet_aton(local_ip)],
)
peer_info = listener._extract_peer_info(sample_service_info)
assert peer_info is not None
assert isinstance(peer_info.peer_id, ID)
assert len(peer_info.addrs) > 0
assert all(isinstance(addr, Multiaddr) for addr in peer_info.addrs)
# Check that protocol is TCP since we always use TCP
assert "/tcp/" in str(peer_info.addrs[0])
# Clean up
listener.stop()
zeroconf.close()
def test_listener_extract_peer_info_invalid_id(self):
"""Test PeerInfo extraction fails with invalid peer ID."""
peerstore = PeerStore()
zeroconf = Zeroconf()
listener = PeerListener(
peerstore=peerstore,
zeroconf=zeroconf,
service_type="_p2p._udp.local.",
service_name="local._p2p._udp.local.",
)
# Create service info with invalid peer ID
hostname = socket.gethostname()
local_ip = "192.168.1.100"
service_info = ServiceInfo(
type_="_p2p._udp.local.",
name="invalid-peer._p2p._udp.local.",
port=8000,
properties={b"id": b"invalid_peer_id_format"},
server=f"{hostname}.local.",
addresses=[socket.inet_aton(local_ip)],
)
peer_info = listener._extract_peer_info(service_info)
assert peer_info is None
# Clean up
listener.stop()
zeroconf.close()
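The two extraction tests above jointly imply roughly the following behavior. This is a sketch, not the actual PeerListener._extract_peer_info, and the PeerInfo import path is an assumption:

import socket

from multiaddr import Multiaddr
from zeroconf import ServiceInfo

from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo


def extract_peer_info(info: ServiceInfo) -> PeerInfo | None:
    # Decode the base58 peer ID from the TXT b"id" property; a malformed
    # ID invalidates the whole record, hence None.
    try:
        peer_id = ID.from_base58(info.properties[b"id"].decode())
    except Exception:
        return None
    # Always advertise TCP, matching the "/tcp/" assertions above.
    addrs = [
        Multiaddr(f"/ip4/{socket.inet_ntoa(raw)}/tcp/{info.port}")
        for raw in info.addresses
    ]
    return PeerInfo(peer_id, addrs)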

View File

@@ -0,0 +1,121 @@
"""
Comprehensive integration tests for mDNS discovery functionality.
"""
import socket
from zeroconf import Zeroconf
from libp2p.discovery.mdns.broadcaster import PeerBroadcaster
from libp2p.discovery.mdns.listener import PeerListener
from libp2p.peer.id import ID
from libp2p.peer.peerstore import PeerStore
class TestMDNSDiscovery:
"""Comprehensive integration tests for mDNS peer discovery."""
def test_one_host_finds_another(self):
"""Test that one host can find another host using mDNS."""
# Create two separate Zeroconf instances to simulate different hosts
host1_zeroconf = Zeroconf()
host2_zeroconf = Zeroconf()
try:
# Host 1: Set up as broadcaster (the host to be discovered)
host1_peer_id_obj = ID.from_base58(
"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"
)
host1_peer_id = str(host1_peer_id_obj) # Convert to string
host1_broadcaster = PeerBroadcaster(
zeroconf=host1_zeroconf,
service_type="_p2p._udp.local.",
service_name="host1._p2p._udp.local.",
peer_id=host1_peer_id,
port=8000,
)
# Host 2: Set up as listener (the host that discovers others)
host2_peerstore = PeerStore()
host2_listener = PeerListener(
peerstore=host2_peerstore,
zeroconf=host2_zeroconf,
service_type="_p2p._udp.local.",
service_name="host2._p2p._udp.local.",
)
# Host 1 registers its service for discovery
host1_broadcaster.register()
# Verify that host2 discovered host1
assert len(host2_listener.discovered_services) > 0
assert "host1._p2p._udp.local." in host2_listener.discovered_services
# Verify that host1's peer info was added to host2's peerstore
discovered_peer_id = host2_listener.discovered_services[
"host1._p2p._udp.local."
]
assert str(discovered_peer_id) == host1_peer_id
# Verify addresses were added to peerstore
try:
addrs = host2_peerstore.addrs(discovered_peer_id)
assert len(addrs) > 0
# Should be TCP since we always use TCP protocol
assert "/tcp/8000" in str(addrs[0])
except Exception:
# If no addresses found, the discovery didn't work properly
assert False, "Host1 addresses should be in Host2's peerstore"
# Clean up
host1_broadcaster.unregister()
host2_listener.stop()
finally:
host1_zeroconf.close()
host2_zeroconf.close()
def test_service_info_extraction(self):
"""Test service info extraction functionality."""
peerstore = PeerStore()
zeroconf = Zeroconf()
try:
listener = PeerListener(
peerstore=peerstore,
zeroconf=zeroconf,
service_type="_p2p._udp.local.",
service_name="test-listener._p2p._udp.local.",
)
# Create a test service info
test_peer_id = ID.from_base58(
"QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N"
)
hostname = socket.gethostname()
from zeroconf import ServiceInfo
service_info = ServiceInfo(
type_="_p2p._udp.local.",
name="test-service._p2p._udp.local.",
port=8001,
properties={b"id": str(test_peer_id).encode()},
server=f"{hostname}.local.",
addresses=[socket.inet_aton("192.168.1.100")],
)
# Test extraction
peer_info = listener._extract_peer_info(service_info)
assert peer_info is not None
assert peer_info.peer_id == test_peer_id
assert len(peer_info.addrs) == 1
assert "/tcp/8001" in str(peer_info.addrs[0])
print("✅ Service info extraction test successful!")
print(f" Extracted peer ID: {peer_info.peer_id}")
print(f" Extracted addresses: {[str(addr) for addr in peer_info.addrs]}")
finally:
zeroconf.close()

View File

@@ -0,0 +1,39 @@
"""
Basic unit tests for mDNS utils module.
"""
import string
from libp2p.discovery.mdns.utils import stringGen
class TestStringGen:
"""Unit tests for stringGen function."""
def test_stringgen_default_length(self):
"""Test stringGen with default length (63)."""
result = stringGen()
assert isinstance(result, str)
assert len(result) == 63
# Check that all characters are from the expected charset
charset = string.ascii_lowercase + string.digits
for char in result:
assert char in charset
def test_stringgen_custom_length(self):
"""Test stringGen with custom lengths."""
# Test various lengths
test_lengths = [1, 5, 10, 20, 50, 100]
for length in test_lengths:
result = stringGen(length)
assert isinstance(result, str)
assert len(result) == length
# Check that all characters are from the expected charset
charset = string.ascii_lowercase + string.digits
for char in result:
assert char in charset
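For reference, an implementation consistent with these assertions (a sketch; the real stringGen lives in libp2p/discovery/mdns/utils.py and may differ):

import random
import string


def string_gen(length: int = 63) -> str:
    # Random lowercase-plus-digits string; the default of 63 matches the
    # maximum DNS label length that mDNS service names must respect.
    charset = string.ascii_lowercase + string.digits
    return "".join(random.choice(charset) for _ in range(length))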

View File

@@ -443,6 +443,10 @@ class GossipsubFactory(factory.Factory):
heartbeat_interval = GOSSIPSUB_PARAMS.heartbeat_interval
direct_connect_initial_delay = GOSSIPSUB_PARAMS.direct_connect_initial_delay
direct_connect_interval = GOSSIPSUB_PARAMS.direct_connect_interval
+ do_px = GOSSIPSUB_PARAMS.do_px
+ px_peers_count = GOSSIPSUB_PARAMS.px_peers_count
+ prune_back_off = GOSSIPSUB_PARAMS.prune_back_off
+ unsubscribe_back_off = GOSSIPSUB_PARAMS.unsubscribe_back_off
class PubsubFactory(factory.Factory):
@@ -568,6 +572,10 @@ class PubsubFactory(factory.Factory):
heartbeat_initial_delay: float = GOSSIPSUB_PARAMS.heartbeat_initial_delay,
direct_connect_initial_delay: float = GOSSIPSUB_PARAMS.direct_connect_initial_delay, # noqa: E501
direct_connect_interval: int = GOSSIPSUB_PARAMS.direct_connect_interval,
+ do_px: bool = GOSSIPSUB_PARAMS.do_px,
+ px_peers_count: int = GOSSIPSUB_PARAMS.px_peers_count,
+ prune_back_off: int = GOSSIPSUB_PARAMS.prune_back_off,
+ unsubscribe_back_off: int = GOSSIPSUB_PARAMS.unsubscribe_back_off,
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
msg_id_constructor: None
@@ -588,6 +596,10 @@ class PubsubFactory(factory.Factory):
heartbeat_interval=heartbeat_interval,
direct_connect_initial_delay=direct_connect_initial_delay,
direct_connect_interval=direct_connect_interval,
+ do_px=do_px,
+ px_peers_count=px_peers_count,
+ prune_back_off=prune_back_off,
+ unsubscribe_back_off=unsubscribe_back_off,
)
else:
gossipsubs = GossipsubFactory.create_batch(
@@ -602,6 +614,10 @@ class PubsubFactory(factory.Factory):
heartbeat_initial_delay=heartbeat_initial_delay,
direct_connect_initial_delay=direct_connect_initial_delay,
direct_connect_interval=direct_connect_interval,
+ do_px=do_px,
+ px_peers_count=px_peers_count,
+ prune_back_off=prune_back_off,
+ unsubscribe_back_off=unsubscribe_back_off,
)
async with cls._create_batch_with_router(

View File

@@ -24,16 +24,22 @@ def make_pubsub_msg(
)
- # TODO: Implement sparse connect
async def dense_connect(hosts: Sequence[IHost]) -> None:
await connect_some(hosts, 10)
- # FIXME: `degree` is not used at all
async def connect_some(hosts: Sequence[IHost], degree: int) -> None:
+ """
+ Connect each host to up to 'degree' number of other hosts.
+ Creates a sparse network topology where each node has limited connections.
+ """
for i, host in enumerate(hosts):
- for host2 in hosts[i + 1 :]:
- await connect(host, host2)
+ connections_made = 0
+ for j in range(i + 1, len(hosts)):
+ if connections_made >= degree:
+ break
+ await connect(host, hosts[j])
+ connections_made += 1
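Because connect_some only dials forward (host i to the next degree hosts), inbound links can push an interior node's peer count above degree; the new tests earlier in this diff assert ranges rather than exact counts for precisely this reason.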
async def one_to_all_connect(hosts: Sequence[IHost], central_host_index: int) -> None: