Merge branch 'main' into main

This commit is contained in:
Manu Sheel Gupta
2025-06-30 07:43:32 -07:00
committed by GitHub
44 changed files with 1685 additions and 109 deletions

64
docs/examples.mDNS.rst Normal file
View File

@ -0,0 +1,64 @@
mDNS Peer Discovery Example
===========================
This example demonstrates how to use mDNS (Multicast DNS) for peer discovery in py-libp2p.
Prerequisites
-------------
First, ensure you have py-libp2p installed and your environment is activated:
.. code-block:: console
$ python -m pip install libp2p
Running the Example
-------------------
The demo script discovers peers on your local network over mDNS. To start a peer, run:
.. code-block:: console
$ mdns-demo
You should see output similar to:
.. code-block:: console
Run this from the same folder in another console to start another peer on a different port:
mdns-demo -p <ANOTHER_PORT>
Waiting for mDNS peer discovery events...
2025-06-20 23:28:12,052 - libp2p.discovery.mdns - INFO - Starting peer Discovery
To discover peers, open another terminal and run the same command with a different port:
.. code-block:: console
$ mdns-demo -p 9001
You should see output indicating that a new peer has been discovered:
.. code-block:: console
Run this from the same folder in another console to start another peer on a different port:
mdns-demo -p <ANOTHER_PORT>
Waiting for mDNS peer discovery events...
2025-06-20 23:43:43,786 - libp2p.discovery.mdns - INFO - Starting peer Discovery
2025-06-20 23:43:43,790 - libp2p.discovery.mdns - INFO - Discovered: 16Uiu2HAmGxy5NdQEjZWtrYUMrzdp3Syvg7MB2E5Lx8weA9DanYxj
When a new peer is discovered, its peer ID will be printed in the console output.
How it Works
------------
- Each node advertises itself on the local network using mDNS.
- When a new peer is discovered, the handler prints its peer ID.
- This is useful for local peer discovery without requiring a DHT or bootstrap nodes.
You can modify the script to perform additional actions when peers are discovered, such as opening streams or exchanging messages.
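For instance, a handler can inspect a discovered peer's addresses. This is a minimal sketch (the handler name is illustrative, not part of the API), using only the ``peerDiscovery`` emitter this example already relies on:

.. code-block:: python

    from libp2p.abc import PeerInfo
    from libp2p.discovery.events.peerDiscovery import peerDiscovery

    def log_peer_addrs(peerinfo: PeerInfo) -> None:
        # Handlers are synchronous callbacks; keep them lightweight.
        print(f"Peer {peerinfo.peer_id} reachable at:")
        for addr in peerinfo.addrs:
            print(f"  {addr}")

    peerDiscovery.register_peer_discovered_handler(log_peer_addrs)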

View File

@ -13,3 +13,4 @@ Examples
examples.pubsub
examples.circuit_relay
examples.kademlia
examples.mDNS

View File

@ -0,0 +1,21 @@
libp2p.discovery.events package
===============================
Submodules
----------
libp2p.discovery.events.peerDiscovery module
--------------------------------------------
.. automodule:: libp2p.discovery.events.peerDiscovery
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: libp2p.discovery.events
:members:
:undoc-members:
:show-inheritance:

View File

@ -0,0 +1,45 @@
libp2p.discovery.mdns package
=============================
Submodules
----------
libp2p.discovery.mdns.broadcaster module
----------------------------------------
.. automodule:: libp2p.discovery.mdns.broadcaster
:members:
:undoc-members:
:show-inheritance:
libp2p.discovery.mdns.listener module
-------------------------------------
.. automodule:: libp2p.discovery.mdns.listener
:members:
:undoc-members:
:show-inheritance:
libp2p.discovery.mdns.mdns module
---------------------------------
.. automodule:: libp2p.discovery.mdns.mdns
:members:
:undoc-members:
:show-inheritance:
libp2p.discovery.mdns.utils module
----------------------------------
.. automodule:: libp2p.discovery.mdns.utils
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: libp2p.discovery.mdns
:members:
:undoc-members:
:show-inheritance:

22
docs/libp2p.discovery.rst Normal file
View File

@ -0,0 +1,22 @@
libp2p.discovery package
========================
Subpackages
-----------
.. toctree::
:maxdepth: 4
libp2p.discovery.events
libp2p.discovery.mdns
Submodules
----------
Module contents
---------------
.. automodule:: libp2p.discovery
:members:
:undoc-members:
:show-inheritance:

View File

@ -8,6 +8,7 @@ Subpackages
:maxdepth: 4
libp2p.crypto
libp2p.discovery
libp2p.host
libp2p.identity
libp2p.io

74
examples/mDNS/mDNS.py Normal file
View File

@ -0,0 +1,74 @@
import argparse
import logging
import secrets
import multiaddr
import trio
from libp2p import (
new_host,
)
from libp2p.abc import PeerInfo
from libp2p.crypto.secp256k1 import (
create_new_key_pair,
)
from libp2p.discovery.events.peerDiscovery import peerDiscovery
logger = logging.getLogger("libp2p.discovery.mdns")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
logger.addHandler(handler)
# Set root logger to DEBUG to capture all logs from dependencies
logging.getLogger().setLevel(logging.DEBUG)
def onPeerDiscovery(peerinfo: PeerInfo):
logger.info(f"Discovered: {peerinfo.peer_id}")
async def run(port: int) -> None:
secret = secrets.token_bytes(32)
key_pair = create_new_key_pair(secret)
listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
peerDiscovery.register_peer_discovered_handler(onPeerDiscovery)
print(
"Run this from the same folder in another console to "
"start another peer on a different port:\n\n"
"mdns-demo -p <ANOTHER_PORT>\n"
)
print("Waiting for mDNS peer discovery events...\n")
logger.info("Starting peer Discovery")
host = new_host(key_pair=key_pair, enable_mDNS=True)
async with host.run(listen_addrs=[listen_addr]):
await trio.sleep_forever()
def main() -> None:
description = """
This program demonstrates mDNS peer discovery using libp2p.
To use it, run 'mdns-demo -p <PORT>', where <PORT> is the port number.
Start multiple peers on different ports to see discovery in action.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose output"
)
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
try:
trio.run(run, args.port)
except KeyboardInterrupt:
logger.info("Exiting...")
if __name__ == "__main__":
main()

View File

@ -32,6 +32,9 @@ from libp2p.custom_types import (
TProtocol,
TSecurityOptions,
)
from libp2p.discovery.mdns.mdns import (
MDNSDiscovery,
)
from libp2p.host.basic_host import (
BasicHost,
)
@ -245,6 +248,7 @@ def new_host(
disc_opt: IPeerRouting | None = None,
muxer_preference: Literal["YAMUX", "MPLEX"] | None = None,
listen_addrs: Sequence[multiaddr.Multiaddr] | None = None,
enable_mDNS: bool = False,
) -> IHost:
"""
Create a new libp2p host based on the given parameters.
@ -256,6 +260,7 @@ def new_host(
:param disc_opt: optional discovery
:param muxer_preference: optional explicit muxer preference
:param listen_addrs: optional list of multiaddrs to listen on
:param enable_mDNS: whether to enable mDNS discovery
:return: return a host instance
"""
swarm = new_swarm(
@ -268,8 +273,7 @@ def new_host(
)
if disc_opt is not None:
return RoutedHost(swarm, disc_opt)
return BasicHost(swarm)
return RoutedHost(swarm, disc_opt, enable_mDNS)
return BasicHost(swarm, enable_mDNS)
__version__ = __version("libp2p")
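A minimal usage sketch of the new flag, mirroring the example script above (the key material and port are illustrative):

.. code-block:: python

    import secrets

    import multiaddr
    import trio

    from libp2p import new_host
    from libp2p.crypto.secp256k1 import create_new_key_pair

    async def serve() -> None:
        host = new_host(
            key_pair=create_new_key_pair(secrets.token_bytes(32)),
            enable_mDNS=True,
        )
        listen = multiaddr.Multiaddr("/ip4/0.0.0.0/tcp/9000")
        async with host.run(listen_addrs=[listen]):
            await trio.sleep_forever()

    trio.run(serve)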

View File

@ -0,0 +1,26 @@
from collections.abc import (
Callable,
)
from libp2p.abc import (
PeerInfo,
)
TTL: int = 60 * 60 # Time-to-live for discovered peers in seconds
class PeerDiscovery:
def __init__(self) -> None:
self._peer_discovered_handlers: list[Callable[[PeerInfo], None]] = []
def register_peer_discovered_handler(
self, handler: Callable[[PeerInfo], None]
) -> None:
self._peer_discovered_handlers.append(handler)
def emit_peer_discovered(self, peer_info: PeerInfo) -> None:
for handler in self._peer_discovered_handlers:
handler(peer_info)
peerDiscovery = PeerDiscovery()
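A short usage sketch of the emitter (the handler is a plain synchronous callable; the names here are illustrative):

.. code-block:: python

    from libp2p.discovery.events.peerDiscovery import peerDiscovery

    def on_peer(peer_info):
        print("discovered", peer_info.peer_id)

    peerDiscovery.register_peer_discovered_handler(on_peer)
    # The mDNS listener calls emit_peer_discovered(), which invokes every
    # registered handler in registration order.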

View File

@ -0,0 +1,91 @@
import logging
import socket
from zeroconf import (
EventLoopBlocked,
ServiceInfo,
Zeroconf,
)
logger = logging.getLogger("libp2p.discovery.mdns.broadcaster")
class PeerBroadcaster:
"""
Broadcasts this peer's presence on the local network using mDNS/zeroconf.
Registers a service carrying the peer's ID in its TXT record, per the libp2p mDNS spec.
"""
def __init__(
self,
zeroconf: Zeroconf,
service_type: str,
service_name: str,
peer_id: str,
port: int,
):
self.zeroconf = zeroconf
self.service_type = service_type
self.peer_id = peer_id
self.port = port
self.service_name = service_name
# Get the local IP address
local_ip = self._get_local_ip()
hostname = socket.gethostname()
self.service_info = ServiceInfo(
type_=self.service_type,
name=self.service_name,
port=self.port,
properties={b"id": self.peer_id.encode()},
server=f"{hostname}.local.",
addresses=[socket.inet_aton(local_ip)],
)
def _get_local_ip(self) -> str:
"""Get the local IP address of this machine"""
try:
# Connect to a remote address to determine the local IP
# This doesn't actually send data
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.connect(("8.8.8.8", 80))
local_ip = s.getsockname()[0]
return local_ip
except Exception:
# Fallback to localhost if we can't determine the IP
return "127.0.0.1"
def register(self) -> None:
"""Register the peer's mDNS service on the network."""
try:
self.zeroconf.register_service(self.service_info)
logger.debug(f"mDNS service registered: {self.service_name}")
except EventLoopBlocked as e:
logger.warning(
"EventLoopBlocked while registering mDNS '%s': %s", self.service_name, e
)
except Exception as e:
logger.error(
"Unexpected error during mDNS registration for '%s': %r",
self.service_name,
e,
)
def unregister(self) -> None:
"""Unregister the peer's mDNS service from the network."""
try:
self.zeroconf.unregister_service(self.service_info)
logger.debug(f"mDNS service unregistered: {self.service_name}")
except EventLoopBlocked as e:
logger.warning(
"EventLoopBlocked while unregistering mDNS '%s': %s",
self.service_name,
e,
)
except Exception as e:
logger.error(
"Unexpected error during mDNS unregistration for '%s': %r",
self.service_name,
e,
)
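Driving the broadcaster by hand looks roughly like this (normally MDNSDiscovery wires it up; the service name and peer ID are illustrative):

.. code-block:: python

    from zeroconf import Zeroconf

    from libp2p.discovery.mdns.broadcaster import PeerBroadcaster

    zc = Zeroconf()
    broadcaster = PeerBroadcaster(
        zeroconf=zc,
        service_type="_p2p._udp.local.",
        service_name="example-peer._p2p._udp.local.",
        peer_id="QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
        port=8000,
    )
    broadcaster.register()    # announce this peer on the LAN
    # ... the peer is now discoverable ...
    broadcaster.unregister()  # withdraw the announcement
    zc.close()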

View File

@ -0,0 +1,83 @@
import logging
import socket
from zeroconf import (
ServiceBrowser,
ServiceInfo,
ServiceListener,
Zeroconf,
)
from libp2p.abc import IPeerStore, Multiaddr
from libp2p.discovery.events.peerDiscovery import peerDiscovery
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo
logger = logging.getLogger("libp2p.discovery.mdns.listener")
class PeerListener(ServiceListener):
"""mDNS listener — now a true ServiceListener subclass."""
def __init__(
self,
peerstore: IPeerStore,
zeroconf: Zeroconf,
service_type: str,
service_name: str,
) -> None:
self.peerstore = peerstore
self.zeroconf = zeroconf
self.service_type = service_type
self.service_name = service_name
self.discovered_services: dict[str, ID] = {}
self.browser = ServiceBrowser(self.zeroconf, self.service_type, listener=self)
def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
if name == self.service_name:
return
logger.debug(f"Adding service: {name}")
info = zc.get_service_info(type_, name, timeout=5000)
if not info:
return
peer_info = self._extract_peer_info(info)
if peer_info:
self.discovered_services[name] = peer_info.peer_id
self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)
peerDiscovery.emit_peer_discovered(peer_info)
logger.debug(f"Discovered Peer: {peer_info.peer_id}")
def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
if name == self.service_name:
return
logger.debug(f"Removing service: {name}")
peer_id = self.discovered_services.pop(name, None)
if peer_id is None:
    return
self.peerstore.clear_addrs(peer_id)
logger.debug(f"Removed Peer: {peer_id}")
def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
info = zc.get_service_info(type_, name, timeout=5000)
if not info:
return
peer_info = self._extract_peer_info(info)
if peer_info:
self.peerstore.clear_addrs(peer_info.peer_id)
self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)
logger.debug(f"Updated Peer {peer_info.peer_id}")
def _extract_peer_info(self, info: ServiceInfo) -> PeerInfo | None:
try:
addrs = [
Multiaddr(f"/ip4/{socket.inet_ntoa(addr)}/tcp/{info.port}")
for addr in info.addresses
]
pid_bytes = info.properties.get(b"id")
if not pid_bytes:
return None
pid = ID.from_base58(pid_bytes.decode())
return PeerInfo(peer_id=pid, addrs=addrs)
except Exception:
return None
def stop(self) -> None:
self.browser.cancel()
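A standalone sketch of the listener's lifecycle (normally constructed by MDNSDiscovery; the service name is illustrative, and services matching it are skipped so a peer does not discover itself):

.. code-block:: python

    from zeroconf import Zeroconf

    from libp2p.discovery.mdns.listener import PeerListener
    from libp2p.peer.peerstore import PeerStore

    zc = Zeroconf()
    listener = PeerListener(
        peerstore=PeerStore(),
        zeroconf=zc,
        service_type="_p2p._udp.local.",
        service_name="my-own-name._p2p._udp.local.",
    )
    # The ServiceBrowser started in __init__ now drives add_service /
    # update_service / remove_service as peers come and go.
    listener.stop()
    zc.close()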

View File

@ -0,0 +1,73 @@
"""
mDNS-based peer discovery for py-libp2p.
Conforms to https://github.com/libp2p/specs/blob/master/discovery/mdns.md
Uses zeroconf for mDNS broadcast/listen. Async operations use trio.
"""
import logging
from zeroconf import (
Zeroconf,
)
from libp2p.abc import (
INetworkService,
)
from .broadcaster import (
PeerBroadcaster,
)
from .listener import (
PeerListener,
)
from .utils import (
stringGen,
)
logger = logging.getLogger("libp2p.discovery.mdns")
SERVICE_TYPE = "_p2p._udp.local."
MCAST_PORT = 5353
MCAST_ADDR = "224.0.0.251"
class MDNSDiscovery:
"""
mDNS-based peer discovery for py-libp2p, using zeroconf.
Conforms to the libp2p mDNS discovery spec.
"""
def __init__(self, swarm: INetworkService, port: int = 8000):
self.peer_id = str(swarm.get_peer_id())
self.port = port
self.zeroconf = Zeroconf()
self.serviceName = f"{stringGen()}.{SERVICE_TYPE}"
self.peerstore = swarm.peerstore
self.swarm = swarm
self.broadcaster = PeerBroadcaster(
zeroconf=self.zeroconf,
service_type=SERVICE_TYPE,
service_name=self.serviceName,
peer_id=self.peer_id,
port=self.port,
)
self.listener = PeerListener(
zeroconf=self.zeroconf,
peerstore=self.peerstore,
service_type=SERVICE_TYPE,
service_name=self.serviceName,
)
def start(self) -> None:
"""Register this peer and start listening for others."""
logger.debug(
f"Starting mDNS discovery for peer {self.peer_id} on port {self.port}"
)
self.broadcaster.register()
# Listener is started in constructor
def stop(self) -> None:
"""Unregister this peer and clean up zeroconf resources."""
logger.debug("Stopping mDNS discovery")
self.broadcaster.unregister()
self.zeroconf.close()
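BasicHost constructs this object automatically when enable_mDNS=True; the lifecycle, shown directly (a sketch assuming new_swarm() works with default arguments):

.. code-block:: python

    from libp2p import new_swarm
    from libp2p.discovery.mdns.mdns import MDNSDiscovery

    swarm = new_swarm()
    discovery = MDNSDiscovery(swarm, port=8000)
    discovery.start()  # register this peer; the listener is already browsing
    # ... run the host ...
    discovery.stop()   # unregister and close zeroconf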

View File

@ -0,0 +1,11 @@
import random
import string
def stringGen(length: int = 63) -> str:
"""Generate a random string of lowercase letters and digits."""
charset = string.ascii_lowercase + string.digits
return "".join(random.choice(charset) for _ in range(length))

View File

@ -29,6 +29,7 @@ from libp2p.custom_types import (
StreamHandlerFn,
TProtocol,
)
from libp2p.discovery.mdns.mdns import MDNSDiscovery
from libp2p.host.defaults import (
get_default_protocols,
)
@ -89,6 +90,7 @@ class BasicHost(IHost):
def __init__(
self,
network: INetworkService,
enable_mDNS: bool = False,
default_protocols: Optional["OrderedDict[TProtocol, StreamHandlerFn]"] = None,
) -> None:
self._network = network
@ -98,6 +100,8 @@ class BasicHost(IHost):
default_protocols = default_protocols or get_default_protocols(self)
self.multiselect = Multiselect(dict(default_protocols.items()))
self.multiselect_client = MultiselectClient()
if enable_mDNS:
self.mDNS = MDNSDiscovery(network)
def get_id(self) -> ID:
"""
@ -162,7 +166,14 @@ class BasicHost(IHost):
network = self.get_network()
async with background_trio_service(network):
await network.listen(*listen_addrs)
yield
if hasattr(self, "mDNS") and self.mDNS is not None:
logger.debug("Starting mDNS Discovery")
self.mDNS.start()
try:
yield
finally:
if hasattr(self, "mDNS") and self.mDNS is not None:
self.mDNS.stop()
return _run()

View File

@ -18,8 +18,10 @@ from libp2p.peer.peerinfo import (
class RoutedHost(BasicHost):
_router: IPeerRouting
def __init__(self, network: INetworkService, router: IPeerRouting):
super().__init__(network)
def __init__(
self, network: INetworkService, router: IPeerRouting, enable_mDNS: bool = False
):
super().__init__(network, enable_mDNS)
self._router = router
async def connect(self, peer_info: PeerInfo) -> None:

View File

@ -66,5 +66,23 @@ def info_from_p2p_addr(addr: multiaddr.Multiaddr) -> PeerInfo:
return PeerInfo(peer_id, [addr])
def peer_info_to_bytes(peer_info: PeerInfo) -> bytes:
lines = [str(peer_info.peer_id)] + [str(addr) for addr in peer_info.addrs]
return "\n".join(lines).encode("utf-8")
def peer_info_from_bytes(data: bytes) -> PeerInfo:
try:
lines = data.decode("utf-8").splitlines()
if not lines:
raise InvalidAddrError("no data to decode PeerInfo")
peer_id = ID.from_base58(lines[0])
addrs = [multiaddr.Multiaddr(addr_str) for addr_str in lines[1:]]
return PeerInfo(peer_id, addrs)
except Exception as e:
raise InvalidAddrError(f"failed to decode PeerInfo: {e}")
class InvalidAddrError(ValueError):
pass
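A round-trip sketch of the new helpers (the peer ID and address are illustrative):

.. code-block:: python

    import multiaddr

    from libp2p.peer.id import ID
    from libp2p.peer.peerinfo import (
        PeerInfo,
        peer_info_from_bytes,
        peer_info_to_bytes,
    )

    pid = ID.from_base58("QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN")
    info = PeerInfo(pid, [multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/4001")])
    data = peer_info_to_bytes(info)  # newline-separated: peer ID, then addrs
    restored = peer_info_from_bytes(data)
    assert restored.peer_id == pid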

View File

@ -12,15 +12,9 @@ from libp2p.abc import (
from libp2p.custom_types import (
TProtocol,
)
from libp2p.network.stream.exceptions import (
StreamClosed,
)
from libp2p.peer.id import (
ID,
)
from libp2p.utils import (
encode_varint_prefixed,
)
from .exceptions import (
PubsubRouterError,
@ -120,13 +114,7 @@ class FloodSub(IPubsubRouter):
if peer_id not in pubsub.peers:
continue
stream = pubsub.peers[peer_id]
# FIXME: We should add a `WriteMsg` similar to write delimited messages.
# Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107
try:
await stream.write(encode_varint_prefixed(rpc_msg.SerializeToString()))
except StreamClosed:
logger.debug("Fail to publish message to %s: stream closed", peer_id)
pubsub._handle_dead_peer(peer_id)
await pubsub.write_msg(stream, rpc_msg)
async def join(self, topic: str) -> None:
"""

View File

@ -24,14 +24,13 @@ from libp2p.abc import (
from libp2p.custom_types import (
TProtocol,
)
from libp2p.network.stream.exceptions import (
StreamClosed,
)
from libp2p.peer.id import (
ID,
)
from libp2p.peer.peerinfo import (
PeerInfo,
peer_info_from_bytes,
peer_info_to_bytes,
)
from libp2p.peer.peerstore import (
PERMANENT_ADDR_TTL,
@ -42,9 +41,6 @@ from libp2p.pubsub import (
from libp2p.tools.async_service import (
Service,
)
from libp2p.utils import (
encode_varint_prefixed,
)
from .exceptions import (
NoPubsubAttached,
@ -92,6 +88,12 @@ class GossipSub(IPubsubRouter, Service):
direct_connect_initial_delay: float
direct_connect_interval: int
do_px: bool
px_peers_count: int
back_off: dict[str, dict[ID, int]]
prune_back_off: int
unsubscribe_back_off: int
def __init__(
self,
protocols: Sequence[TProtocol],
@ -106,6 +108,10 @@ class GossipSub(IPubsubRouter, Service):
heartbeat_interval: int = 120,
direct_connect_initial_delay: float = 0.1,
direct_connect_interval: int = 300,
do_px: bool = False,
px_peers_count: int = 16,
prune_back_off: int = 60,
unsubscribe_back_off: int = 10,
) -> None:
self.protocols = list(protocols)
self.pubsub = None
@ -140,6 +146,12 @@ class GossipSub(IPubsubRouter, Service):
self.direct_connect_initial_delay = direct_connect_initial_delay
self.time_since_last_publish = {}
self.do_px = do_px
self.px_peers_count = px_peers_count
self.back_off = dict()
self.prune_back_off = prune_back_off
self.unsubscribe_back_off = unsubscribe_back_off
async def run(self) -> None:
self.manager.run_daemon_task(self.heartbeat)
if len(self.direct_peers) > 0:
@ -249,14 +261,10 @@ class GossipSub(IPubsubRouter, Service):
if peer_id not in self.pubsub.peers:
continue
stream = self.pubsub.peers[peer_id]
# FIXME: We should add a `WriteMsg` similar to write delimited messages.
# Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107
# TODO: Go use `sendRPC`, which possibly piggybacks gossip/control messages.
try:
await stream.write(encode_varint_prefixed(rpc_msg.SerializeToString()))
except StreamClosed:
logger.debug("Fail to publish message to %s: stream closed", peer_id)
self.pubsub._handle_dead_peer(peer_id)
await self.pubsub.write_msg(stream, rpc_msg)
for topic in pubsub_msg.topicIDs:
self.time_since_last_publish[topic] = int(time.time())
@ -334,15 +342,22 @@ class GossipSub(IPubsubRouter, Service):
self.mesh[topic] = set()
topic_in_fanout: bool = topic in self.fanout
fanout_peers: set[ID] = self.fanout[topic] if topic_in_fanout else set()
fanout_peers: set[ID] = set()
if topic_in_fanout:
for peer in self.fanout[topic]:
if self._check_back_off(peer, topic):
continue
fanout_peers.add(peer)
fanout_size = len(fanout_peers)
if not topic_in_fanout or (topic_in_fanout and fanout_size < self.degree):
# If there are fewer than D peers (let this number be x)
# in the fanout for a topic (or the topic is not in the fanout),
# select the remaining (D - x) peers from peers.gossipsub[topic].
if topic in self.pubsub.peer_topics:
if self.pubsub is not None and topic in self.pubsub.peer_topics:
selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
topic, self.degree - fanout_size, fanout_peers
topic, self.degree - fanout_size, fanout_peers, True
)
# Combine fanout peers with selected peers
fanout_peers.update(selected_peers)
@ -369,7 +384,8 @@ class GossipSub(IPubsubRouter, Service):
return
# Notify the peers in mesh[topic] with a PRUNE(topic) message
for peer in self.mesh[topic]:
await self.emit_prune(topic, peer)
await self.emit_prune(topic, peer, self.do_px, True)
self._add_back_off(peer, topic, True)
# Forget mesh[topic]
self.mesh.pop(topic, None)
@ -459,8 +475,8 @@ class GossipSub(IPubsubRouter, Service):
self.fanout_heartbeat()
# Get the peers to send IHAVE to
peers_to_gossip = self.gossip_heartbeat()
# Pack GRAFT, PRUNE and IHAVE for the same peer into one control message and
# send it
# Pack (piggyback) GRAFT, PRUNE and IHAVE for the same peer into
# one control message and send it
await self._emit_control_msgs(
peers_to_graft, peers_to_prune, peers_to_gossip
)
@ -505,7 +521,7 @@ class GossipSub(IPubsubRouter, Service):
if num_mesh_peers_in_topic < self.degree_low:
# Select D - |mesh[topic]| peers from peers.gossipsub[topic] - mesh[topic] # noqa: E501
selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
topic, self.degree - num_mesh_peers_in_topic, self.mesh[topic]
topic, self.degree - num_mesh_peers_in_topic, self.mesh[topic], True
)
for peer in selected_peers:
@ -568,9 +584,7 @@ class GossipSub(IPubsubRouter, Service):
if len(in_topic_peers) < self.degree:
# Select additional peers from peers.gossipsub[topic]
selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
topic,
self.degree - len(in_topic_peers),
in_topic_peers,
topic, self.degree - len(in_topic_peers), in_topic_peers, True
)
# Add the selected peers
in_topic_peers.update(selected_peers)
@ -581,7 +595,7 @@ class GossipSub(IPubsubRouter, Service):
if msg_ids:
# Select D peers from peers.gossipsub[topic] excluding current peers
peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
topic, self.degree, current_peers
topic, self.degree, current_peers, True
)
msg_id_strs = [str(msg_id) for msg_id in msg_ids]
for peer in peers_to_emit_ihave_to:
@ -655,7 +669,11 @@ class GossipSub(IPubsubRouter, Service):
return selection
def _get_in_topic_gossipsub_peers_from_minus(
self, topic: str, num_to_select: int, minus: Iterable[ID]
self,
topic: str,
num_to_select: int,
minus: Iterable[ID],
backoff_check: bool = False,
) -> list[ID]:
if self.pubsub is None:
raise NoPubsubAttached
@ -664,8 +682,88 @@ class GossipSub(IPubsubRouter, Service):
for peer_id in self.pubsub.peer_topics[topic]
if self.peer_protocol[peer_id] == PROTOCOL_ID
}
if backoff_check:
# filter out peers that are in back off for this topic
gossipsub_peers_in_topic = {
peer_id
for peer_id in gossipsub_peers_in_topic
if self._check_back_off(peer_id, topic) is False
}
return self.select_from_minus(num_to_select, gossipsub_peers_in_topic, minus)
def _add_back_off(
self, peer: ID, topic: str, is_unsubscribe: bool, backoff_duration: int = 0
) -> None:
"""
Add back off for a peer in a topic.
:param peer: peer to add back off for
:param topic: topic to add back off for
:param is_unsubscribe: whether this is an unsubscribe operation
:param backoff_duration: duration of back off in seconds; if 0, use the default
"""
if topic not in self.back_off:
self.back_off[topic] = dict()
backoff_till = int(time.time())
if backoff_duration > 0:
backoff_till += backoff_duration
else:
if is_unsubscribe:
backoff_till += self.unsubscribe_back_off
else:
backoff_till += self.prune_back_off
if peer not in self.back_off[topic]:
self.back_off[topic][peer] = backoff_till
else:
self.back_off[topic][peer] = max(self.back_off[topic][peer], backoff_till)
def _check_back_off(self, peer: ID, topic: str) -> bool:
"""
Check if a peer is in back off for a topic and clean up expired back off entries.
:param peer: peer to check
:param topic: topic to check
:return: True if the peer is in back off, False otherwise
"""
if topic not in self.back_off or peer not in self.back_off[topic]:
return False
if self.back_off[topic].get(peer, 0) > int(time.time()):
return True
else:
del self.back_off[topic][peer]
return False
async def _do_px(self, px_peers: list[rpc_pb2.PeerInfo]) -> None:
if len(px_peers) > self.px_peers_count:
px_peers = px_peers[: self.px_peers_count]
for peer in px_peers:
peer_id: ID = ID(peer.peerID)
if self.pubsub and peer_id in self.pubsub.peers:
continue
try:
peer_info = peer_info_from_bytes(peer.signedPeerRecord)
try:
if self.pubsub is None:
raise NoPubsubAttached
await self.pubsub.host.connect(peer_info)
except Exception as e:
logger.warning(
"failed to connect to px peer %s: %s",
peer_id,
e,
)
continue
except Exception as e:
logger.warning(
"failed to parse peer info from px peer %s: %s",
peer_id,
e,
)
continue
# RPC handlers
async def handle_ihave(
@ -722,8 +820,6 @@ class GossipSub(IPubsubRouter, Service):
packet.publish.extend(msgs_to_forward)
# 2) Serialize that packet
rpc_msg: bytes = packet.SerializeToString()
if self.pubsub is None:
raise NoPubsubAttached
@ -737,14 +833,7 @@ class GossipSub(IPubsubRouter, Service):
peer_stream = self.pubsub.peers[sender_peer_id]
# 4) And write the packet to the stream
try:
await peer_stream.write(encode_varint_prefixed(rpc_msg))
except StreamClosed:
logger.debug(
"Fail to responed to iwant request from %s: stream closed",
sender_peer_id,
)
self.pubsub._handle_dead_peer(sender_peer_id)
await self.pubsub.write_msg(peer_stream, packet)
async def handle_graft(
self, graft_msg: rpc_pb2.ControlGraft, sender_peer_id: ID
@ -758,24 +847,46 @@ class GossipSub(IPubsubRouter, Service):
logger.warning(
"GRAFT: ignoring request from direct peer %s", sender_peer_id
)
await self.emit_prune(topic, sender_peer_id)
await self.emit_prune(topic, sender_peer_id, False, False)
return
if self._check_back_off(sender_peer_id, topic):
logger.warning(
"GRAFT: ignoring request from %s, back off until %d",
sender_peer_id,
self.back_off[topic][sender_peer_id],
)
self._add_back_off(sender_peer_id, topic, False)
await self.emit_prune(topic, sender_peer_id, False, False)
return
if sender_peer_id not in self.mesh[topic]:
self.mesh[topic].add(sender_peer_id)
else:
# Respond with PRUNE if not subscribed to the topic
await self.emit_prune(topic, sender_peer_id)
await self.emit_prune(topic, sender_peer_id, self.do_px, False)
async def handle_prune(
self, prune_msg: rpc_pb2.ControlPrune, sender_peer_id: ID
) -> None:
topic: str = prune_msg.topicID
backoff_till: int = prune_msg.backoff
px_peers: list[rpc_pb2.PeerInfo] = []
for peer in prune_msg.peers:
px_peers.append(peer)
# Remove peer from mesh for topic
if topic in self.mesh:
if backoff_till > 0:
self._add_back_off(sender_peer_id, topic, False, backoff_till)
else:
self._add_back_off(sender_peer_id, topic, False)
self.mesh[topic].discard(sender_peer_id)
if px_peers:
await self._do_px(px_peers)
# RPC emitters
def pack_control_msgs(
@ -824,15 +935,36 @@ class GossipSub(IPubsubRouter, Service):
await self.emit_control_message(control_msg, id)
async def emit_prune(self, topic: str, id: ID) -> None:
async def emit_prune(
self, topic: str, to_peer: ID, do_px: bool, is_unsubscribe: bool
) -> None:
"""Emit graft message, sent to to_peer, for topic."""
prune_msg: rpc_pb2.ControlPrune = rpc_pb2.ControlPrune()
prune_msg.topicID = topic
back_off_duration = self.prune_back_off
if is_unsubscribe:
back_off_duration = self.unsubscribe_back_off
prune_msg.backoff = back_off_duration
if do_px:
exchange_peers = self._get_in_topic_gossipsub_peers_from_minus(
topic, self.px_peers_count, [to_peer]
)
for peer in exchange_peers:
if self.pubsub is None:
raise NoPubsubAttached
peer_info = self.pubsub.host.get_peerstore().peer_info(peer)
signed_peer_record: rpc_pb2.PeerInfo = rpc_pb2.PeerInfo()
signed_peer_record.peerID = peer.to_bytes()
signed_peer_record.signedPeerRecord = peer_info_to_bytes(peer_info)
prune_msg.peers.append(signed_peer_record)
control_msg: rpc_pb2.ControlMessage = rpc_pb2.ControlMessage()
control_msg.prune.extend([prune_msg])
await self.emit_control_message(control_msg, id)
await self.emit_control_message(control_msg, to_peer)
async def emit_control_message(
self, control_msg: rpc_pb2.ControlMessage, to_peer: ID
@ -843,8 +975,6 @@ class GossipSub(IPubsubRouter, Service):
packet: rpc_pb2.RPC = rpc_pb2.RPC()
packet.control.CopyFrom(control_msg)
rpc_msg: bytes = packet.SerializeToString()
# Get stream for peer from pubsub
if to_peer not in self.pubsub.peers:
logger.debug(
@ -854,8 +984,4 @@ class GossipSub(IPubsubRouter, Service):
peer_stream = self.pubsub.peers[to_peer]
# Write rpc to stream
try:
await peer_stream.write(encode_varint_prefixed(rpc_msg))
except StreamClosed:
logger.debug("Fail to emit control message to %s: stream closed", to_peer)
self.pubsub._handle_dead_peer(to_peer)
await self.pubsub.write_msg(peer_stream, packet)
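The back-off table is a plain mapping of topic -> peer -> expiry timestamp; a self-contained sketch of its semantics (standalone code mirroring _add_back_off and _check_back_off above, not the class itself):

.. code-block:: python

    import time

    back_off: dict[str, dict[str, int]] = {}
    PRUNE_BACK_OFF = 60        # seconds after a PRUNE
    UNSUBSCRIBE_BACK_OFF = 10  # seconds after we leave a topic

    def add_back_off(topic: str, peer: str, is_unsubscribe: bool) -> None:
        duration = UNSUBSCRIBE_BACK_OFF if is_unsubscribe else PRUNE_BACK_OFF
        till = int(time.time()) + duration
        # Never shorten an existing back-off window.
        peers = back_off.setdefault(topic, {})
        peers[peer] = max(peers.get(peer, 0), till)

    def in_back_off(topic: str, peer: str) -> bool:
        # Expired entries are treated as absent, as in _check_back_off.
        return back_off.get(topic, {}).get(peer, 0) > int(time.time())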

View File

@ -47,6 +47,13 @@ message ControlGraft {
message ControlPrune {
optional string topicID = 1;
repeated PeerInfo peers = 2;
optional uint64 backoff = 3;
}
message PeerInfo {
optional bytes peerID = 1;
optional bytes signedPeerRecord = 2;
}
message TopicDescriptor {

View File

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: libp2p/pubsub/pb/rpc.proto
# source: rpc.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
@ -13,37 +13,39 @@ _sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1alibp2p/pubsub/pb/rpc.proto\x12\tpubsub.pb\"\xb4\x01\n\x03RPC\x12-\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x16.pubsub.pb.RPC.SubOpts\x12#\n\x07publish\x18\x02 \x03(\x0b\x32\x12.pubsub.pb.Message\x12*\n\x07\x63ontrol\x18\x03 \x01(\x0b\x32\x19.pubsub.pb.ControlMessage\x1a-\n\x07SubOpts\x12\x11\n\tsubscribe\x18\x01 \x01(\x08\x12\x0f\n\x07topicid\x18\x02 \x01(\t\"i\n\x07Message\x12\x0f\n\x07\x66rom_id\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\r\n\x05seqno\x18\x03 \x01(\x0c\x12\x10\n\x08topicIDs\x18\x04 \x03(\t\x12\x11\n\tsignature\x18\x05 \x01(\x0c\x12\x0b\n\x03key\x18\x06 \x01(\x0c\"\xb0\x01\n\x0e\x43ontrolMessage\x12&\n\x05ihave\x18\x01 \x03(\x0b\x32\x17.pubsub.pb.ControlIHave\x12&\n\x05iwant\x18\x02 \x03(\x0b\x32\x17.pubsub.pb.ControlIWant\x12&\n\x05graft\x18\x03 \x03(\x0b\x32\x17.pubsub.pb.ControlGraft\x12&\n\x05prune\x18\x04 \x03(\x0b\x32\x17.pubsub.pb.ControlPrune\"3\n\x0c\x43ontrolIHave\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\x12\n\nmessageIDs\x18\x02 \x03(\t\"\"\n\x0c\x43ontrolIWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\t\"\x1f\n\x0c\x43ontrolGraft\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"\x1f\n\x0c\x43ontrolPrune\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"\x87\x03\n\x0fTopicDescriptor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x04\x61uth\x18\x02 \x01(\x0b\x32#.pubsub.pb.TopicDescriptor.AuthOpts\x12/\n\x03\x65nc\x18\x03 \x01(\x0b\x32\".pubsub.pb.TopicDescriptor.EncOpts\x1a|\n\x08\x41uthOpts\x12:\n\x04mode\x18\x01 \x01(\x0e\x32,.pubsub.pb.TopicDescriptor.AuthOpts.AuthMode\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\"&\n\x08\x41uthMode\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03KEY\x10\x01\x12\x07\n\x03WOT\x10\x02\x1a\x83\x01\n\x07\x45ncOpts\x12\x38\n\x04mode\x18\x01 \x01(\x0e\x32*.pubsub.pb.TopicDescriptor.EncOpts.EncMode\x12\x11\n\tkeyHashes\x18\x02 \x03(\x0c\"+\n\x07\x45ncMode\x12\x08\n\x04NONE\x10\x00\x12\r\n\tSHAREDKEY\x10\x01\x12\x07\n\x03WOT\x10\x02')
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\trpc.proto\x12\tpubsub.pb\"\xb4\x01\n\x03RPC\x12-\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x16.pubsub.pb.RPC.SubOpts\x12#\n\x07publish\x18\x02 \x03(\x0b\x32\x12.pubsub.pb.Message\x12*\n\x07\x63ontrol\x18\x03 \x01(\x0b\x32\x19.pubsub.pb.ControlMessage\x1a-\n\x07SubOpts\x12\x11\n\tsubscribe\x18\x01 \x01(\x08\x12\x0f\n\x07topicid\x18\x02 \x01(\t\"i\n\x07Message\x12\x0f\n\x07\x66rom_id\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\r\n\x05seqno\x18\x03 \x01(\x0c\x12\x10\n\x08topicIDs\x18\x04 \x03(\t\x12\x11\n\tsignature\x18\x05 \x01(\x0c\x12\x0b\n\x03key\x18\x06 \x01(\x0c\"\xb0\x01\n\x0e\x43ontrolMessage\x12&\n\x05ihave\x18\x01 \x03(\x0b\x32\x17.pubsub.pb.ControlIHave\x12&\n\x05iwant\x18\x02 \x03(\x0b\x32\x17.pubsub.pb.ControlIWant\x12&\n\x05graft\x18\x03 \x03(\x0b\x32\x17.pubsub.pb.ControlGraft\x12&\n\x05prune\x18\x04 \x03(\x0b\x32\x17.pubsub.pb.ControlPrune\"3\n\x0c\x43ontrolIHave\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\x12\n\nmessageIDs\x18\x02 \x03(\t\"\"\n\x0c\x43ontrolIWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\t\"\x1f\n\x0c\x43ontrolGraft\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"T\n\x0c\x43ontrolPrune\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\"\n\x05peers\x18\x02 \x03(\x0b\x32\x13.pubsub.pb.PeerInfo\x12\x0f\n\x07\x62\x61\x63koff\x18\x03 \x01(\x04\"4\n\x08PeerInfo\x12\x0e\n\x06peerID\x18\x01 \x01(\x0c\x12\x18\n\x10signedPeerRecord\x18\x02 \x01(\x0c\"\x87\x03\n\x0fTopicDescriptor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x04\x61uth\x18\x02 \x01(\x0b\x32#.pubsub.pb.TopicDescriptor.AuthOpts\x12/\n\x03\x65nc\x18\x03 \x01(\x0b\x32\".pubsub.pb.TopicDescriptor.EncOpts\x1a|\n\x08\x41uthOpts\x12:\n\x04mode\x18\x01 \x01(\x0e\x32,.pubsub.pb.TopicDescriptor.AuthOpts.AuthMode\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\"&\n\x08\x41uthMode\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03KEY\x10\x01\x12\x07\n\x03WOT\x10\x02\x1a\x83\x01\n\x07\x45ncOpts\x12\x38\n\x04mode\x18\x01 \x01(\x0e\x32*.pubsub.pb.TopicDescriptor.EncOpts.EncMode\x12\x11\n\tkeyHashes\x18\x02 \x03(\x0c\"+\n\x07\x45ncMode\x12\x08\n\x04NONE\x10\x00\x12\r\n\tSHAREDKEY\x10\x01\x12\x07\n\x03WOT\x10\x02')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.pubsub.pb.rpc_pb2', globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'rpc_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_RPC._serialized_start=42
_RPC._serialized_end=222
_RPC_SUBOPTS._serialized_start=177
_RPC_SUBOPTS._serialized_end=222
_MESSAGE._serialized_start=224
_MESSAGE._serialized_end=329
_CONTROLMESSAGE._serialized_start=332
_CONTROLMESSAGE._serialized_end=508
_CONTROLIHAVE._serialized_start=510
_CONTROLIHAVE._serialized_end=561
_CONTROLIWANT._serialized_start=563
_CONTROLIWANT._serialized_end=597
_CONTROLGRAFT._serialized_start=599
_CONTROLGRAFT._serialized_end=630
_CONTROLPRUNE._serialized_start=632
_CONTROLPRUNE._serialized_end=663
_TOPICDESCRIPTOR._serialized_start=666
_TOPICDESCRIPTOR._serialized_end=1057
_TOPICDESCRIPTOR_AUTHOPTS._serialized_start=799
_TOPICDESCRIPTOR_AUTHOPTS._serialized_end=923
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_start=885
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_end=923
_TOPICDESCRIPTOR_ENCOPTS._serialized_start=926
_TOPICDESCRIPTOR_ENCOPTS._serialized_end=1057
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_start=1014
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_end=1057
_RPC._serialized_start=25
_RPC._serialized_end=205
_RPC_SUBOPTS._serialized_start=160
_RPC_SUBOPTS._serialized_end=205
_MESSAGE._serialized_start=207
_MESSAGE._serialized_end=312
_CONTROLMESSAGE._serialized_start=315
_CONTROLMESSAGE._serialized_end=491
_CONTROLIHAVE._serialized_start=493
_CONTROLIHAVE._serialized_end=544
_CONTROLIWANT._serialized_start=546
_CONTROLIWANT._serialized_end=580
_CONTROLGRAFT._serialized_start=582
_CONTROLGRAFT._serialized_end=613
_CONTROLPRUNE._serialized_start=615
_CONTROLPRUNE._serialized_end=699
_PEERINFO._serialized_start=701
_PEERINFO._serialized_end=753
_TOPICDESCRIPTOR._serialized_start=756
_TOPICDESCRIPTOR._serialized_end=1147
_TOPICDESCRIPTOR_AUTHOPTS._serialized_start=889
_TOPICDESCRIPTOR_AUTHOPTS._serialized_end=1013
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_start=975
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_end=1013
_TOPICDESCRIPTOR_ENCOPTS._serialized_start=1016
_TOPICDESCRIPTOR_ENCOPTS._serialized_end=1147
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_start=1104
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_end=1147
# @@protoc_insertion_point(module_scope)

View File

@ -179,17 +179,43 @@ class ControlPrune(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
TOPICID_FIELD_NUMBER: builtins.int
PEERS_FIELD_NUMBER: builtins.int
BACKOFF_FIELD_NUMBER: builtins.int
topicID: builtins.str
backoff: builtins.int
@property
def peers(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___PeerInfo]: ...
def __init__(
self,
*,
topicID: builtins.str | None = ...,
peers: collections.abc.Iterable[global___PeerInfo] | None = ...,
backoff: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["topicID", b"topicID"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["topicID", b"topicID"]) -> None: ...
def HasField(self, field_name: typing.Literal["backoff", b"backoff", "topicID", b"topicID"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["backoff", b"backoff", "peers", b"peers", "topicID", b"topicID"]) -> None: ...
global___ControlPrune = ControlPrune
@typing.final
class PeerInfo(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
PEERID_FIELD_NUMBER: builtins.int
SIGNEDPEERRECORD_FIELD_NUMBER: builtins.int
peerID: builtins.bytes
signedPeerRecord: builtins.bytes
def __init__(
self,
*,
peerID: builtins.bytes | None = ...,
signedPeerRecord: builtins.bytes | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["peerID", b"peerID", "signedPeerRecord", b"signedPeerRecord"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["peerID", b"peerID", "signedPeerRecord", b"signedPeerRecord"]) -> None: ...
global___PeerInfo = PeerInfo
@typing.final
class TopicDescriptor(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

View File

@ -66,6 +66,7 @@ from libp2p.utils import (
encode_varint_prefixed,
read_varint_prefixed_bytes,
)
from libp2p.utils.varint import encode_uvarint
from .pb import (
rpc_pb2,
@ -682,19 +683,18 @@ class Pubsub(Service, IPubsub):
# TODO: Implement throttle on async validators
if len(async_topic_validators) > 0:
# TODO: Use a better pattern
final_result: bool = True
# List appends are atomic in CPython, and trio tasks are cooperatively
# scheduled, so collecting results in a shared list is safe.
results = []
async def run_async_validator(func: AsyncValidatorFn) -> None:
nonlocal final_result
result = await func(msg_forwarder, msg)
final_result = final_result and result
results.append(result)
async with trio.open_nursery() as nursery:
for async_validator in async_topic_validators:
nursery.start_soon(run_async_validator, async_validator)
if not final_result:
if not all(results):
raise ValidationError(f"Validation failed for msg={msg}")
async def push_msg(self, msg_forwarder: ID, msg: rpc_pb2.Message) -> None:
@ -779,3 +779,43 @@ class Pubsub(Service, IPubsub):
def _is_subscribed_to_msg(self, msg: rpc_pb2.Message) -> bool:
return any(topic in self.topic_ids for topic in msg.topicIDs)
async def write_msg(self, stream: INetStream, rpc_msg: rpc_pb2.RPC) -> bool:
"""
Write an RPC message to a stream with proper error handling.
Implements WriteMsg in the style of go-msgio, as used in go-libp2p.
Ref: https://github.com/libp2p/go-msgio/blob/master/protoio/uvarint_writer.go#L56
:param stream: stream to write the message to
:param rpc_msg: RPC message to write
:return: True if successful, False if stream was closed
"""
try:
# Calculate message size first
msg_bytes = rpc_msg.SerializeToString()
msg_size = len(msg_bytes)
# Calculate varint size and allocate exact buffer size needed
varint_bytes = encode_uvarint(msg_size)
varint_size = len(varint_bytes)
# Allocate buffer with exact size (like Go's pool.Get())
buf = bytearray(varint_size + msg_size)
# Write varint length prefix to buffer (like Go's binary.PutUvarint())
buf[:varint_size] = varint_bytes
# Write serialized message after varint (like Go's rpc.MarshalTo())
buf[varint_size:] = msg_bytes
# Single write operation (like Go's s.Write(buf))
await stream.write(bytes(buf))
return True
except StreamClosed:
peer_id = stream.muxed_conn.peer_id
logger.debug("Fail to write message to %s: stream closed", peer_id)
self._handle_dead_peer(peer_id)
return False
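The wire format write_msg produces is simply <uvarint length><serialized RPC>; a standalone sketch of the framing, using the same encode_uvarint imported above:

.. code-block:: python

    from libp2p.pubsub.pb import rpc_pb2
    from libp2p.utils.varint import encode_uvarint

    rpc = rpc_pb2.RPC()
    rpc.subscriptions.add(subscribe=True, topicid="demo")
    msg_bytes = rpc.SerializeToString()
    frame = encode_uvarint(len(msg_bytes)) + msg_bytes
    # frame is exactly what a single stream.write() sends on the wire.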

View File

@ -542,7 +542,7 @@ class Yamux(IMuxedConn):
f"type={typ}, flags={flags}, stream_id={stream_id},"
f"length={length}"
)
if typ == TYPE_DATA and flags & FLAG_SYN:
if (typ == TYPE_DATA or typ == TYPE_WINDOW_UPDATE) and flags & FLAG_SYN:
async with self.streams_lock:
if stream_id not in self.streams:
stream = YamuxStream(stream_id, self, False)

View File

@ -26,6 +26,7 @@ LISTEN_MADDR = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")
FLOODSUB_PROTOCOL_ID = floodsub.PROTOCOL_ID
GOSSIPSUB_PROTOCOL_ID = gossipsub.PROTOCOL_ID
GOSSIPSUB_PROTOCOL_ID_V1 = gossipsub.PROTOCOL_ID_V11
class GossipsubParams(NamedTuple):
@ -40,6 +41,10 @@ class GossipsubParams(NamedTuple):
heartbeat_interval: float = 0.5
direct_connect_initial_delay: float = 0.1
direct_connect_interval: int = 300
do_px: bool = False
px_peers_count: int = 16
prune_back_off: int = 60
unsubscribe_back_off: int = 10
GOSSIPSUB_PARAMS = GossipsubParams()

View File

@ -0,0 +1 @@
Added support for ``Multicast DNS`` in py-libp2p

View File

@ -0,0 +1 @@
Optimized pubsub message writing with a ``write_msg()`` method that serializes each RPC into a single pre-allocated buffer and issues one write, replacing the separate varint-prefix encoding and write calls in FloodSub and GossipSub.

View File

@ -0,0 +1 @@
Added peer exchange and backoff logic as part of the Gossipsub v1.1 upgrade.

View File

@ -0,0 +1 @@
Align stream creation logic with the Yamux specification.

View File

@ -0,0 +1 @@
Fixed an issue in `Pubsub` where async validators were not handled reliably under concurrency. Now uses a safe aggregator list for consistent behavior.

View File

@ -0,0 +1 @@
Added comprehensive tests for pubsub connection utility functions to verify degree limits are enforced, excess peers are handled correctly, and edge cases (degree=0, negative values, empty lists) are managed gracefully.

View File

@ -31,6 +31,7 @@ dependencies = [
"trio-typing>=0.0.4",
"trio>=0.26.0",
"fastecdsa==2.3.2; sys_platform != 'win32'",
"zeroconf (>=0.147.0,<0.148.0)",
]
classifiers = [
"Development Status :: 4 - Beta",
@ -54,6 +55,7 @@ identify-demo = "examples.identify.identify:main"
identify-push-demo = "examples.identify_push.identify_push_demo:run_main"
identify-push-listener-dialer-demo = "examples.identify_push.identify_push_listener_dialer:main"
pubsub-demo = "examples.pubsub.pubsub:main"
mdns-demo = "examples.mDNS.mDNS:main"
[project.optional-dependencies]
dev = [

View File

@ -15,6 +15,7 @@ from tests.utils.factories import (
PubsubFactory,
)
from tests.utils.pubsub.utils import (
connect_some,
dense_connect,
one_to_all_connect,
sparse_connect,
@ -134,7 +135,7 @@ async def test_handle_graft(monkeypatch):
# check if it is called in `handle_graft`
event_emit_prune = trio.Event()
async def emit_prune(topic, sender_peer_id):
async def emit_prune(topic, sender_peer_id, do_px, is_unsubscribe):
event_emit_prune.set()
await trio.lowlevel.checkpoint()
@ -193,7 +194,7 @@ async def test_handle_prune():
# alice emit prune message to bob, alice should be removed
# from bob's mesh peer
await gossipsubs[index_alice].emit_prune(topic, id_bob)
await gossipsubs[index_alice].emit_prune(topic, id_bob, False, False)
# `emit_prune` does not remove bob from alice's mesh peers
assert id_bob in gossipsubs[index_alice].mesh[topic]
@ -292,7 +293,9 @@ async def test_fanout():
@pytest.mark.trio
@pytest.mark.slow
async def test_fanout_maintenance():
async with PubsubFactory.create_batch_with_gossipsub(10) as pubsubs_gsub:
async with PubsubFactory.create_batch_with_gossipsub(
10, unsubscribe_back_off=1
) as pubsubs_gsub:
hosts = [pubsub.host for pubsub in pubsubs_gsub]
num_msgs = 5
@ -588,3 +591,166 @@ async def test_sparse_connect():
f"received the message. Ideally all nodes should receive it, but at "
f"minimum {min_required} required for sparse network scalability."
)
@pytest.mark.trio
async def test_connect_some_with_fewer_hosts_than_degree():
"""Test connect_some when there are fewer hosts than degree."""
# Create 3 hosts with degree=5
async with PubsubFactory.create_batch_with_floodsub(3) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = 5
await connect_some(hosts, degree)
await trio.sleep(0.1) # Allow connections to establish
# Each host should connect to all other hosts (since there are only 2 others)
for i, pubsub in enumerate(pubsubs_fsub):
connected_peers = len(pubsub.peers)
expected_max_connections = len(hosts) - 1 # All others
assert connected_peers <= expected_max_connections, (
f"Host {i} has {connected_peers} connections, "
f"but can only connect to {expected_max_connections} others"
)
@pytest.mark.trio
async def test_connect_some_degree_limit_enforced():
"""Test that connect_some enforces degree limits and creates expected topology."""
# Test with small network where we can verify exact behavior
async with PubsubFactory.create_batch_with_floodsub(6) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = 2
await connect_some(hosts, degree)
await trio.sleep(0.1)
# With 6 hosts and degree=2, expected connections:
# Host 0 → connects to hosts 1,2 (2 peers total)
# Host 1 → connects to hosts 2,3 (3 peers: 0,2,3)
# Host 2 → connects to hosts 3,4 (4 peers: 0,1,3,4)
# Host 3 → connects to hosts 4,5 (4 peers: 1,2,4,5)
# Host 4 → connects to host 5 (3 peers: 2,3,5)
# Host 5 → (2 peers: 3,4)
peer_counts = [len(pubsub.peers) for pubsub in pubsubs_fsub]
# First and last hosts should have exactly degree connections
assert peer_counts[0] == degree, (
f"Host 0 should have {degree} peers, got {peer_counts[0]}"
)
assert peer_counts[-1] <= degree, (
f"Last host should have ≤ {degree} peers, got {peer_counts[-1]}"
)
# Middle hosts may have more due to bidirectional connections
# but the pattern should be consistent with degree limit
total_connections = sum(peer_counts)
# Should be less than full mesh (each host connected to all others)
full_mesh_connections = len(hosts) * (len(hosts) - 1)
assert total_connections < full_mesh_connections, (
f"Got {total_connections} total connections, "
f"but full mesh would be {full_mesh_connections}"
)
# Should be more than just a chain (each host connected to next only)
chain_connections = 2 * (len(hosts) - 1) # bidirectional chain
assert total_connections > chain_connections, (
f"Got {total_connections} total connections, which is too few "
f"(chain would be {chain_connections})"
)
@pytest.mark.trio
async def test_connect_some_degree_zero():
"""Test edge case: degree=0 should result in no connections."""
# Create 5 hosts with degree=0
async with PubsubFactory.create_batch_with_floodsub(5) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = 0
await connect_some(hosts, degree)
await trio.sleep(0.1) # Allow any potential connections to establish
# Verify no connections were made
for i, pubsub in enumerate(pubsubs_fsub):
connected_peers = len(pubsub.peers)
assert connected_peers == 0, (
f"Host {i} has {connected_peers} connections, "
f"but degree=0 should result in no connections"
)
@pytest.mark.trio
async def test_connect_some_negative_degree():
"""Test edge case: negative degree should be handled gracefully."""
# Create 5 hosts with degree=-1
async with PubsubFactory.create_batch_with_floodsub(5) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = -1
await connect_some(hosts, degree)
await trio.sleep(0.1) # Allow any potential connections to establish
# Verify no connections were made (negative degree should behave like 0)
for i, pubsub in enumerate(pubsubs_fsub):
connected_peers = len(pubsub.peers)
assert connected_peers == 0, (
f"Host {i} has {connected_peers} connections, "
f"but negative degree should result in no connections"
)
@pytest.mark.trio
async def test_sparse_connect_degree_zero():
"""Test sparse_connect with degree=0."""
async with PubsubFactory.create_batch_with_floodsub(8) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
degree = 0
await sparse_connect(hosts, degree)
await trio.sleep(0.1) # Allow connections to establish
# With degree=0, sparse_connect should still create neighbor connections
# for connectivity (this is part of the algorithm design)
for i, pubsub in enumerate(pubsubs_fsub):
connected_peers = len(pubsub.peers)
# Should have some connections due to neighbor connectivity
# (each node connects to immediate neighbors)
expected_neighbors = 2 # previous and next in ring
assert connected_peers >= expected_neighbors, (
f"Host {i} has {connected_peers} connections, "
f"expected at least {expected_neighbors} neighbor connections"
)
@pytest.mark.trio
async def test_empty_host_list():
"""Test edge case: empty host list should be handled gracefully."""
hosts = []
# All functions should handle empty lists gracefully
await connect_some(hosts, 5)
await sparse_connect(hosts, 3)
await dense_connect(hosts)
# If we reach here without exceptions, the test passes
@pytest.mark.trio
async def test_single_host():
"""Test edge case: single host should be handled gracefully."""
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
hosts = [pubsub.host for pubsub in pubsubs_fsub]
# All functions should handle single host gracefully
await connect_some(hosts, 5)
await sparse_connect(hosts, 3)
await dense_connect(hosts)
# Single host should have no connections
connected_peers = len(pubsubs_fsub[0].peers)
assert connected_peers == 0, (
f"Single host has {connected_peers} connections, expected 0"
)

View File

@ -0,0 +1,274 @@
import pytest
import trio
from libp2p.pubsub.gossipsub import (
GossipSub,
)
from libp2p.tools.utils import (
connect,
)
from tests.utils.factories import (
PubsubFactory,
)
@pytest.mark.trio
async def test_prune_backoff():
async with PubsubFactory.create_batch_with_gossipsub(
2, heartbeat_interval=0.5, prune_back_off=2
) as pubsubs:
gsub0 = pubsubs[0].router
gsub1 = pubsubs[1].router
assert isinstance(gsub0, GossipSub)
assert isinstance(gsub1, GossipSub)
host_0 = pubsubs[0].host
host_1 = pubsubs[1].host
topic = "test_prune_backoff"
# connect hosts
await connect(host_0, host_1)
await trio.sleep(0.5)
# both join the topic
await gsub0.join(topic)
await gsub1.join(topic)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(0.5)
# ensure peer is registered in mesh
assert host_0.get_id() in gsub1.mesh[topic]
# prune host_1 from gsub0's mesh
await gsub0.emit_prune(topic, host_1.get_id(), False, False)
await trio.sleep(0.5)
# host_0 should not be in gsub1's mesh
assert host_0.get_id() not in gsub1.mesh[topic]
# try to graft again immediately (should be rejected due to backoff)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(0.5)
assert host_0.get_id() not in gsub1.mesh[topic], (
"peer should be backoffed and not re-added"
)
# try to graft again (should succeed after backoff)
await trio.sleep(2)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(1)
assert host_0.get_id() in gsub1.mesh[topic], (
"peer should be able to rejoin after backoff"
)
@pytest.mark.trio
async def test_unsubscribe_backoff():
async with PubsubFactory.create_batch_with_gossipsub(
2, heartbeat_interval=1, prune_back_off=1, unsubscribe_back_off=2
) as pubsubs:
gsub0 = pubsubs[0].router
gsub1 = pubsubs[1].router
assert isinstance(gsub0, GossipSub)
assert isinstance(gsub1, GossipSub)
host_0 = pubsubs[0].host
host_1 = pubsubs[1].host
topic = "test_unsubscribe_backoff"
# connect hosts
await connect(host_0, host_1)
await trio.sleep(0.5)
# both join the topic
await gsub0.join(topic)
await gsub1.join(topic)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(0.5)
# ensure peer is registered in mesh
assert host_0.get_id() in gsub1.mesh[topic]
# host_1 unsubscribes from the topic
await gsub1.leave(topic)
await trio.sleep(0.5)
assert topic not in gsub1.mesh
# host_1 resubscribes to the topic
await gsub1.join(topic)
await trio.sleep(0.5)
assert topic in gsub1.mesh
# try to graft again immediately (should be rejected due to backoff)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(0.5)
assert host_0.get_id() not in gsub1.mesh[topic], (
"peer should be backoffed and not re-added"
)
# try to graft again (should succeed after backoff)
await trio.sleep(1)
await gsub0.emit_graft(topic, host_1.get_id())
await trio.sleep(1)
assert host_0.get_id() in gsub1.mesh[topic], (
"peer should be able to rejoin after backoff"
)
@pytest.mark.trio
async def test_peer_exchange():
async with PubsubFactory.create_batch_with_gossipsub(
3,
heartbeat_interval=0.5,
do_px=True,
px_peers_count=1,
) as pubsubs:
gsub0 = pubsubs[0].router
gsub1 = pubsubs[1].router
gsub2 = pubsubs[2].router
assert isinstance(gsub0, GossipSub)
assert isinstance(gsub1, GossipSub)
assert isinstance(gsub2, GossipSub)
host_0 = pubsubs[0].host
host_1 = pubsubs[1].host
host_2 = pubsubs[2].host
topic = "test_peer_exchange"
# connect hosts
await connect(host_1, host_0)
await connect(host_1, host_2)
await trio.sleep(0.5)
# all join the topic and 0 <-> 1 and 1 <-> 2 graft
await pubsubs[1].subscribe(topic)
await pubsubs[0].subscribe(topic)
await pubsubs[2].subscribe(topic)
await gsub1.emit_graft(topic, host_0.get_id())
await gsub1.emit_graft(topic, host_2.get_id())
await gsub0.emit_graft(topic, host_1.get_id())
await gsub2.emit_graft(topic, host_1.get_id())
await trio.sleep(1)
# ensure peer is registered in mesh
assert host_0.get_id() in gsub1.mesh[topic]
assert host_2.get_id() in gsub1.mesh[topic]
assert host_2.get_id() not in gsub0.mesh[topic]
# host_1 unsubscribes from the topic
await gsub1.leave(topic)
await trio.sleep(1) # Wait for heartbeat to update mesh
assert topic not in gsub1.mesh
# Wait for gsub0 to graft host_2 into its mesh via PX
await trio.sleep(1)
assert host_2.get_id() in gsub0.mesh[topic]
@pytest.mark.trio
async def test_topics_are_isolated():
async with PubsubFactory.create_batch_with_gossipsub(
2, heartbeat_interval=0.5, prune_back_off=2
) as pubsubs:
gsub0 = pubsubs[0].router
gsub1 = pubsubs[1].router
assert isinstance(gsub0, GossipSub)
assert isinstance(gsub1, GossipSub)
host_0 = pubsubs[0].host
host_1 = pubsubs[1].host
topic1 = "test_prune_backoff"
topic2 = "test_prune_backoff2"
# connect hosts
await connect(host_0, host_1)
await trio.sleep(0.5)
# both peers join both the topics
await gsub0.join(topic1)
await gsub1.join(topic1)
await gsub0.join(topic2)
await gsub1.join(topic2)
await gsub0.emit_graft(topic1, host_1.get_id())
await trio.sleep(0.5)
# ensure topic1 for peer is registered in mesh
assert host_0.get_id() in gsub1.mesh[topic1]
# prune topic1 for host_1 from gsub0's mesh
await gsub0.emit_prune(topic1, host_1.get_id(), False, False)
await trio.sleep(0.5)
# topic1 for host_0 should not be in gsub1's mesh
assert host_0.get_id() not in gsub1.mesh[topic1]
# try to regraft topic1 and graft new topic2
await gsub0.emit_graft(topic1, host_1.get_id())
await gsub0.emit_graft(topic2, host_1.get_id())
await trio.sleep(0.5)
assert host_0.get_id() not in gsub1.mesh[topic1], (
"peer should be backoffed and not re-added"
)
assert host_0.get_id() in gsub1.mesh[topic2], (
"peer should be able to join a different topic"
)
@pytest.mark.trio
async def test_stress_churn():
NUM_PEERS = 5
CHURN_CYCLES = 30
TOPIC = "stress_churn_topic"
PRUNE_BACKOFF = 1
HEARTBEAT_INTERVAL = 0.2
async with PubsubFactory.create_batch_with_gossipsub(
NUM_PEERS,
heartbeat_interval=HEARTBEAT_INTERVAL,
prune_back_off=PRUNE_BACKOFF,
) as pubsubs:
routers: list[GossipSub] = []
for ps in pubsubs:
assert isinstance(ps.router, GossipSub)
routers.append(ps.router)
hosts = [ps.host for ps in pubsubs]
# fully connect all peers
for i in range(NUM_PEERS):
for j in range(i + 1, NUM_PEERS):
await connect(hosts[i], hosts[j])
await trio.sleep(1)
# all peers join the topic
for router in routers:
await router.join(TOPIC)
await trio.sleep(1)
# rapid join/prune cycles
for cycle in range(CHURN_CYCLES):
for i, router in enumerate(routers):
# prune all other peers from this router's mesh
for j, peer_host in enumerate(hosts):
if i != j:
await router.emit_prune(TOPIC, peer_host.get_id(), False, False)
await trio.sleep(0.1)
for i, router in enumerate(routers):
# graft all other peers back
for j, peer_host in enumerate(hosts):
if i != j:
await router.emit_graft(TOPIC, peer_host.get_id())
await trio.sleep(0.1)
# wait for backoff entries to expire and cleanup
await trio.sleep(PRUNE_BACKOFF * 2)
# check that the backoff table is not unbounded
for router in routers:
# backoff is a dict: topic -> peer -> expiry
backoff = getattr(router, "back_off", None)
assert backoff is not None, "router missing backoff table"
# only a small number of entries should remain (ideally 0)
total_entries = sum(len(peers) for peers in backoff.values())
assert total_entries < NUM_PEERS * 2, (
f"backoff table grew too large: {total_entries} entries"
)
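For reference, the nested shape that the final loop walks, shown with hypothetical values:

    # back_off maps topic -> peer ID -> expiry, per the comment above
    back_off = {
        "stress_churn_topic": {
            some_peer_id: expiry_timestamp,  # hypothetical entry
        },
    }
    total_entries = sum(len(peers) for peers in back_off.values())  # == 1 here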

View File

@ -0,0 +1,91 @@
"""
Unit tests for mDNS broadcaster component.
"""
from zeroconf import Zeroconf
from libp2p.discovery.mdns.broadcaster import PeerBroadcaster
from libp2p.peer.id import ID
class TestPeerBroadcaster:
"""Unit tests for PeerBroadcaster."""
def test_broadcaster_initialization(self):
"""Test that broadcaster initializes correctly."""
zeroconf = Zeroconf()
service_type = "_p2p._udp.local."
service_name = "test-peer._p2p._udp.local."
peer_id = (
"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" # String, not ID object
)
port = 8000
broadcaster = PeerBroadcaster(
zeroconf=zeroconf,
service_type=service_type,
service_name=service_name,
peer_id=peer_id,
port=port,
)
assert broadcaster.zeroconf == zeroconf
assert broadcaster.service_type == service_type
assert broadcaster.service_name == service_name
assert broadcaster.peer_id == peer_id
assert broadcaster.port == port
# Clean up
zeroconf.close()
def test_broadcaster_service_creation(self):
"""Test that broadcaster creates valid service info."""
zeroconf = Zeroconf()
service_type = "_p2p._udp.local."
service_name = "test-peer2._p2p._udp.local."
peer_id_obj = ID.from_base58("QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN")
peer_id = str(peer_id_obj) # Convert to string
port = 8000
broadcaster = PeerBroadcaster(
zeroconf=zeroconf,
service_type=service_type,
service_name=service_name,
peer_id=peer_id,
port=port,
)
# Verify service was created and registered
service_info = broadcaster.service_info
assert service_info is not None
assert service_info.type == service_type
assert service_info.name == service_name
assert service_info.port == port
assert b"id" in service_info.properties
assert service_info.properties[b"id"] == peer_id.encode()
# Clean up
zeroconf.close()
def test_broadcaster_start_stop(self):
"""Test that broadcaster can start and stop correctly."""
zeroconf = Zeroconf()
service_type = "_p2p._udp.local."
service_name = "test-start-stop._p2p._udp.local."
peer_id_obj = ID.from_base58("QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N")
peer_id = str(peer_id_obj) # Convert to string
port = 8001
broadcaster = PeerBroadcaster(
zeroconf=zeroconf,
service_type=service_type,
service_name=service_name,
peer_id=peer_id,
port=port,
)
# Service should be registered after construction
assert broadcaster.service_info is not None
# Stop broadcasting, then clean up
broadcaster.unregister()
zeroconf.close()

View File

@ -0,0 +1,114 @@
"""
Unit tests for mDNS listener component.
"""
import socket
from zeroconf import ServiceInfo, Zeroconf
from libp2p.abc import Multiaddr
from libp2p.discovery.mdns.listener import PeerListener
from libp2p.peer.id import ID
from libp2p.peer.peerstore import PeerStore
class TestPeerListener:
"""Unit tests for PeerListener."""
def test_listener_initialization(self):
"""Test that listener initializes correctly."""
peerstore = PeerStore()
zeroconf = Zeroconf()
service_type = "_p2p._udp.local."
service_name = "local-peer._p2p._udp.local."
listener = PeerListener(
peerstore=peerstore,
zeroconf=zeroconf,
service_type=service_type,
service_name=service_name,
)
assert listener.peerstore == peerstore
assert listener.zeroconf == zeroconf
assert listener.service_type == service_type
assert listener.service_name == service_name
assert listener.discovered_services == {}
# Clean up
listener.stop()
zeroconf.close()
def test_listener_extract_peer_info_success(self):
"""Test successful PeerInfo extraction from ServiceInfo."""
peerstore = PeerStore()
zeroconf = Zeroconf()
listener = PeerListener(
peerstore=peerstore,
zeroconf=zeroconf,
service_type="_p2p._udp.local.",
service_name="local._p2p._udp.local.",
)
# Create sample service info
sample_peer_id = ID.from_base58(
"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"
)
hostname = socket.gethostname()
local_ip = "192.168.1.100"
sample_service_info = ServiceInfo(
type_="_p2p._udp.local.",
name="test-peer._p2p._udp.local.",
port=8000,
properties={b"id": str(sample_peer_id).encode()},
server=f"{hostname}.local.",
addresses=[socket.inet_aton(local_ip)],
)
peer_info = listener._extract_peer_info(sample_service_info)
assert peer_info is not None
assert isinstance(peer_info.peer_id, ID)
assert len(peer_info.addrs) > 0
assert all(isinstance(addr, Multiaddr) for addr in peer_info.addrs)
# Check that protocol is TCP since we always use TCP
assert "/tcp/" in str(peer_info.addrs[0])
# Clean up
listener.stop()
zeroconf.close()
def test_listener_extract_peer_info_invalid_id(self):
"""Test PeerInfo extraction fails with invalid peer ID."""
peerstore = PeerStore()
zeroconf = Zeroconf()
listener = PeerListener(
peerstore=peerstore,
zeroconf=zeroconf,
service_type="_p2p._udp.local.",
service_name="local._p2p._udp.local.",
)
# Create service info with invalid peer ID
hostname = socket.gethostname()
local_ip = "192.168.1.100"
service_info = ServiceInfo(
type_="_p2p._udp.local.",
name="invalid-peer._p2p._udp.local.",
port=8000,
properties={b"id": b"invalid_peer_id_format"},
server=f"{hostname}.local.",
addresses=[socket.inet_aton(local_ip)],
)
peer_info = listener._extract_peer_info(service_info)
assert peer_info is None
# Clean up
listener.stop()
zeroconf.close()
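Together these two tests pin down the contract of `_extract_peer_info`: a valid base58 `id` property yields a PeerInfo with `/ip4/.../tcp/<port>` multiaddrs, and a malformed one yields None. A hedged sketch of equivalent logic, not the actual implementation (the PeerInfo import path is an assumption):

    import socket

    from zeroconf import ServiceInfo

    from libp2p.abc import Multiaddr
    from libp2p.peer.id import ID
    from libp2p.peer.peerinfo import PeerInfo  # assumed import path


    def extract_peer_info_sketch(info: ServiceInfo) -> PeerInfo | None:
        try:
            peer_id = ID.from_base58(info.properties[b"id"].decode())
        except Exception:
            return None  # malformed id property -> no PeerInfo
        # zeroconf stores packed IPv4 addresses; always assume TCP transport
        addrs = [
            Multiaddr(f"/ip4/{socket.inet_ntoa(raw)}/tcp/{info.port}")
            for raw in info.addresses
        ]
        return PeerInfo(peer_id, addrs)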

View File

@ -0,0 +1,121 @@
"""
Comprehensive integration tests for mDNS discovery functionality.
"""
import socket
import time
from zeroconf import ServiceInfo, Zeroconf
from libp2p.discovery.mdns.broadcaster import PeerBroadcaster
from libp2p.discovery.mdns.listener import PeerListener
from libp2p.peer.id import ID
from libp2p.peer.peerstore import PeerStore
class TestMDNSDiscovery:
"""Comprehensive integration tests for mDNS peer discovery."""
def test_one_host_finds_another(self):
"""Test that one host can find another host using mDNS."""
# Create two separate Zeroconf instances to simulate different hosts
host1_zeroconf = Zeroconf()
host2_zeroconf = Zeroconf()
try:
# Host 1: Set up as broadcaster (the host to be discovered)
host1_peer_id_obj = ID.from_base58(
"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"
)
host1_peer_id = str(host1_peer_id_obj) # Convert to string
host1_broadcaster = PeerBroadcaster(
zeroconf=host1_zeroconf,
service_type="_p2p._udp.local.",
service_name="host1._p2p._udp.local.",
peer_id=host1_peer_id,
port=8000,
)
# Host 2: Set up as listener (the host that discovers others)
host2_peerstore = PeerStore()
host2_listener = PeerListener(
peerstore=host2_peerstore,
zeroconf=host2_zeroconf,
service_type="_p2p._udp.local.",
service_name="host2._p2p._udp.local.",
)
# Host 1 registers its service for discovery
host1_broadcaster.register()
# mDNS discovery is asynchronous; poll briefly so the test is not flaky
deadline = time.time() + 5.0
while time.time() < deadline and not host2_listener.discovered_services:
    time.sleep(0.1)
# Verify that host2 discovered host1
assert len(host2_listener.discovered_services) > 0
assert "host1._p2p._udp.local." in host2_listener.discovered_services
# Verify that host1's peer info was added to host2's peerstore
discovered_peer_id = host2_listener.discovered_services[
"host1._p2p._udp.local."
]
assert str(discovered_peer_id) == host1_peer_id
# Verify addresses were added to peerstore; a lookup failure here means
# the discovery didn't work properly
addrs = host2_peerstore.addrs(discovered_peer_id)
assert len(addrs) > 0, "Host1 addresses should be in Host2's peerstore"
# Should be TCP since we always use TCP protocol
assert "/tcp/8000" in str(addrs[0])
# Clean up
host1_broadcaster.unregister()
host2_listener.stop()
finally:
host1_zeroconf.close()
host2_zeroconf.close()
def test_service_info_extraction(self):
"""Test service info extraction functionality."""
peerstore = PeerStore()
zeroconf = Zeroconf()
try:
listener = PeerListener(
peerstore=peerstore,
zeroconf=zeroconf,
service_type="_p2p._udp.local.",
service_name="test-listener._p2p._udp.local.",
)
# Create a test service info
test_peer_id = ID.from_base58(
"QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N"
)
hostname = socket.gethostname()
service_info = ServiceInfo(
type_="_p2p._udp.local.",
name="test-service._p2p._udp.local.",
port=8001,
properties={b"id": str(test_peer_id).encode()},
server=f"{hostname}.local.",
addresses=[socket.inet_aton("192.168.1.100")],
)
# Test extraction
peer_info = listener._extract_peer_info(service_info)
assert peer_info is not None
assert peer_info.peer_id == test_peer_id
assert len(peer_info.addrs) == 1
assert "/tcp/8001" in str(peer_info.addrs[0])
print("✅ Service info extraction test successful!")
print(f" Extracted peer ID: {peer_info.peer_id}")
print(f" Extracted addresses: {[str(addr) for addr in peer_info.addrs]}")
finally:
zeroconf.close()

View File

@ -0,0 +1,39 @@
"""
Basic unit tests for mDNS utils module.
"""
import string
from libp2p.discovery.mdns.utils import stringGen
class TestStringGen:
"""Unit tests for stringGen function."""
def test_stringgen_default_length(self):
"""Test stringGen with default length (63)."""
result = stringGen()
assert isinstance(result, str)
assert len(result) == 63
# Check that all characters are from the expected charset
charset = string.ascii_lowercase + string.digits
for char in result:
assert char in charset
def test_stringgen_custom_length(self):
"""Test stringGen with custom lengths."""
# Test various lengths
test_lengths = [1, 5, 10, 20, 50, 100]
for length in test_lengths:
result = stringGen(length)
assert isinstance(result, str)
assert len(result) == length
# Check that all characters are from the expected charset
charset = string.ascii_lowercase + string.digits
for char in result:
assert char in charset
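These assertions fully determine the observable behavior of `stringGen`; a minimal conforming sketch (the real module may differ, e.g. in its choice of RNG):

    import random
    import string


    def stringGen(length: int = 63) -> str:
        # 63, the maximum DNS label length, is a natural default for mDNS names
        charset = string.ascii_lowercase + string.digits
        return "".join(random.choice(charset) for _ in range(length))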

View File

@ -443,6 +443,10 @@ class GossipsubFactory(factory.Factory):
heartbeat_interval = GOSSIPSUB_PARAMS.heartbeat_interval
direct_connect_initial_delay = GOSSIPSUB_PARAMS.direct_connect_initial_delay
direct_connect_interval = GOSSIPSUB_PARAMS.direct_connect_interval
do_px = GOSSIPSUB_PARAMS.do_px
px_peers_count = GOSSIPSUB_PARAMS.px_peers_count
prune_back_off = GOSSIPSUB_PARAMS.prune_back_off
unsubscribe_back_off = GOSSIPSUB_PARAMS.unsubscribe_back_off
class PubsubFactory(factory.Factory):
@ -568,6 +572,10 @@ class PubsubFactory(factory.Factory):
heartbeat_initial_delay: float = GOSSIPSUB_PARAMS.heartbeat_initial_delay,
direct_connect_initial_delay: float = GOSSIPSUB_PARAMS.direct_connect_initial_delay, # noqa: E501
direct_connect_interval: int = GOSSIPSUB_PARAMS.direct_connect_interval,
do_px: bool = GOSSIPSUB_PARAMS.do_px,
px_peers_count: int = GOSSIPSUB_PARAMS.px_peers_count,
prune_back_off: int = GOSSIPSUB_PARAMS.prune_back_off,
unsubscribe_back_off: int = GOSSIPSUB_PARAMS.unsubscribe_back_off,
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
msg_id_constructor: None
@ -588,6 +596,10 @@ class PubsubFactory(factory.Factory):
heartbeat_interval=heartbeat_interval,
direct_connect_initial_delay=direct_connect_initial_delay,
direct_connect_interval=direct_connect_interval,
do_px=do_px,
px_peers_count=px_peers_count,
prune_back_off=prune_back_off,
unsubscribe_back_off=unsubscribe_back_off,
)
else:
gossipsubs = GossipsubFactory.create_batch(
@ -602,6 +614,10 @@ class PubsubFactory(factory.Factory):
heartbeat_initial_delay=heartbeat_initial_delay,
direct_connect_initial_delay=direct_connect_initial_delay,
direct_connect_interval=direct_connect_interval,
do_px=do_px,
px_peers_count=px_peers_count,
prune_back_off=prune_back_off,
unsubscribe_back_off=unsubscribe_back_off,
)
async with cls._create_batch_with_router(
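With the new knobs threaded through both branches, a test can override them per batch; a usage sketch mirroring the tests earlier in this diff (values illustrative):

    async with PubsubFactory.create_batch_with_gossipsub(
        2,
        heartbeat_interval=0.5,
        do_px=True,
        px_peers_count=16,
        prune_back_off=2,
        unsubscribe_back_off=10,
    ) as pubsubs:
        ...  # each pubsubs[i].router is a GossipSub carrying these overrides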

View File

@ -24,16 +24,22 @@ def make_pubsub_msg(
)
# TODO: Implement sparse connect
async def dense_connect(hosts: Sequence[IHost]) -> None:
await connect_some(hosts, 10)
# FIXME: `degree` is not used at all
async def connect_some(hosts: Sequence[IHost], degree: int) -> None:
"""
Connect each host to up to 'degree' number of other hosts.
Creates a sparse network topology where each node has limited connections.
"""
for i, host in enumerate(hosts):
for host2 in hosts[i + 1 :]:
await connect(host, host2)
connections_made = 0
for j in range(i + 1, len(hosts)):
if connections_made >= degree:
break
await connect(host, hosts[j])
connections_made += 1
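Usage sketch: with degree 2 each host opens at most two forward connections (only toward later-indexed hosts), so the graph stays sparse; passing len(hosts) restores effectively dense behavior:

    await connect_some(hosts, 2)            # sparse: at most 2 forward links per host
    await connect_some(hosts, len(hosts))   # effectively the old dense behavior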
async def one_to_all_connect(hosts: Sequence[IHost], central_host_index: int) -> None: