Merge branch 'main' into main

Manu Sheel Gupta
2025-06-30 07:43:32 -07:00
committed by GitHub
44 changed files with 1685 additions and 109 deletions

View File

@@ -32,6 +32,9 @@ from libp2p.custom_types (
TProtocol,
TSecurityOptions,
)
from libp2p.discovery.mdns.mdns import (
MDNSDiscovery,
)
from libp2p.host.basic_host import (
BasicHost,
)
@@ -245,6 +248,7 @@ def new_host(
disc_opt: IPeerRouting | None = None,
muxer_preference: Literal["YAMUX", "MPLEX"] | None = None,
listen_addrs: Sequence[multiaddr.Multiaddr] | None = None,
enable_mDNS: bool = False,
) -> IHost:
"""
Create a new libp2p host based on the given parameters.
@@ -256,6 +260,7 @@ def new_host(
:param disc_opt: optional discovery
:param muxer_preference: optional explicit muxer preference
:param listen_addrs: optional list of multiaddrs to listen on
:param enable_mDNS: whether to enable mDNS discovery
:return: return a host instance
"""
swarm = new_swarm(
@@ -268,8 +273,7 @@
)
if disc_opt is not None:
return RoutedHost(swarm, disc_opt)
return BasicHost(swarm)
return RoutedHost(swarm, disc_opt, enable_mDNS)
return BasicHost(swarm, enable_mDNS)
__version__ = __version("libp2p")
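A minimal usage sketch of the new flag, assuming a trio entry point (the listen address and port are illustrative):

import multiaddr
import trio

from libp2p import new_host

async def main() -> None:
    host = new_host(enable_mDNS=True)  # advertise and browse for LAN peers via mDNS
    listen = multiaddr.Multiaddr("/ip4/0.0.0.0/tcp/8000")
    async with host.run(listen_addrs=[listen]):
        await trio.sleep_forever()

trio.run(main)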

View File

View File

View File

@@ -0,0 +1,26 @@
from collections.abc import (
Callable,
)
from libp2p.abc import (
PeerInfo,
)
TTL: int = 60 * 60 # Time-to-live for discovered peers in seconds
class PeerDiscovery:
def __init__(self) -> None:
self._peer_discovered_handlers: list[Callable[[PeerInfo], None]] = []
def register_peer_discovered_handler(
self, handler: Callable[[PeerInfo], None]
) -> None:
self._peer_discovered_handlers.append(handler)
def emit_peer_discovered(self, peer_info: PeerInfo) -> None:
for handler in self._peer_discovered_handlers:
handler(peer_info)
peerDiscovery = PeerDiscovery()
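A short sketch of consuming these events (the handler name is illustrative):

from libp2p.abc import PeerInfo
from libp2p.discovery.events.peerDiscovery import peerDiscovery

def on_peer_discovered(peer_info: PeerInfo) -> None:
    print(f"discovered peer {peer_info.peer_id}")

peerDiscovery.register_peer_discovered_handler(on_peer_discovered)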

View File

View File

@@ -0,0 +1,91 @@
import logging
import socket
from zeroconf import (
EventLoopBlocked,
ServiceInfo,
Zeroconf,
)
logger = logging.getLogger("libp2p.discovery.mdns.broadcaster")
class PeerBroadcaster:
"""
Broadcasts this peer's presence on the local network using mDNS/zeroconf.
Registers a service with the peer's ID in the TXT record, as per the libp2p mDNS spec.
"""
def __init__(
self,
zeroconf: Zeroconf,
service_type: str,
service_name: str,
peer_id: str,
port: int,
):
self.zeroconf = zeroconf
self.service_type = service_type
self.peer_id = peer_id
self.port = port
self.service_name = service_name
# Get the local IP address
local_ip = self._get_local_ip()
hostname = socket.gethostname()
self.service_info = ServiceInfo(
type_=self.service_type,
name=self.service_name,
port=self.port,
properties={b"id": self.peer_id.encode()},
server=f"{hostname}.local.",
addresses=[socket.inet_aton(local_ip)],
)
def _get_local_ip(self) -> str:
"""Get the local IP address of this machine"""
try:
# Connect to a remote address to determine the local IP
# This doesn't actually send data
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.connect(("8.8.8.8", 80))
local_ip = s.getsockname()[0]
return local_ip
except Exception:
# Fallback to localhost if we can't determine the IP
return "127.0.0.1"
def register(self) -> None:
"""Register the peer's mDNS service on the network."""
try:
self.zeroconf.register_service(self.service_info)
logger.debug(f"mDNS service registered: {self.service_name}")
except EventLoopBlocked as e:
logger.warning(
"EventLoopBlocked while registering mDNS '%s': %s", self.service_name, e
)
except Exception as e:
logger.error(
"Unexpected error during mDNS registration for '%s': %r",
self.service_name,
e,
)
def unregister(self) -> None:
"""Unregister the peer's mDNS service from the network."""
try:
self.zeroconf.unregister_service(self.service_info)
logger.debug(f"mDNS service unregistered: {self.service_name}")
except EventLoopBlocked as e:
logger.warning(
"EventLoopBlocked while unregistering mDNS '%s': %s",
self.service_name,
e,
)
except Exception as e:
logger.error(
"Unexpected error during mDNS unregistration for '%s': %r",
self.service_name,
e,
)
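A hedged sketch of driving the broadcaster by hand; MDNSDiscovery below normally does this wiring. The import path is inferred from the logger name, and the service/peer values are illustrative:

from zeroconf import Zeroconf

from libp2p.discovery.mdns.broadcaster import PeerBroadcaster

zc = Zeroconf()
broadcaster = PeerBroadcaster(
    zeroconf=zc,
    service_type="_p2p._udp.local.",
    service_name="mypeer._p2p._udp.local.",
    peer_id="QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N",  # illustrative ID
    port=8000,
)
broadcaster.register()    # peer is now visible to mDNS browsers on the LAN
broadcaster.unregister()  # withdraw the advertisement
zc.close()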

View File

@@ -0,0 +1,83 @@
import logging
import socket
from zeroconf import (
ServiceBrowser,
ServiceInfo,
ServiceListener,
Zeroconf,
)
from libp2p.abc import IPeerStore, Multiaddr
from libp2p.discovery.events.peerDiscovery import peerDiscovery
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo
logger = logging.getLogger("libp2p.discovery.mdns.listener")
class PeerListener(ServiceListener):
"""mDNS listener — now a true ServiceListener subclass."""
def __init__(
self,
peerstore: IPeerStore,
zeroconf: Zeroconf,
service_type: str,
service_name: str,
) -> None:
self.peerstore = peerstore
self.zeroconf = zeroconf
self.service_type = service_type
self.service_name = service_name
self.discovered_services: dict[str, ID] = {}
self.browser = ServiceBrowser(self.zeroconf, self.service_type, listener=self)
def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
if name == self.service_name:
return
logger.debug(f"Adding service: {name}")
info = zc.get_service_info(type_, name, timeout=5000)
if not info:
return
peer_info = self._extract_peer_info(info)
if peer_info:
self.discovered_services[name] = peer_info.peer_id
self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)
peerDiscovery.emit_peer_discovered(peer_info)
logger.debug(f"Discovered Peer: {peer_info.peer_id}")
def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
if name == self.service_name:
return
logger.debug(f"Removing service: {name}")
peer_id = self.discovered_services.pop(name, None)
if peer_id is None:
    # Service was never resolved into a peer; nothing to clean up.
    return
self.peerstore.clear_addrs(peer_id)
logger.debug(f"Removed Peer: {peer_id}")
def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
info = zc.get_service_info(type_, name, timeout=5000)
if not info:
return
peer_info = self._extract_peer_info(info)
if peer_info:
self.peerstore.clear_addrs(peer_info.peer_id)
self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)
logger.debug(f"Updated Peer {peer_info.peer_id}")
def _extract_peer_info(self, info: ServiceInfo) -> PeerInfo | None:
try:
addrs = [
Multiaddr(f"/ip4/{socket.inet_ntoa(addr)}/tcp/{info.port}")
for addr in info.addresses
]
pid_bytes = info.properties.get(b"id")
if not pid_bytes:
return None
pid = ID.from_base58(pid_bytes.decode())
return PeerInfo(peer_id=pid, addrs=addrs)
except Exception:
return None
def stop(self) -> None:
self.browser.cancel()

View File

@@ -0,0 +1,73 @@
"""
mDNS-based peer discovery for py-libp2p.
Conforms to https://github.com/libp2p/specs/blob/master/discovery/mdns.md
Uses zeroconf for mDNS broadcast/listen. Async operations use trio.
"""
import logging
from zeroconf import (
Zeroconf,
)
from libp2p.abc import (
INetworkService,
)
from .broadcaster import (
PeerBroadcaster,
)
from .listener import (
PeerListener,
)
from .utils import (
stringGen,
)
logger = logging.getLogger("libp2p.discovery.mdns")
SERVICE_TYPE = "_p2p._udp.local."
MCAST_PORT = 5353
MCAST_ADDR = "224.0.0.251"
class MDNSDiscovery:
"""
mDNS-based peer discovery for py-libp2p, using zeroconf.
Conforms to the libp2p mDNS discovery spec.
"""
def __init__(self, swarm: INetworkService, port: int = 8000):
self.peer_id = str(swarm.get_peer_id())
self.port = port
self.zeroconf = Zeroconf()
self.serviceName = f"{stringGen()}.{SERVICE_TYPE}"
self.peerstore = swarm.peerstore
self.swarm = swarm
self.broadcaster = PeerBroadcaster(
zeroconf=self.zeroconf,
service_type=SERVICE_TYPE,
service_name=self.serviceName,
peer_id=self.peer_id,
port=self.port,
)
self.listener = PeerListener(
zeroconf=self.zeroconf,
peerstore=self.peerstore,
service_type=SERVICE_TYPE,
service_name=self.serviceName,
)
def start(self) -> None:
"""Register this peer and start listening for others."""
logger.debug(
f"Starting mDNS discovery for peer {self.peer_id} on port {self.port}"
)
self.broadcaster.register()
# Listener is started in constructor
def stop(self) -> None:
"""Unregister this peer and clean up zeroconf resources."""
logger.debug("Stopping mDNS discovery")
self.broadcaster.unregister()
self.zeroconf.close()
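A lifecycle sketch, assuming a swarm built with the library's new_swarm factory and its default arguments (the port is illustrative):

from libp2p import new_swarm
from libp2p.discovery.mdns.mdns import MDNSDiscovery

swarm = new_swarm()
discovery = MDNSDiscovery(swarm, port=8000)
discovery.start()   # registers this peer; the listener is already browsing
# ... peers appearing on the LAN are added to swarm.peerstore ...
discovery.stop()    # unregisters the service and closes Zeroconf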

View File

@@ -0,0 +1,11 @@
import random
import string
def stringGen(length: int = 63) -> str:
    """Generate a random string of lowercase letters and digits."""
    charset = string.ascii_lowercase + string.digits
    return "".join(random.choice(charset) for _ in range(length))

View File

@@ -29,6 +29,7 @@ from libp2p.custom_types (
StreamHandlerFn,
TProtocol,
)
from libp2p.discovery.mdns.mdns import MDNSDiscovery
from libp2p.host.defaults import (
get_default_protocols,
)
@@ -89,6 +90,7 @@ class BasicHost(IHost):
def __init__(
self,
network: INetworkService,
enable_mDNS: bool = False,
default_protocols: Optional["OrderedDict[TProtocol, StreamHandlerFn]"] = None,
) -> None:
self._network = network
@@ -98,6 +100,8 @@ class BasicHost(IHost):
default_protocols = default_protocols or get_default_protocols(self)
self.multiselect = Multiselect(dict(default_protocols.items()))
self.multiselect_client = MultiselectClient()
if enable_mDNS:
self.mDNS = MDNSDiscovery(network)
def get_id(self) -> ID:
"""
@@ -162,7 +166,14 @@ class BasicHost(IHost):
network = self.get_network()
async with background_trio_service(network):
await network.listen(*listen_addrs)
yield
if hasattr(self, "mDNS") and self.mDNS is not None:
logger.debug("Starting mDNS Discovery")
self.mDNS.start()
try:
yield
finally:
if hasattr(self, "mDNS") and self.mDNS is not None:
self.mDNS.stop()
return _run()

View File

@@ -18,8 +18,10 @@ from libp2p.peer.peerinfo import (
class RoutedHost(BasicHost):
_router: IPeerRouting
def __init__(self, network: INetworkService, router: IPeerRouting):
super().__init__(network)
def __init__(
self, network: INetworkService, router: IPeerRouting, enable_mDNS: bool = False
):
super().__init__(network, enable_mDNS)
self._router = router
async def connect(self, peer_info: PeerInfo) -> None:

View File

@@ -66,5 +66,23 @@ def info_from_p2p_addr(addr: multiaddr.Multiaddr) -> PeerInfo:
return PeerInfo(peer_id, [addr])
def peer_info_to_bytes(peer_info: PeerInfo) -> bytes:
lines = [str(peer_info.peer_id)] + [str(addr) for addr in peer_info.addrs]
return "\n".join(lines).encode("utf-8")
def peer_info_from_bytes(data: bytes) -> PeerInfo:
try:
lines = data.decode("utf-8").splitlines()
if not lines:
raise InvalidAddrError("no data to decode PeerInfo")
peer_id = ID.from_base58(lines[0])
addrs = [multiaddr.Multiaddr(addr_str) for addr_str in lines[1:]]
return PeerInfo(peer_id, addrs)
except Exception as e:
    raise InvalidAddrError(f"failed to decode PeerInfo: {e}") from e
class InvalidAddrError(ValueError):
pass
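The wire format is line-oriented: the base58 peer ID on the first line, then one multiaddr per line. A round-trip sketch (the ID and address are illustrative):

import multiaddr

from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo, peer_info_from_bytes, peer_info_to_bytes

peer_id = ID.from_base58("QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N")
info = PeerInfo(peer_id, [multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/8000")])
data = peer_info_to_bytes(info)   # ID line + one line per multiaddr
restored = peer_info_from_bytes(data)
assert restored.peer_id == peer_id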

View File

@@ -12,15 +12,9 @@ from libp2p.abc import (
from libp2p.custom_types import (
TProtocol,
)
from libp2p.network.stream.exceptions import (
StreamClosed,
)
from libp2p.peer.id import (
ID,
)
from libp2p.utils import (
encode_varint_prefixed,
)
from .exceptions import (
PubsubRouterError,
@@ -120,13 +114,7 @@ class FloodSub(IPubsubRouter):
if peer_id not in pubsub.peers:
continue
stream = pubsub.peers[peer_id]
# FIXME: We should add a `WriteMsg` similar to write delimited messages.
# Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107
try:
await stream.write(encode_varint_prefixed(rpc_msg.SerializeToString()))
except StreamClosed:
logger.debug("Fail to publish message to %s: stream closed", peer_id)
pubsub._handle_dead_peer(peer_id)
await pubsub.write_msg(stream, rpc_msg)
async def join(self, topic: str) -> None:
"""

View File

@@ -24,14 +24,13 @@ from libp2p.abc import (
from libp2p.custom_types import (
TProtocol,
)
from libp2p.network.stream.exceptions import (
StreamClosed,
)
from libp2p.peer.id import (
ID,
)
from libp2p.peer.peerinfo import (
PeerInfo,
peer_info_from_bytes,
peer_info_to_bytes,
)
from libp2p.peer.peerstore import (
PERMANENT_ADDR_TTL,
@@ -42,9 +41,6 @@ from libp2p.pubsub import (
from libp2p.tools.async_service import (
Service,
)
from libp2p.utils import (
encode_varint_prefixed,
)
from .exceptions import (
NoPubsubAttached,
@@ -92,6 +88,12 @@ class GossipSub(IPubsubRouter, Service):
direct_connect_initial_delay: float
direct_connect_interval: int
do_px: bool
px_peers_count: int
back_off: dict[str, dict[ID, int]]
prune_back_off: int
unsubscribe_back_off: int
def __init__(
self,
protocols: Sequence[TProtocol],
@@ -106,6 +108,10 @@ class GossipSub(IPubsubRouter, Service):
heartbeat_interval: int = 120,
direct_connect_initial_delay: float = 0.1,
direct_connect_interval: int = 300,
do_px: bool = False,
px_peers_count: int = 16,
prune_back_off: int = 60,
unsubscribe_back_off: int = 10,
) -> None:
self.protocols = list(protocols)
self.pubsub = None
@@ -140,6 +146,12 @@ class GossipSub(IPubsubRouter, Service):
self.direct_connect_initial_delay = direct_connect_initial_delay
self.time_since_last_publish = {}
self.do_px = do_px
self.px_peers_count = px_peers_count
self.back_off = dict()
self.prune_back_off = prune_back_off
self.unsubscribe_back_off = unsubscribe_back_off
async def run(self) -> None:
self.manager.run_daemon_task(self.heartbeat)
if len(self.direct_peers) > 0:
@@ -249,14 +261,10 @@
if peer_id not in self.pubsub.peers:
continue
stream = self.pubsub.peers[peer_id]
# FIXME: We should add a `WriteMsg` similar to write delimited messages.
# Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107
# TODO: Go use `sendRPC`, which possibly piggybacks gossip/control messages.
try:
await stream.write(encode_varint_prefixed(rpc_msg.SerializeToString()))
except StreamClosed:
logger.debug("Fail to publish message to %s: stream closed", peer_id)
self.pubsub._handle_dead_peer(peer_id)
await self.pubsub.write_msg(stream, rpc_msg)
for topic in pubsub_msg.topicIDs:
self.time_since_last_publish[topic] = int(time.time())
@@ -334,15 +342,22 @@
self.mesh[topic] = set()
topic_in_fanout: bool = topic in self.fanout
fanout_peers: set[ID] = self.fanout[topic] if topic_in_fanout else set()
fanout_peers: set[ID] = set()
if topic_in_fanout:
for peer in self.fanout[topic]:
if self._check_back_off(peer, topic):
continue
fanout_peers.add(peer)
fanout_size = len(fanout_peers)
if not topic_in_fanout or (topic_in_fanout and fanout_size < self.degree):
# The topic has fewer than D fanout peers (call this number x),
# or is absent from the fanout entirely; select the remaining
# (D - x) peers from peers.gossipsub[topic].
if topic in self.pubsub.peer_topics:
if self.pubsub is not None and topic in self.pubsub.peer_topics:
selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
topic, self.degree - fanout_size, fanout_peers
topic, self.degree - fanout_size, fanout_peers, True
)
# Combine fanout peers with selected peers
fanout_peers.update(selected_peers)
@@ -369,7 +384,8 @@
return
# Notify the peers in mesh[topic] with a PRUNE(topic) message
for peer in self.mesh[topic]:
await self.emit_prune(topic, peer)
await self.emit_prune(topic, peer, self.do_px, True)
self._add_back_off(peer, topic, True)
# Forget mesh[topic]
self.mesh.pop(topic, None)
@@ -459,8 +475,8 @@
self.fanout_heartbeat()
# Get the peers to send IHAVE to
peers_to_gossip = self.gossip_heartbeat()
# Pack GRAFT, PRUNE and IHAVE for the same peer into one control message and
# send it
# Pack (piggyback) GRAFT, PRUNE and IHAVE for the same peer into
# one control message and send it
await self._emit_control_msgs(
peers_to_graft, peers_to_prune, peers_to_gossip
)
@@ -505,7 +521,7 @@
if num_mesh_peers_in_topic < self.degree_low:
# Select D - |mesh[topic]| peers from peers.gossipsub[topic] - mesh[topic] # noqa: E501
selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
topic, self.degree - num_mesh_peers_in_topic, self.mesh[topic]
topic, self.degree - num_mesh_peers_in_topic, self.mesh[topic], True
)
for peer in selected_peers:
@@ -568,9 +584,7 @@
if len(in_topic_peers) < self.degree:
# Select additional peers from peers.gossipsub[topic]
selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
topic,
self.degree - len(in_topic_peers),
in_topic_peers,
topic, self.degree - len(in_topic_peers), in_topic_peers, True
)
# Add the selected peers
in_topic_peers.update(selected_peers)
@@ -581,7 +595,7 @@
if msg_ids:
# Select D peers from peers.gossipsub[topic] excluding current peers
peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
topic, self.degree, current_peers
topic, self.degree, current_peers, True
)
msg_id_strs = [str(msg_id) for msg_id in msg_ids]
for peer in peers_to_emit_ihave_to:
@@ -655,7 +669,11 @@
return selection
def _get_in_topic_gossipsub_peers_from_minus(
self, topic: str, num_to_select: int, minus: Iterable[ID]
self,
topic: str,
num_to_select: int,
minus: Iterable[ID],
backoff_check: bool = False,
) -> list[ID]:
if self.pubsub is None:
raise NoPubsubAttached
@@ -664,8 +682,88 @@
for peer_id in self.pubsub.peer_topics[topic]
if self.peer_protocol[peer_id] == PROTOCOL_ID
}
if backoff_check:
# filter out peers that are in back off for this topic
gossipsub_peers_in_topic = {
peer_id
for peer_id in gossipsub_peers_in_topic
if self._check_back_off(peer_id, topic) is False
}
return self.select_from_minus(num_to_select, gossipsub_peers_in_topic, minus)
def _add_back_off(
self, peer: ID, topic: str, is_unsubscribe: bool, backoff_duration: int = 0
) -> None:
"""
Add back off for a peer in a topic.
:param peer: peer to add back off for
:param topic: topic to add back off for
:param is_unsubscribe: whether this is an unsubscribe operation
:param backoff_duration: back off duration in seconds; if 0, the default for the operation is used
"""
if topic not in self.back_off:
self.back_off[topic] = dict()
backoff_till = int(time.time())
if backoff_duration > 0:
backoff_till += backoff_duration
else:
if is_unsubscribe:
backoff_till += self.unsubscribe_back_off
else:
backoff_till += self.prune_back_off
if peer not in self.back_off[topic]:
self.back_off[topic][peer] = backoff_till
else:
self.back_off[topic][peer] = max(self.back_off[topic][peer], backoff_till)
def _check_back_off(self, peer: ID, topic: str) -> bool:
"""
Check if a peer is in back off for a topic and clean up expired back off entries.
:param peer: peer to check
:param topic: topic to check
:return: True if the peer is in back off, False otherwise
"""
if topic not in self.back_off or peer not in self.back_off[topic]:
return False
if self.back_off[topic].get(peer, 0) > int(time.time()):
return True
else:
del self.back_off[topic][peer]
return False
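A small usage sketch of the two helpers above; gs and peer stand in for an attached GossipSub router and a peer ID, and the timing assumes the default prune_back_off=60:

gs._add_back_off(peer, "topic-a", is_unsubscribe=False)  # back off until now + 60s
assert gs._check_back_off(peer, "topic-a")               # inside the window: True
# After the 60 seconds elapse, the next _check_back_off call deletes the
# expired entry and returns False, making the peer eligible for GRAFT again.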
async def _do_px(self, px_peers: list[rpc_pb2.PeerInfo]) -> None:
if len(px_peers) > self.px_peers_count:
px_peers = px_peers[: self.px_peers_count]
for peer in px_peers:
peer_id: ID = ID(peer.peerID)
if self.pubsub and peer_id in self.pubsub.peers:
continue
try:
peer_info = peer_info_from_bytes(peer.signedPeerRecord)
try:
if self.pubsub is None:
raise NoPubsubAttached
await self.pubsub.host.connect(peer_info)
except Exception as e:
logger.warning(
"failed to connect to px peer %s: %s",
peer_id,
e,
)
continue
except Exception as e:
logger.warning(
"failed to parse peer info from px peer %s: %s",
peer_id,
e,
)
continue
# RPC handlers
async def handle_ihave(
@@ -722,8 +820,6 @@
packet.publish.extend(msgs_to_forward)
# 2) Serialize that packet
rpc_msg: bytes = packet.SerializeToString()
if self.pubsub is None:
raise NoPubsubAttached
@@ -737,14 +833,7 @@
peer_stream = self.pubsub.peers[sender_peer_id]
# 4) And write the packet to the stream
try:
await peer_stream.write(encode_varint_prefixed(rpc_msg))
except StreamClosed:
logger.debug(
"Fail to responed to iwant request from %s: stream closed",
sender_peer_id,
)
self.pubsub._handle_dead_peer(sender_peer_id)
await self.pubsub.write_msg(peer_stream, packet)
async def handle_graft(
self, graft_msg: rpc_pb2.ControlGraft, sender_peer_id: ID
@@ -758,24 +847,46 @@
logger.warning(
"GRAFT: ignoring request from direct peer %s", sender_peer_id
)
await self.emit_prune(topic, sender_peer_id)
await self.emit_prune(topic, sender_peer_id, False, False)
return
if self._check_back_off(sender_peer_id, topic):
logger.warning(
"GRAFT: ignoring request from %s, back off until %d",
sender_peer_id,
self.back_off[topic][sender_peer_id],
)
self._add_back_off(sender_peer_id, topic, False)
await self.emit_prune(topic, sender_peer_id, False, False)
return
if sender_peer_id not in self.mesh[topic]:
self.mesh[topic].add(sender_peer_id)
else:
# Respond with PRUNE if not subscribed to the topic
await self.emit_prune(topic, sender_peer_id)
await self.emit_prune(topic, sender_peer_id, self.do_px, False)
async def handle_prune(
self, prune_msg: rpc_pb2.ControlPrune, sender_peer_id: ID
) -> None:
topic: str = prune_msg.topicID
backoff_till: int = prune_msg.backoff
px_peers: list[rpc_pb2.PeerInfo] = []
for peer in prune_msg.peers:
px_peers.append(peer)
# Remove peer from mesh for topic
if topic in self.mesh:
if backoff_till > 0:
self._add_back_off(sender_peer_id, topic, False, backoff_till)
else:
self._add_back_off(sender_peer_id, topic, False)
self.mesh[topic].discard(sender_peer_id)
if px_peers:
await self._do_px(px_peers)
# RPC emitters
def pack_control_msgs(
@@ -824,15 +935,36 @@
await self.emit_control_message(control_msg, id)
async def emit_prune(self, topic: str, id: ID) -> None:
async def emit_prune(
self, topic: str, to_peer: ID, do_px: bool, is_unsubscribe: bool
) -> None:
"""Emit graft message, sent to to_peer, for topic."""
prune_msg: rpc_pb2.ControlPrune = rpc_pb2.ControlPrune()
prune_msg.topicID = topic
back_off_duration = self.prune_back_off
if is_unsubscribe:
back_off_duration = self.unsubscribe_back_off
prune_msg.backoff = back_off_duration
if do_px:
exchange_peers = self._get_in_topic_gossipsub_peers_from_minus(
topic, self.px_peers_count, [to_peer]
)
for peer in exchange_peers:
if self.pubsub is None:
raise NoPubsubAttached
peer_info = self.pubsub.host.get_peerstore().peer_info(peer)
signed_peer_record: rpc_pb2.PeerInfo = rpc_pb2.PeerInfo()
signed_peer_record.peerID = peer.to_bytes()
signed_peer_record.signedPeerRecord = peer_info_to_bytes(peer_info)
prune_msg.peers.append(signed_peer_record)
control_msg: rpc_pb2.ControlMessage = rpc_pb2.ControlMessage()
control_msg.prune.extend([prune_msg])
await self.emit_control_message(control_msg, id)
await self.emit_control_message(control_msg, to_peer)
async def emit_control_message(
self, control_msg: rpc_pb2.ControlMessage, to_peer: ID
@@ -843,8 +975,6 @@
packet: rpc_pb2.RPC = rpc_pb2.RPC()
packet.control.CopyFrom(control_msg)
rpc_msg: bytes = packet.SerializeToString()
# Get stream for peer from pubsub
if to_peer not in self.pubsub.peers:
logger.debug(
@@ -854,8 +984,4 @@
peer_stream = self.pubsub.peers[to_peer]
# Write rpc to stream
try:
await peer_stream.write(encode_varint_prefixed(rpc_msg))
except StreamClosed:
logger.debug("Fail to emit control message to %s: stream closed", to_peer)
self.pubsub._handle_dead_peer(to_peer)
await self.pubsub.write_msg(peer_stream, packet)

View File

@@ -47,6 +47,13 @@ message ControlGraft {
message ControlPrune {
optional string topicID = 1;
repeated PeerInfo peers = 2;
optional uint64 backoff = 3;
}
message PeerInfo {
optional bytes peerID = 1;
optional bytes signedPeerRecord = 2;
}
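For reference, a sketch of building the extended PRUNE from Python via the generated module below (field values are illustrative):

from libp2p.pubsub.pb import rpc_pb2

prune = rpc_pb2.ControlPrune()
prune.topicID = "topic-a"     # illustrative topic
prune.backoff = 60            # seconds the receiver should back off
px = prune.peers.add()        # one peer-exchange record
px.peerID = b"illustrative-peer-id-bytes"
px.signedPeerRecord = b"illustrative-serialized-peer-info"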
message TopicDescriptor {

View File

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: libp2p/pubsub/pb/rpc.proto
# source: rpc.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
@@ -13,37 +13,39 @@ _sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1alibp2p/pubsub/pb/rpc.proto\x12\tpubsub.pb\"\xb4\x01\n\x03RPC\x12-\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x16.pubsub.pb.RPC.SubOpts\x12#\n\x07publish\x18\x02 \x03(\x0b\x32\x12.pubsub.pb.Message\x12*\n\x07\x63ontrol\x18\x03 \x01(\x0b\x32\x19.pubsub.pb.ControlMessage\x1a-\n\x07SubOpts\x12\x11\n\tsubscribe\x18\x01 \x01(\x08\x12\x0f\n\x07topicid\x18\x02 \x01(\t\"i\n\x07Message\x12\x0f\n\x07\x66rom_id\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\r\n\x05seqno\x18\x03 \x01(\x0c\x12\x10\n\x08topicIDs\x18\x04 \x03(\t\x12\x11\n\tsignature\x18\x05 \x01(\x0c\x12\x0b\n\x03key\x18\x06 \x01(\x0c\"\xb0\x01\n\x0e\x43ontrolMessage\x12&\n\x05ihave\x18\x01 \x03(\x0b\x32\x17.pubsub.pb.ControlIHave\x12&\n\x05iwant\x18\x02 \x03(\x0b\x32\x17.pubsub.pb.ControlIWant\x12&\n\x05graft\x18\x03 \x03(\x0b\x32\x17.pubsub.pb.ControlGraft\x12&\n\x05prune\x18\x04 \x03(\x0b\x32\x17.pubsub.pb.ControlPrune\"3\n\x0c\x43ontrolIHave\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\x12\n\nmessageIDs\x18\x02 \x03(\t\"\"\n\x0c\x43ontrolIWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\t\"\x1f\n\x0c\x43ontrolGraft\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"\x1f\n\x0c\x43ontrolPrune\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"\x87\x03\n\x0fTopicDescriptor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x04\x61uth\x18\x02 \x01(\x0b\x32#.pubsub.pb.TopicDescriptor.AuthOpts\x12/\n\x03\x65nc\x18\x03 \x01(\x0b\x32\".pubsub.pb.TopicDescriptor.EncOpts\x1a|\n\x08\x41uthOpts\x12:\n\x04mode\x18\x01 \x01(\x0e\x32,.pubsub.pb.TopicDescriptor.AuthOpts.AuthMode\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\"&\n\x08\x41uthMode\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03KEY\x10\x01\x12\x07\n\x03WOT\x10\x02\x1a\x83\x01\n\x07\x45ncOpts\x12\x38\n\x04mode\x18\x01 \x01(\x0e\x32*.pubsub.pb.TopicDescriptor.EncOpts.EncMode\x12\x11\n\tkeyHashes\x18\x02 \x03(\x0c\"+\n\x07\x45ncMode\x12\x08\n\x04NONE\x10\x00\x12\r\n\tSHAREDKEY\x10\x01\x12\x07\n\x03WOT\x10\x02')
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\trpc.proto\x12\tpubsub.pb\"\xb4\x01\n\x03RPC\x12-\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x16.pubsub.pb.RPC.SubOpts\x12#\n\x07publish\x18\x02 \x03(\x0b\x32\x12.pubsub.pb.Message\x12*\n\x07\x63ontrol\x18\x03 \x01(\x0b\x32\x19.pubsub.pb.ControlMessage\x1a-\n\x07SubOpts\x12\x11\n\tsubscribe\x18\x01 \x01(\x08\x12\x0f\n\x07topicid\x18\x02 \x01(\t\"i\n\x07Message\x12\x0f\n\x07\x66rom_id\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\r\n\x05seqno\x18\x03 \x01(\x0c\x12\x10\n\x08topicIDs\x18\x04 \x03(\t\x12\x11\n\tsignature\x18\x05 \x01(\x0c\x12\x0b\n\x03key\x18\x06 \x01(\x0c\"\xb0\x01\n\x0e\x43ontrolMessage\x12&\n\x05ihave\x18\x01 \x03(\x0b\x32\x17.pubsub.pb.ControlIHave\x12&\n\x05iwant\x18\x02 \x03(\x0b\x32\x17.pubsub.pb.ControlIWant\x12&\n\x05graft\x18\x03 \x03(\x0b\x32\x17.pubsub.pb.ControlGraft\x12&\n\x05prune\x18\x04 \x03(\x0b\x32\x17.pubsub.pb.ControlPrune\"3\n\x0c\x43ontrolIHave\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\x12\n\nmessageIDs\x18\x02 \x03(\t\"\"\n\x0c\x43ontrolIWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\t\"\x1f\n\x0c\x43ontrolGraft\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"T\n\x0c\x43ontrolPrune\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\"\n\x05peers\x18\x02 \x03(\x0b\x32\x13.pubsub.pb.PeerInfo\x12\x0f\n\x07\x62\x61\x63koff\x18\x03 \x01(\x04\"4\n\x08PeerInfo\x12\x0e\n\x06peerID\x18\x01 \x01(\x0c\x12\x18\n\x10signedPeerRecord\x18\x02 \x01(\x0c\"\x87\x03\n\x0fTopicDescriptor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x04\x61uth\x18\x02 \x01(\x0b\x32#.pubsub.pb.TopicDescriptor.AuthOpts\x12/\n\x03\x65nc\x18\x03 \x01(\x0b\x32\".pubsub.pb.TopicDescriptor.EncOpts\x1a|\n\x08\x41uthOpts\x12:\n\x04mode\x18\x01 \x01(\x0e\x32,.pubsub.pb.TopicDescriptor.AuthOpts.AuthMode\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\"&\n\x08\x41uthMode\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03KEY\x10\x01\x12\x07\n\x03WOT\x10\x02\x1a\x83\x01\n\x07\x45ncOpts\x12\x38\n\x04mode\x18\x01 \x01(\x0e\x32*.pubsub.pb.TopicDescriptor.EncOpts.EncMode\x12\x11\n\tkeyHashes\x18\x02 \x03(\x0c\"+\n\x07\x45ncMode\x12\x08\n\x04NONE\x10\x00\x12\r\n\tSHAREDKEY\x10\x01\x12\x07\n\x03WOT\x10\x02')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.pubsub.pb.rpc_pb2', globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'rpc_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_RPC._serialized_start=42
_RPC._serialized_end=222
_RPC_SUBOPTS._serialized_start=177
_RPC_SUBOPTS._serialized_end=222
_MESSAGE._serialized_start=224
_MESSAGE._serialized_end=329
_CONTROLMESSAGE._serialized_start=332
_CONTROLMESSAGE._serialized_end=508
_CONTROLIHAVE._serialized_start=510
_CONTROLIHAVE._serialized_end=561
_CONTROLIWANT._serialized_start=563
_CONTROLIWANT._serialized_end=597
_CONTROLGRAFT._serialized_start=599
_CONTROLGRAFT._serialized_end=630
_CONTROLPRUNE._serialized_start=632
_CONTROLPRUNE._serialized_end=663
_TOPICDESCRIPTOR._serialized_start=666
_TOPICDESCRIPTOR._serialized_end=1057
_TOPICDESCRIPTOR_AUTHOPTS._serialized_start=799
_TOPICDESCRIPTOR_AUTHOPTS._serialized_end=923
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_start=885
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_end=923
_TOPICDESCRIPTOR_ENCOPTS._serialized_start=926
_TOPICDESCRIPTOR_ENCOPTS._serialized_end=1057
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_start=1014
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_end=1057
_RPC._serialized_start=25
_RPC._serialized_end=205
_RPC_SUBOPTS._serialized_start=160
_RPC_SUBOPTS._serialized_end=205
_MESSAGE._serialized_start=207
_MESSAGE._serialized_end=312
_CONTROLMESSAGE._serialized_start=315
_CONTROLMESSAGE._serialized_end=491
_CONTROLIHAVE._serialized_start=493
_CONTROLIHAVE._serialized_end=544
_CONTROLIWANT._serialized_start=546
_CONTROLIWANT._serialized_end=580
_CONTROLGRAFT._serialized_start=582
_CONTROLGRAFT._serialized_end=613
_CONTROLPRUNE._serialized_start=615
_CONTROLPRUNE._serialized_end=699
_PEERINFO._serialized_start=701
_PEERINFO._serialized_end=753
_TOPICDESCRIPTOR._serialized_start=756
_TOPICDESCRIPTOR._serialized_end=1147
_TOPICDESCRIPTOR_AUTHOPTS._serialized_start=889
_TOPICDESCRIPTOR_AUTHOPTS._serialized_end=1013
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_start=975
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_end=1013
_TOPICDESCRIPTOR_ENCOPTS._serialized_start=1016
_TOPICDESCRIPTOR_ENCOPTS._serialized_end=1147
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_start=1104
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_end=1147
# @@protoc_insertion_point(module_scope)

View File

@@ -179,17 +179,43 @@ class ControlPrune(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
TOPICID_FIELD_NUMBER: builtins.int
PEERS_FIELD_NUMBER: builtins.int
BACKOFF_FIELD_NUMBER: builtins.int
topicID: builtins.str
backoff: builtins.int
@property
def peers(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___PeerInfo]: ...
def __init__(
self,
*,
topicID: builtins.str | None = ...,
peers: collections.abc.Iterable[global___PeerInfo] | None = ...,
backoff: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["topicID", b"topicID"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["topicID", b"topicID"]) -> None: ...
def HasField(self, field_name: typing.Literal["backoff", b"backoff", "topicID", b"topicID"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["backoff", b"backoff", "peers", b"peers", "topicID", b"topicID"]) -> None: ...
global___ControlPrune = ControlPrune
@typing.final
class PeerInfo(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
PEERID_FIELD_NUMBER: builtins.int
SIGNEDPEERRECORD_FIELD_NUMBER: builtins.int
peerID: builtins.bytes
signedPeerRecord: builtins.bytes
def __init__(
self,
*,
peerID: builtins.bytes | None = ...,
signedPeerRecord: builtins.bytes | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["peerID", b"peerID", "signedPeerRecord", b"signedPeerRecord"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["peerID", b"peerID", "signedPeerRecord", b"signedPeerRecord"]) -> None: ...
global___PeerInfo = PeerInfo
@typing.final
class TopicDescriptor(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

View File

@@ -66,6 +66,7 @@ from libp2p.utils import (
encode_varint_prefixed,
read_varint_prefixed_bytes,
)
from libp2p.utils.varint import encode_uvarint
from .pb import (
rpc_pb2,
@@ -682,19 +683,18 @@ class Pubsub(Service, IPubsub):
# TODO: Implement throttle on async validators
if len(async_topic_validators) > 0:
# TODO: Use a better pattern
final_result: bool = True
# Appends to lists are thread safe in CPython
results = []
async def run_async_validator(func: AsyncValidatorFn) -> None:
nonlocal final_result
result = await func(msg_forwarder, msg)
final_result = final_result and result
results.append(result)
async with trio.open_nursery() as nursery:
for async_validator in async_topic_validators:
nursery.start_soon(run_async_validator, async_validator)
if not final_result:
if not all(results):
raise ValidationError(f"Validation failed for msg={msg}")
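A self-contained sketch of the nursery-plus-results pattern, with illustrative stand-ins for the async validators:

import trio

async def validator_a() -> bool:  # stand-in for an AsyncValidatorFn
    await trio.sleep(0.01)
    return True

async def validator_b() -> bool:
    await trio.sleep(0.01)
    return False

async def main() -> None:
    results: list[bool] = []

    async def run_one(fn) -> None:
        results.append(await fn())  # append is safe under trio's single-task scheduling

    async with trio.open_nursery() as nursery:
        for fn in (validator_a, validator_b):
            nursery.start_soon(run_one, fn)
    print(all(results))  # False: one validator rejected

trio.run(main)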
async def push_msg(self, msg_forwarder: ID, msg: rpc_pb2.Message) -> None:
@@ -779,3 +779,43 @@ class Pubsub(Service, IPubsub):
def _is_subscribed_to_msg(self, msg: rpc_pb2.Message) -> bool:
return any(topic in self.topic_ids for topic in msg.topicIDs)
async def write_msg(self, stream: INetStream, rpc_msg: rpc_pb2.RPC) -> bool:
"""
Write an RPC message to a stream with proper error handling.
Implements WriteMsg similar to the one in go-msgio, which go-libp2p uses.
Ref: https://github.com/libp2p/go-msgio/blob/master/protoio/uvarint_writer.go#L56
:param stream: stream to write the message to
:param rpc_msg: RPC message to write
:return: True if successful, False if stream was closed
"""
try:
# Calculate message size first
msg_bytes = rpc_msg.SerializeToString()
msg_size = len(msg_bytes)
# Calculate varint size and allocate exact buffer size needed
varint_bytes = encode_uvarint(msg_size)
varint_size = len(varint_bytes)
# Allocate buffer with exact size (like Go's pool.Get())
buf = bytearray(varint_size + msg_size)
# Write varint length prefix to buffer (like Go's binary.PutUvarint())
buf[:varint_size] = varint_bytes
# Write serialized message after varint (like Go's rpc.MarshalTo())
buf[varint_size:] = msg_bytes
# Single write operation (like Go's s.Write(buf))
await stream.write(bytes(buf))
return True
except StreamClosed:
peer_id = stream.muxed_conn.peer_id
logger.debug("Fail to write message to %s: stream closed", peer_id)
self._handle_dead_peer(peer_id)
return False
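The framing written above is a uvarint length prefix followed by the serialized message; a standalone sketch of the same framing (the payload bytes are illustrative):

from libp2p.utils.varint import encode_uvarint

payload = b"\x08\x01"  # stand-in for rpc_msg.SerializeToString()
frame = encode_uvarint(len(payload)) + payload
assert frame == b"\x02\x08\x01"  # one length byte, then the message body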

View File

@@ -542,7 +542,7 @@ class Yamux(IMuxedConn):
f"type={typ}, flags={flags}, stream_id={stream_id},"
f"length={length}"
)
if typ == TYPE_DATA and flags & FLAG_SYN:
if (typ == TYPE_DATA or typ == TYPE_WINDOW_UPDATE) and flags & FLAG_SYN:
async with self.streams_lock:
if stream_id not in self.streams:
stream = YamuxStream(stream_id, self, False)

View File

@@ -26,6 +26,7 @@ LISTEN_MADDR = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")
FLOODSUB_PROTOCOL_ID = floodsub.PROTOCOL_ID
GOSSIPSUB_PROTOCOL_ID = gossipsub.PROTOCOL_ID
GOSSIPSUB_PROTOCOL_ID_V1 = gossipsub.PROTOCOL_ID_V11
class GossipsubParams(NamedTuple):
@@ -40,6 +41,10 @@ class GossipsubParams(NamedTuple):
heartbeat_interval: float = 0.5
direct_connect_initial_delay: float = 0.1
direct_connect_interval: int = 300
do_px: bool = False
px_peers_count: int = 16
prune_back_off: int = 60
unsubscribe_back_off: int = 10
GOSSIPSUB_PARAMS = GossipsubParams()
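A sketch of opting into peer exchange in tests through these parameters (the values are illustrative):

px_params = GossipsubParams(
    do_px=True,             # attach PX records to PRUNE messages
    px_peers_count=8,
    prune_back_off=30,
    unsubscribe_back_off=5,
)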