Mirror of https://github.com/varun-r-mallya/py-libp2p.git (synced 2025-12-31 20:36:24 +00:00)
Merge branch 'main' into write_msg_pubsub
@@ -2130,14 +2130,14 @@ class IPubsub(ServiceAPI):
     ...

     @abstractmethod
-    async def publish(self, topic_id: str, data: bytes) -> None:
+    async def publish(self, topic_id: str | list[str], data: bytes) -> None:
         """
-        Publish a message to a topic.
+        Publish a message to a topic or multiple topics.

         Parameters
         ----------
-        topic_id : str
-            The identifier of the topic.
+        topic_id : str | list[str]
+            The identifier of the topic (str) or topics (list[str]).
         data : bytes
             The data to publish.

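The widened signature keeps single-topic calls working and additionally accepts a list of topics. A minimal usage sketch (the pubsub setup is assumed and is not part of this diff):

    # both forms are valid after this change
    await pubsub.publish("topic-a", b"hello")
    await pubsub.publish(["topic-a", "topic-b"], b"hello")
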
libp2p/kad_dht/__init__.py (new file, 30 lines)
@@ -0,0 +1,30 @@
"""
Kademlia DHT implementation for py-libp2p.

This module provides a Distributed Hash Table (DHT) implementation
based on the Kademlia protocol.
"""

from .kad_dht import (
    KadDHT,
)
from .peer_routing import (
    PeerRouting,
)
from .routing_table import (
    RoutingTable,
)
from .utils import (
    create_key_from_binary,
)
from .value_store import (
    ValueStore,
)

__all__ = [
    "KadDHT",
    "RoutingTable",
    "PeerRouting",
    "ValueStore",
    "create_key_from_binary",
]
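With these re-exports, callers can import the DHT components from the package root instead of reaching into individual submodules, for example:

    from libp2p.kad_dht import KadDHT, PeerRouting, RoutingTable, ValueStore
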
libp2p/kad_dht/kad_dht.py (new file, 616 lines)
@@ -0,0 +1,616 @@
"""
Kademlia DHT implementation for py-libp2p.

This module provides a complete Distributed Hash Table (DHT)
implementation based on the Kademlia algorithm and protocol.
"""

from enum import Enum
import logging
import time

from multiaddr import (
    Multiaddr,
)
import trio
import varint

from libp2p.abc import (
    IHost,
)
from libp2p.custom_types import (
    TProtocol,
)
from libp2p.network.stream.net_stream import (
    INetStream,
)
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerinfo import (
    PeerInfo,
)
from libp2p.tools.async_service import (
    Service,
)

from .pb.kademlia_pb2 import (
    Message,
)
from .peer_routing import (
    PeerRouting,
)
from .provider_store import (
    ProviderStore,
)
from .routing_table import (
    RoutingTable,
)
from .value_store import (
    ValueStore,
)

logger = logging.getLogger("kademlia-example.kad_dht")
# logger = logging.getLogger("libp2p.kademlia")

# Default parameters
PROTOCOL_ID = TProtocol("/ipfs/kad/1.0.0")
ROUTING_TABLE_REFRESH_INTERVAL = 1 * 60  # 1 min in seconds for testing
TTL = 24 * 60 * 60  # 24 hours in seconds
ALPHA = 3
QUERY_TIMEOUT = 10  # seconds


class DHTMode(Enum):
    """DHT operation modes."""

    CLIENT = "CLIENT"
    SERVER = "SERVER"


class KadDHT(Service):
    """
    Kademlia DHT implementation for libp2p.

    This class provides a DHT implementation that combines routing table
    management, peer discovery, content routing, and value storage.
    """

    def __init__(self, host: IHost, mode: DHTMode):
        """
        Initialize a new Kademlia DHT node.

        :param host: The libp2p host.
        :param mode: The mode of the host (client or server); must be a DHTMode enum
        """
        super().__init__()

        self.host = host
        self.local_peer_id = host.get_id()

        # Validate that mode is a DHTMode enum
        if not isinstance(mode, DHTMode):
            raise TypeError(f"mode must be DHTMode enum, got {type(mode)}")

        self.mode = mode

        # Initialize the routing table
        self.routing_table = RoutingTable(self.local_peer_id, self.host)

        # Initialize peer routing
        self.peer_routing = PeerRouting(host, self.routing_table)

        # Initialize value store
        self.value_store = ValueStore(host=host, local_peer_id=self.local_peer_id)

        # Initialize provider store with host and peer_routing references
        self.provider_store = ProviderStore(host=host, peer_routing=self.peer_routing)

        # Last time we republished provider records
        self._last_provider_republish = time.time()

        # Set protocol handlers
        host.set_stream_handler(PROTOCOL_ID, self.handle_stream)

    async def run(self) -> None:
        """Run the DHT service."""
        logger.info(f"Starting Kademlia DHT with peer ID {self.local_peer_id}")

        # Main service loop
        while self.manager.is_running:
            # Periodically refresh the routing table
            await self.refresh_routing_table()

            # Check if it's time to republish provider records
            current_time = time.time()
            # await self._republish_provider_records()
            self._last_provider_republish = current_time

            # Clean up expired values and provider records
            expired_values = self.value_store.cleanup_expired()
            if expired_values > 0:
                logger.debug(f"Cleaned up {expired_values} expired values")

            self.provider_store.cleanup_expired()

            # Wait before the next maintenance cycle
            await trio.sleep(ROUTING_TABLE_REFRESH_INTERVAL)

    async def switch_mode(self, new_mode: DHTMode) -> DHTMode:
        """
        Switch the DHT mode.

        :param new_mode: The new mode; must be a DHTMode enum
        :return: The new mode as a DHTMode enum
        """
        # Validate that new_mode is a DHTMode enum
        if not isinstance(new_mode, DHTMode):
            raise TypeError(f"new_mode must be DHTMode enum, got {type(new_mode)}")

        if new_mode == DHTMode.CLIENT:
            self.routing_table.cleanup_routing_table()
        self.mode = new_mode
        logger.info(f"Switched to {new_mode.value} mode")
        return self.mode

    async def handle_stream(self, stream: INetStream) -> None:
        """
        Handle an incoming DHT stream using varint length prefixes.
        """
        if self.mode == DHTMode.CLIENT:
            await stream.close()
            return
        peer_id = stream.muxed_conn.peer_id
        logger.debug(f"Received DHT stream from peer {peer_id}")
        await self.add_peer(peer_id)
        logger.debug(f"Added peer {peer_id} to routing table")

        try:
            # Read varint-prefixed length for the message
            length_prefix = b""
            while True:
                byte = await stream.read(1)
                if not byte:
                    logger.warning("Stream closed while reading varint length")
                    await stream.close()
                    return
                length_prefix += byte
                if byte[0] & 0x80 == 0:
                    break
            msg_length = varint.decode_bytes(length_prefix)

            # Read the message bytes
            msg_bytes = await stream.read(msg_length)
            if len(msg_bytes) < msg_length:
                logger.warning("Failed to read full message from stream")
                await stream.close()
                return

            try:
                # Parse as protobuf
                message = Message()
                message.ParseFromString(msg_bytes)
                logger.debug(
                    f"Received DHT message from {peer_id}, type: {message.type}"
                )

                # Handle FIND_NODE message
                if message.type == Message.MessageType.FIND_NODE:
                    # Get target key directly from protobuf
                    target_key = message.key

                    # Find closest peers to the target key
                    closest_peers = self.routing_table.find_local_closest_peers(
                        target_key, 20
                    )
                    logger.debug(f"Found {len(closest_peers)} peers close to target")

                    # Build response message with protobuf
                    response = Message()
                    response.type = Message.MessageType.FIND_NODE

                    # Add closest peers to response
                    for peer in closest_peers:
                        # Skip if the peer is the requester
                        if peer == peer_id:
                            continue

                        # Add peer to closerPeers field
                        peer_proto = response.closerPeers.add()
                        peer_proto.id = peer.to_bytes()
                        peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                        # Add addresses if available
                        try:
                            addrs = self.host.get_peerstore().addrs(peer)
                            if addrs:
                                for addr in addrs:
                                    peer_proto.addrs.append(addr.to_bytes())
                        except Exception:
                            pass

                    # Serialize and send response
                    response_bytes = response.SerializeToString()
                    await stream.write(varint.encode(len(response_bytes)))
                    await stream.write(response_bytes)
                    logger.debug(
                        f"Sent FIND_NODE response with {len(response.closerPeers)} peers"
                    )

                # Handle ADD_PROVIDER message
                elif message.type == Message.MessageType.ADD_PROVIDER:
                    # Process ADD_PROVIDER
                    key = message.key
                    logger.debug(f"Received ADD_PROVIDER for key {key.hex()}")

                    # Extract provider information
                    for provider_proto in message.providerPeers:
                        try:
                            # Validate that the provider is the sender
                            provider_id = ID(provider_proto.id)
                            if provider_id != peer_id:
                                logger.warning(
                                    f"Provider ID {provider_id} doesn't "
                                    f"match sender {peer_id}, ignoring"
                                )
                                continue

                            # Convert addresses to Multiaddr
                            addrs = []
                            for addr_bytes in provider_proto.addrs:
                                try:
                                    addrs.append(Multiaddr(addr_bytes))
                                except Exception as e:
                                    logger.warning(f"Failed to parse address: {e}")

                            # Add to provider store
                            provider_info = PeerInfo(provider_id, addrs)
                            self.provider_store.add_provider(key, provider_info)
                            logger.debug(
                                f"Added provider {provider_id} for key {key.hex()}"
                            )
                        except Exception as e:
                            logger.warning(f"Failed to process provider info: {e}")

                    # Send acknowledgement
                    response = Message()
                    response.type = Message.MessageType.ADD_PROVIDER
                    response.key = key

                    response_bytes = response.SerializeToString()
                    await stream.write(varint.encode(len(response_bytes)))
                    await stream.write(response_bytes)
                    logger.debug("Sent ADD_PROVIDER acknowledgement")

                # Handle GET_PROVIDERS message
                elif message.type == Message.MessageType.GET_PROVIDERS:
                    # Process GET_PROVIDERS
                    key = message.key
                    logger.debug(f"Received GET_PROVIDERS request for key {key.hex()}")

                    # Find providers for the key
                    providers = self.provider_store.get_providers(key)
                    logger.debug(
                        f"Found {len(providers)} providers for key {key.hex()}"
                    )

                    # Create response
                    response = Message()
                    response.type = Message.MessageType.GET_PROVIDERS
                    response.key = key

                    # Add provider information to response
                    for provider_info in providers:
                        provider_proto = response.providerPeers.add()
                        provider_proto.id = provider_info.peer_id.to_bytes()
                        provider_proto.connection = Message.ConnectionType.CAN_CONNECT

                        # Add addresses if available
                        for addr in provider_info.addrs:
                            provider_proto.addrs.append(addr.to_bytes())

                    # Also include closest peers if we don't have providers
                    if not providers:
                        closest_peers = self.routing_table.find_local_closest_peers(
                            key, 20
                        )
                        logger.debug(
                            f"No providers found, including {len(closest_peers)} "
                            "closest peers"
                        )

                        for peer in closest_peers:
                            # Skip if peer is the requester
                            if peer == peer_id:
                                continue

                            peer_proto = response.closerPeers.add()
                            peer_proto.id = peer.to_bytes()
                            peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                            # Add addresses if available
                            try:
                                addrs = self.host.get_peerstore().addrs(peer)
                                for addr in addrs:
                                    peer_proto.addrs.append(addr.to_bytes())
                            except Exception:
                                pass

                    # Serialize and send response
                    response_bytes = response.SerializeToString()
                    await stream.write(varint.encode(len(response_bytes)))
                    await stream.write(response_bytes)
                    logger.debug("Sent GET_PROVIDERS response")

                # Handle GET_VALUE message
                elif message.type == Message.MessageType.GET_VALUE:
                    # Process GET_VALUE
                    key = message.key
                    logger.debug(f"Received GET_VALUE request for key {key.hex()}")

                    value = self.value_store.get(key)
                    if value:
                        logger.debug(f"Found value for key {key.hex()}")

                        # Create response using protobuf
                        response = Message()
                        response.type = Message.MessageType.GET_VALUE

                        # Create record
                        response.key = key
                        response.record.key = key
                        response.record.value = value
                        response.record.timeReceived = str(time.time())

                        # Serialize and send response
                        response_bytes = response.SerializeToString()
                        await stream.write(varint.encode(len(response_bytes)))
                        await stream.write(response_bytes)
                        logger.debug("Sent GET_VALUE response")
                    else:
                        logger.debug(f"No value found for key {key.hex()}")

                        # Create response with closest peers when no value is found
                        response = Message()
                        response.type = Message.MessageType.GET_VALUE
                        response.key = key

                        # Add closest peers to the key
                        closest_peers = self.routing_table.find_local_closest_peers(
                            key, 20
                        )
                        logger.debug(
                            "No value found, "
                            f"including {len(closest_peers)} closest peers"
                        )

                        for peer in closest_peers:
                            # Skip if peer is the requester
                            if peer == peer_id:
                                continue

                            peer_proto = response.closerPeers.add()
                            peer_proto.id = peer.to_bytes()
                            peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                            # Add addresses if available
                            try:
                                addrs = self.host.get_peerstore().addrs(peer)
                                for addr in addrs:
                                    peer_proto.addrs.append(addr.to_bytes())
                            except Exception:
                                pass

                        # Serialize and send response
                        response_bytes = response.SerializeToString()
                        await stream.write(varint.encode(len(response_bytes)))
                        await stream.write(response_bytes)
                        logger.debug("Sent GET_VALUE response with closest peers")

                # Handle PUT_VALUE message
                elif message.type == Message.MessageType.PUT_VALUE and message.HasField(
                    "record"
                ):
                    # Process PUT_VALUE
                    key = message.record.key
                    value = message.record.value
                    success = False
                    try:
                        if not (key and value):
                            raise ValueError(
                                "Missing key or value in PUT_VALUE message"
                            )

                        self.value_store.put(key, value)
                        logger.debug(f"Stored value {value.hex()} for key {key.hex()}")
                        success = True
                    except Exception as e:
                        logger.warning(
                            f"Failed to store value {value.hex()} for key "
                            f"{key.hex()}: {e}"
                        )
                    finally:
                        # Send acknowledgement
                        response = Message()
                        response.type = Message.MessageType.PUT_VALUE
                        if success:
                            response.key = key
                        response_bytes = response.SerializeToString()
                        await stream.write(varint.encode(len(response_bytes)))
                        await stream.write(response_bytes)
                        logger.debug("Sent PUT_VALUE acknowledgement")

            except Exception as proto_err:
                logger.warning(f"Failed to parse protobuf message: {proto_err}")

            await stream.close()
        except Exception as e:
            logger.error(f"Error handling DHT stream: {e}")
            await stream.close()

    async def refresh_routing_table(self) -> None:
        """Refresh the routing table."""
        logger.debug("Refreshing routing table")
        await self.peer_routing.refresh_routing_table()

    # Peer routing methods

    async def find_peer(self, peer_id: ID) -> PeerInfo | None:
        """
        Find a peer with the given ID.
        """
        logger.debug(f"Finding peer: {peer_id}")
        return await self.peer_routing.find_peer(peer_id)

    # Value storage and retrieval methods

    async def put_value(self, key: bytes, value: bytes) -> None:
        """
        Store a value in the DHT.
        """
        logger.debug(f"Storing value for key {key.hex()}")

        # 1. Store locally first
        self.value_store.put(key, value)
        try:
            decoded_value = value.decode("utf-8")
        except UnicodeDecodeError:
            decoded_value = value.hex()
        logger.debug(
            f"Stored value locally for key {key.hex()} with value {decoded_value}"
        )

        # 2. Get closest peers, excluding self
        closest_peers = [
            peer
            for peer in self.routing_table.find_local_closest_peers(key)
            if peer != self.local_peer_id
        ]
        logger.debug(f"Found {len(closest_peers)} peers to store value at")

        # 3. Store at remote peers in batches of ALPHA, in parallel
        stored_count = 0
        for i in range(0, len(closest_peers), ALPHA):
            batch = closest_peers[i : i + ALPHA]
            batch_results = [False] * len(batch)

            async def store_one(idx: int, peer: ID) -> None:
                try:
                    with trio.move_on_after(QUERY_TIMEOUT):
                        success = await self.value_store._store_at_peer(
                            peer, key, value
                        )
                        batch_results[idx] = success
                        if success:
                            logger.debug(f"Stored value at peer {peer}")
                        else:
                            logger.debug(f"Failed to store value at peer {peer}")
                except Exception as e:
                    logger.debug(f"Error storing value at peer {peer}: {e}")

            async with trio.open_nursery() as nursery:
                for idx, peer in enumerate(batch):
                    nursery.start_soon(store_one, idx, peer)

            stored_count += sum(batch_results)

        logger.info(f"Successfully stored value at {stored_count} peers")

    async def get_value(self, key: bytes) -> bytes | None:
        """Get a value from the DHT, checking the local store before the network."""
        logger.debug(f"Getting value for key: {key.hex()}")

        # 1. Check local store first
        value = self.value_store.get(key)
        if value:
            logger.debug("Found value locally")
            return value

        # 2. Get closest peers, excluding self
        closest_peers = [
            peer
            for peer in self.routing_table.find_local_closest_peers(key)
            if peer != self.local_peer_id
        ]
        logger.debug(f"Searching {len(closest_peers)} peers for value")

        # 3. Query ALPHA peers at a time in parallel
        for i in range(0, len(closest_peers), ALPHA):
            batch = closest_peers[i : i + ALPHA]
            found_value = None

            async def query_one(peer: ID) -> None:
                nonlocal found_value
                try:
                    with trio.move_on_after(QUERY_TIMEOUT):
                        value = await self.value_store._get_from_peer(peer, key)
                        if value is not None and found_value is None:
                            found_value = value
                            logger.debug(f"Found value at peer {peer}")
                except Exception as e:
                    logger.debug(f"Error querying peer {peer}: {e}")

            async with trio.open_nursery() as nursery:
                for peer in batch:
                    nursery.start_soon(query_one, peer)

            if found_value is not None:
                self.value_store.put(key, found_value)
                logger.info("Successfully retrieved value from network")
                return found_value

        # 4. Not found
        logger.warning(f"Value not found for key {key.hex()}")
        return None

    # Utility methods

    async def add_peer(self, peer_id: ID) -> bool:
        """
        Add a peer to the routing table.

        :param peer_id: The peer ID to add.

        Returns
        -------
        bool
            True if the peer was added or updated, False otherwise.

        """
        return await self.routing_table.add_peer(peer_id)

    async def provide(self, key: bytes) -> bool:
        """
        Convenience wrapper around provider_store.provide.
        """
        return await self.provider_store.provide(key)

    async def find_providers(self, key: bytes, count: int = 20) -> list[PeerInfo]:
        """
        Convenience wrapper around provider_store.find_providers.
        """
        return await self.provider_store.find_providers(key, count)

    def get_routing_table_size(self) -> int:
        """
        Get the number of peers in the routing table.

        Returns
        -------
        int
            Number of peers.

        """
        return self.routing_table.size()

    def get_value_store_size(self) -> int:
        """
        Get the number of items in the value store.

        Returns
        -------
        int
            Number of items.

        """
        return self.value_store.size()
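A minimal usage sketch of the class above, assuming an already-constructed libp2p host; background_trio_service is assumed to be the vendored async-service runner exported by libp2p.tools.async_service, and the host variable is illustrative:

    import trio

    from libp2p.kad_dht import KadDHT
    from libp2p.kad_dht.kad_dht import DHTMode
    from libp2p.tools.async_service import background_trio_service

    async def run_dht(host) -> None:
        # start the DHT as a background service and use it while it runs
        dht = KadDHT(host, DHTMode.SERVER)
        async with background_trio_service(dht):
            await dht.put_value(b"example-key", b"example-value")
            value = await dht.get_value(b"example-key")
            print("got:", value)
            await trio.sleep(1)  # let a maintenance cycle run briefly
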
libp2p/kad_dht/pb/__init__.py (new file, empty)

libp2p/kad_dht/pb/kademlia.proto (new file, 38 lines)
@@ -0,0 +1,38 @@
syntax = "proto3";

message Record {
  bytes key = 1;
  bytes value = 2;
  string timeReceived = 5;
};

message Message {
  enum MessageType {
    PUT_VALUE = 0;
    GET_VALUE = 1;
    ADD_PROVIDER = 2;
    GET_PROVIDERS = 3;
    FIND_NODE = 4;
    PING = 5;
  }

  enum ConnectionType {
    NOT_CONNECTED = 0;
    CONNECTED = 1;
    CAN_CONNECT = 2;
    CANNOT_CONNECT = 3;
  }

  message Peer {
    bytes id = 1;
    repeated bytes addrs = 2;
    ConnectionType connection = 3;
  }

  MessageType type = 1;
  int32 clusterLevelRaw = 10;
  bytes key = 2;
  Record record = 3;
  repeated Peer closerPeers = 8;
  repeated Peer providerPeers = 9;
}
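Every RPC in this commit is framed the same way on the wire: a varint length prefix followed by the serialized Message. A minimal round-trip sketch using the generated bindings (the key bytes are placeholders):

    import varint

    from libp2p.kad_dht.pb.kademlia_pb2 import Message

    msg = Message()
    msg.type = Message.MessageType.FIND_NODE
    msg.key = b"target-key"  # placeholder target key

    payload = msg.SerializeToString()
    wire = varint.encode(len(payload)) + payload  # bytes written to the stream

    # a reader reverses the two steps
    length = varint.decode_bytes(wire)
    prefix_len = len(varint.encode(length))
    decoded = Message()
    decoded.ParseFromString(wire[prefix_len : prefix_len + length])
    assert decoded.type == Message.MessageType.FIND_NODE
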
libp2p/kad_dht/pb/kademlia_pb2.py (new file, 33 lines)
@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: libp2p/kad_dht/pb/kademlia.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n libp2p/kad_dht/pb/kademlia.proto\":\n\x06Record\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x14\n\x0ctimeReceived\x18\x05 \x01(\t\"\xca\x03\n\x07Message\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.Message.MessageType\x12\x17\n\x0f\x63lusterLevelRaw\x18\n \x01(\x05\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x12\x17\n\x06record\x18\x03 \x01(\x0b\x32\x07.Record\x12\"\n\x0b\x63loserPeers\x18\x08 \x03(\x0b\x32\r.Message.Peer\x12$\n\rproviderPeers\x18\t \x03(\x0b\x32\r.Message.Peer\x1aN\n\x04Peer\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05\x61\x64\x64rs\x18\x02 \x03(\x0c\x12+\n\nconnection\x18\x03 \x01(\x0e\x32\x17.Message.ConnectionType\"i\n\x0bMessageType\x12\r\n\tPUT_VALUE\x10\x00\x12\r\n\tGET_VALUE\x10\x01\x12\x10\n\x0c\x41\x44\x44_PROVIDER\x10\x02\x12\x11\n\rGET_PROVIDERS\x10\x03\x12\r\n\tFIND_NODE\x10\x04\x12\x08\n\x04PING\x10\x05\"W\n\x0e\x43onnectionType\x12\x11\n\rNOT_CONNECTED\x10\x00\x12\r\n\tCONNECTED\x10\x01\x12\x0f\n\x0b\x43\x41N_CONNECT\x10\x02\x12\x12\n\x0e\x43\x41NNOT_CONNECT\x10\x03\x62\x06proto3')

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.kad_dht.pb.kademlia_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  _globals['_RECORD']._serialized_start=36
  _globals['_RECORD']._serialized_end=94
  _globals['_MESSAGE']._serialized_start=97
  _globals['_MESSAGE']._serialized_end=555
  _globals['_MESSAGE_PEER']._serialized_start=281
  _globals['_MESSAGE_PEER']._serialized_end=359
  _globals['_MESSAGE_MESSAGETYPE']._serialized_start=361
  _globals['_MESSAGE_MESSAGETYPE']._serialized_end=466
  _globals['_MESSAGE_CONNECTIONTYPE']._serialized_start=468
  _globals['_MESSAGE_CONNECTIONTYPE']._serialized_end=555
# @@protoc_insertion_point(module_scope)
libp2p/kad_dht/pb/kademlia_pb2.pyi (new file, 133 lines)
@@ -0,0 +1,133 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""

import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import sys
import typing

if sys.version_info >= (3, 10):
    import typing as typing_extensions
else:
    import typing_extensions

DESCRIPTOR: google.protobuf.descriptor.FileDescriptor

@typing.final
class Record(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    KEY_FIELD_NUMBER: builtins.int
    VALUE_FIELD_NUMBER: builtins.int
    TIMERECEIVED_FIELD_NUMBER: builtins.int
    key: builtins.bytes
    value: builtins.bytes
    timeReceived: builtins.str
    def __init__(
        self,
        *,
        key: builtins.bytes = ...,
        value: builtins.bytes = ...,
        timeReceived: builtins.str = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing.Literal["key", b"key", "timeReceived", b"timeReceived", "value", b"value"]) -> None: ...

global___Record = Record

@typing.final
class Message(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class _MessageType:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _MessageTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[Message._MessageType.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        PUT_VALUE: Message._MessageType.ValueType  # 0
        GET_VALUE: Message._MessageType.ValueType  # 1
        ADD_PROVIDER: Message._MessageType.ValueType  # 2
        GET_PROVIDERS: Message._MessageType.ValueType  # 3
        FIND_NODE: Message._MessageType.ValueType  # 4
        PING: Message._MessageType.ValueType  # 5

    class MessageType(_MessageType, metaclass=_MessageTypeEnumTypeWrapper): ...
    PUT_VALUE: Message.MessageType.ValueType  # 0
    GET_VALUE: Message.MessageType.ValueType  # 1
    ADD_PROVIDER: Message.MessageType.ValueType  # 2
    GET_PROVIDERS: Message.MessageType.ValueType  # 3
    FIND_NODE: Message.MessageType.ValueType  # 4
    PING: Message.MessageType.ValueType  # 5

    class _ConnectionType:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _ConnectionTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[Message._ConnectionType.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        NOT_CONNECTED: Message._ConnectionType.ValueType  # 0
        CONNECTED: Message._ConnectionType.ValueType  # 1
        CAN_CONNECT: Message._ConnectionType.ValueType  # 2
        CANNOT_CONNECT: Message._ConnectionType.ValueType  # 3

    class ConnectionType(_ConnectionType, metaclass=_ConnectionTypeEnumTypeWrapper): ...
    NOT_CONNECTED: Message.ConnectionType.ValueType  # 0
    CONNECTED: Message.ConnectionType.ValueType  # 1
    CAN_CONNECT: Message.ConnectionType.ValueType  # 2
    CANNOT_CONNECT: Message.ConnectionType.ValueType  # 3

    @typing.final
    class Peer(google.protobuf.message.Message):
        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        ID_FIELD_NUMBER: builtins.int
        ADDRS_FIELD_NUMBER: builtins.int
        CONNECTION_FIELD_NUMBER: builtins.int
        id: builtins.bytes
        connection: global___Message.ConnectionType.ValueType
        @property
        def addrs(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bytes]: ...
        def __init__(
            self,
            *,
            id: builtins.bytes = ...,
            addrs: collections.abc.Iterable[builtins.bytes] | None = ...,
            connection: global___Message.ConnectionType.ValueType = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing.Literal["addrs", b"addrs", "connection", b"connection", "id", b"id"]) -> None: ...

    TYPE_FIELD_NUMBER: builtins.int
    CLUSTERLEVELRAW_FIELD_NUMBER: builtins.int
    KEY_FIELD_NUMBER: builtins.int
    RECORD_FIELD_NUMBER: builtins.int
    CLOSERPEERS_FIELD_NUMBER: builtins.int
    PROVIDERPEERS_FIELD_NUMBER: builtins.int
    type: global___Message.MessageType.ValueType
    clusterLevelRaw: builtins.int
    key: builtins.bytes
    @property
    def record(self) -> global___Record: ...
    @property
    def closerPeers(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Message.Peer]: ...
    @property
    def providerPeers(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Message.Peer]: ...
    def __init__(
        self,
        *,
        type: global___Message.MessageType.ValueType = ...,
        clusterLevelRaw: builtins.int = ...,
        key: builtins.bytes = ...,
        record: global___Record | None = ...,
        closerPeers: collections.abc.Iterable[global___Message.Peer] | None = ...,
        providerPeers: collections.abc.Iterable[global___Message.Peer] | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing.Literal["record", b"record"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing.Literal["closerPeers", b"closerPeers", "clusterLevelRaw", b"clusterLevelRaw", "key", b"key", "providerPeers", b"providerPeers", "record", b"record", "type", b"type"]) -> None: ...

global___Message = Message
libp2p/kad_dht/peer_routing.py (new file, 418 lines)
@@ -0,0 +1,418 @@
"""
Peer routing implementation for Kademlia DHT.

This module implements the peer routing interface using Kademlia's algorithm
to efficiently locate peers in a distributed network.
"""

import logging

from multiaddr import (
    Multiaddr,
)
import trio
import varint

from libp2p.abc import (
    IHost,
    INetStream,
    IPeerRouting,
)
from libp2p.custom_types import (
    TProtocol,
)
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerinfo import (
    PeerInfo,
)

from .pb.kademlia_pb2 import (
    Message,
)
from .routing_table import (
    RoutingTable,
)
from .utils import (
    sort_peer_ids_by_distance,
)

# logger = logging.getLogger("libp2p.kademlia.peer_routing")
logger = logging.getLogger("kademlia-example.peer_routing")

# Constants for the Kademlia algorithm
ALPHA = 3  # Concurrency parameter
MAX_PEER_LOOKUP_ROUNDS = 20  # Maximum number of rounds in a peer lookup
PROTOCOL_ID = TProtocol("/ipfs/kad/1.0.0")


class PeerRouting(IPeerRouting):
    """
    Implementation of peer routing using the Kademlia algorithm.

    This class provides methods to find peers in the DHT network
    and helps maintain the routing table.
    """

    def __init__(self, host: IHost, routing_table: RoutingTable):
        """
        Initialize the peer routing service.

        :param host: The libp2p host
        :param routing_table: The Kademlia routing table

        """
        self.host = host
        self.routing_table = routing_table
        self.protocol_id = PROTOCOL_ID

    async def find_peer(self, peer_id: ID) -> PeerInfo | None:
        """
        Find a peer with the given ID.

        :param peer_id: The ID of the peer to find

        Returns
        -------
        Optional[PeerInfo]
            The peer information if found, None otherwise

        """
        # Check if this is actually our own peer ID
        if peer_id == self.host.get_id():
            try:
                # Return our own peer info
                return PeerInfo(peer_id, self.host.get_addrs())
            except Exception:
                logger.exception("Error getting our own peer info")
                return None

        # First check if the peer is in our routing table
        peer_info = self.routing_table.get_peer_info(peer_id)
        if peer_info:
            logger.debug(f"Found peer {peer_id} in routing table")
            return peer_info

        # Then check if the peer is in our peerstore
        try:
            addrs = self.host.get_peerstore().addrs(peer_id)
            if addrs:
                logger.debug(f"Found peer {peer_id} in peerstore")
                return PeerInfo(peer_id, addrs)
        except Exception:
            pass

        # If not found locally, search the network
        try:
            closest_peers = await self.find_closest_peers_network(peer_id.to_bytes())
            logger.info(f"Closest peers found: {closest_peers}")

            # Check if we found the peer we're looking for
            for found_peer in closest_peers:
                if found_peer == peer_id:
                    try:
                        addrs = self.host.get_peerstore().addrs(found_peer)
                        if addrs:
                            return PeerInfo(found_peer, addrs)
                    except Exception:
                        pass

        except Exception as e:
            logger.error(f"Error searching for peer {peer_id}: {e}")

        # Not found
        logger.info(f"Peer {peer_id} not found")
        return None

    async def _query_single_peer_for_closest(
        self, peer: ID, target_key: bytes, new_peers: list[ID]
    ) -> None:
        """
        Query a single peer for closest peers and append results to the shared list.

        :param peer: The peer to query
        :param target_key: The target key to find closest peers for
        :param new_peers: Shared list to append results to

        """
        try:
            result = await self._query_peer_for_closest(peer, target_key)
            # Deduplicate to prevent duplicate peers in the shared list
            for peer_id in result:
                if peer_id not in new_peers:
                    new_peers.append(peer_id)
            logger.debug(
                "Queried peer %s for closest peers, got %d results",
                peer,
                len(result),
            )
        except Exception as e:
            logger.debug(f"Query to peer {peer} failed: {e}")

    async def find_closest_peers_network(
        self, target_key: bytes, count: int = 20
    ) -> list[ID]:
        """
        Find the closest peers to a target key in the entire network.

        Performs an iterative lookup by querying peers for their closest peers.

        Returns
        -------
        list[ID]
            Closest peer IDs

        """
        # Start with the closest peers from our routing table
        closest_peers = self.routing_table.find_local_closest_peers(target_key, count)
        logger.debug("Local closest peers: %d found", len(closest_peers))
        queried_peers: set[ID] = set()
        rounds = 0

        # Return early if we have no peers to start with
        if not closest_peers:
            logger.warning("No local peers available for network lookup")
            return []

        # Iterative lookup until convergence
        while rounds < MAX_PEER_LOOKUP_ROUNDS:
            rounds += 1
            logger.debug(f"Lookup round {rounds}/{MAX_PEER_LOOKUP_ROUNDS}")

            # Find peers we haven't queried yet
            peers_to_query = [p for p in closest_peers if p not in queried_peers]
            if not peers_to_query:
                logger.debug("No more unqueried peers available, ending lookup")
                break  # No more peers to query

            # Query these peers for their closest peers to the target
            peers_batch = peers_to_query[:ALPHA]  # Limit to ALPHA peers at a time

            # Mark these peers as queried before we actually query them
            for peer in peers_batch:
                queried_peers.add(peer)

            # Run queries in parallel for this batch using a trio nursery
            new_peers: list[ID] = []  # Shared list to collect all results

            async with trio.open_nursery() as nursery:
                for peer in peers_batch:
                    nursery.start_soon(
                        self._query_single_peer_for_closest, peer, target_key, new_peers
                    )

            # If we got no new peers, we're done
            if not new_peers:
                logger.debug("No new peers discovered in this round, ending lookup")
                break

            # Update our list of closest peers
            all_candidates = closest_peers + new_peers
            old_closest_peers = closest_peers[:]
            closest_peers = sort_peer_ids_by_distance(target_key, all_candidates)[
                :count
            ]
            logger.debug(f"Updated closest peers count: {len(closest_peers)}")

            # Check if we made any progress (found closer peers)
            if closest_peers == old_closest_peers:
                logger.debug("No improvement in closest peers, ending lookup")
                break

        logger.info(
            f"Network lookup completed after {rounds} rounds, "
            f"found {len(closest_peers)} peers"
        )
        return closest_peers

    async def _query_peer_for_closest(self, peer: ID, target_key: bytes) -> list[ID]:
        """
        Query a peer for its closest peers
        to the target key, using a varint length prefix.
        """
        stream = None
        results = []
        try:
            # Add the peer to our routing table regardless of query outcome
            try:
                addrs = self.host.get_peerstore().addrs(peer)
                if addrs:
                    peer_info = PeerInfo(peer, addrs)
                    await self.routing_table.add_peer(peer_info)
            except Exception as e:
                logger.debug(f"Failed to add peer {peer} to routing table: {e}")

            # Open a stream to the peer using the Kademlia protocol
            logger.debug(f"Opening stream to {peer} for closest peers query")
            try:
                stream = await self.host.new_stream(peer, [self.protocol_id])
                logger.debug(f"Stream opened to {peer}")
            except Exception as e:
                logger.warning(f"Failed to open stream to {peer}: {e}")
                return []

            # Create and send a FIND_NODE request using protobuf
            find_node_msg = Message()
            find_node_msg.type = Message.MessageType.FIND_NODE
            find_node_msg.key = target_key  # Set target key directly as bytes

            # Serialize and send the protobuf message with a varint length prefix
            proto_bytes = find_node_msg.SerializeToString()
            logger.debug(
                f"Sending FIND_NODE: {proto_bytes.hex()} (len={len(proto_bytes)})"
            )
            await stream.write(varint.encode(len(proto_bytes)))
            await stream.write(proto_bytes)

            # Read varint-prefixed response length
            length_bytes = b""
            while True:
                b = await stream.read(1)
                if not b:
                    logger.warning(
                        "Error reading varint length from stream: connection closed"
                    )
                    return []
                length_bytes += b
                if b[0] & 0x80 == 0:
                    break
            response_length = varint.decode_bytes(length_bytes)

            # Read response data
            response_bytes = b""
            remaining = response_length
            while remaining > 0:
                chunk = await stream.read(remaining)
                if not chunk:
                    logger.debug(f"Connection closed by peer {peer} while reading data")
                    return []
                response_bytes += chunk
                remaining -= len(chunk)

            # Parse the protobuf response
            response_msg = Message()
            response_msg.ParseFromString(response_bytes)
            logger.debug(
                "Received response from %s with %d peers",
                peer,
                len(response_msg.closerPeers),
            )

            # Process closest peers from the response
            if response_msg.type == Message.MessageType.FIND_NODE:
                for peer_data in response_msg.closerPeers:
                    new_peer_id = ID(peer_data.id)
                    if new_peer_id not in results:
                        results.append(new_peer_id)
                        if peer_data.addrs:
                            addrs = [Multiaddr(addr) for addr in peer_data.addrs]
                            self.host.get_peerstore().add_addrs(new_peer_id, addrs, 3600)

        except Exception as e:
            logger.debug(f"Error querying peer {peer} for closest: {e}")

        finally:
            if stream:
                await stream.close()
        return results

    async def _handle_kad_stream(self, stream: INetStream) -> None:
        """
        Handle incoming Kademlia protocol streams.

        :param stream: The incoming stream

        Returns
        -------
        None

        """
        try:
            # Read message length
            length_bytes = await stream.read(4)
            if not length_bytes:
                return

            message_length = int.from_bytes(length_bytes, byteorder="big")

            # Read message
            message_bytes = await stream.read(message_length)
            if not message_bytes:
                return

            # Parse protobuf message
            kad_message = Message()
            try:
                kad_message.ParseFromString(message_bytes)

                if kad_message.type == Message.MessageType.FIND_NODE:
                    # Get the target key directly from the protobuf message
                    target_key = kad_message.key

                    # Find the closest peers to the target
                    closest_peers = self.routing_table.find_local_closest_peers(
                        target_key, 20
                    )

                    # Create a protobuf response
                    response = Message()
                    response.type = Message.MessageType.FIND_NODE

                    # Add peer information to the response
                    for peer_id in closest_peers:
                        peer_proto = response.closerPeers.add()
                        peer_proto.id = peer_id.to_bytes()
                        peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                        # Add addresses if available
                        try:
                            addrs = self.host.get_peerstore().addrs(peer_id)
                            if addrs:
                                for addr in addrs:
                                    peer_proto.addrs.append(addr.to_bytes())
                        except Exception:
                            pass

                    # Send the response
                    response_bytes = response.SerializeToString()
                    await stream.write(len(response_bytes).to_bytes(4, byteorder="big"))
                    await stream.write(response_bytes)

            except Exception as parse_err:
                logger.error(f"Failed to parse protocol buffer message: {parse_err}")

        except Exception as e:
            logger.debug(f"Error handling Kademlia stream: {e}")
        finally:
            await stream.close()

    async def refresh_routing_table(self) -> None:
        """
        Refresh the routing table by performing lookups for random keys.

        Returns
        -------
        None

        """
        logger.info("Refreshing routing table")

        # Perform a lookup for ourselves to populate the routing table
        local_id = self.host.get_id()
        closest_peers = await self.find_closest_peers_network(local_id.to_bytes())

        # Add discovered peers to the routing table
        for peer_id in closest_peers:
            try:
                addrs = self.host.get_peerstore().addrs(peer_id)
                if addrs:
                    peer_info = PeerInfo(peer_id, addrs)
                    await self.routing_table.add_peer(peer_info)
            except Exception as e:
                logger.debug(f"Failed to add discovered peer {peer_id}: {e}")
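sort_peer_ids_by_distance is imported from .utils, which is not part of this diff. In Kademlia, candidates are ordered by XOR distance between the target key and each peer ID; a minimal sketch of that metric, assuming both sides are reduced to SHA-256 digests (the real helper may normalize differently):

    import hashlib

    from libp2p.peer.id import ID

    def xor_distance(target_key: bytes, peer_id: ID) -> int:
        # interpret both digests as big integers and XOR them
        a = int.from_bytes(hashlib.sha256(target_key).digest(), "big")
        b = int.from_bytes(hashlib.sha256(peer_id.to_bytes()).digest(), "big")
        return a ^ b

    def sort_by_xor_distance(target_key: bytes, peers: list[ID]) -> list[ID]:
        return sorted(peers, key=lambda p: xor_distance(target_key, p))
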
575
libp2p/kad_dht/provider_store.py
Normal file
575
libp2p/kad_dht/provider_store.py
Normal file
@ -0,0 +1,575 @@
|
||||
"""
|
||||
Provider record storage for Kademlia DHT.
|
||||
|
||||
This module implements the storage for content provider records in the Kademlia DHT.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from typing import (
|
||||
Any,
|
||||
)
|
||||
|
||||
from multiaddr import (
|
||||
Multiaddr,
|
||||
)
|
||||
import trio
|
||||
import varint
|
||||
|
||||
from libp2p.abc import (
|
||||
IHost,
|
||||
)
|
||||
from libp2p.custom_types import (
|
||||
TProtocol,
|
||||
)
|
||||
from libp2p.peer.id import (
|
||||
ID,
|
||||
)
|
||||
from libp2p.peer.peerinfo import (
|
||||
PeerInfo,
|
||||
)
|
||||
|
||||
from .pb.kademlia_pb2 import (
|
||||
Message,
|
||||
)
|
||||
|
||||
# logger = logging.getLogger("libp2p.kademlia.provider_store")
|
||||
logger = logging.getLogger("kademlia-example.provider_store")
|
||||
|
||||
# Constants for provider records (based on IPFS standards)
|
||||
PROVIDER_RECORD_REPUBLISH_INTERVAL = 22 * 60 * 60 # 22 hours in seconds
|
||||
PROVIDER_RECORD_EXPIRATION_INTERVAL = 48 * 60 * 60 # 48 hours in seconds
|
||||
PROVIDER_ADDRESS_TTL = 30 * 60 # 30 minutes in seconds
|
||||
PROTOCOL_ID = TProtocol("/ipfs/kad/1.0.0")
|
||||
ALPHA = 3 # Number of parallel queries/advertisements
|
||||
QUERY_TIMEOUT = 10 # Timeout for each query in seconds
|
||||
|
||||
|
||||
class ProviderRecord:
|
||||
"""
|
||||
A record for a content provider in the DHT.
|
||||
|
||||
Contains the peer information and timestamp.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
provider_info: PeerInfo,
|
||||
timestamp: float | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize a new provider record.
|
||||
|
||||
:param provider_info: The provider's peer information
|
||||
:param timestamp: Time this record was created/updated
|
||||
(defaults to current time)
|
||||
|
||||
"""
|
||||
self.provider_info = provider_info
|
||||
self.timestamp = timestamp or time.time()
|
||||
|
||||
def is_expired(self) -> bool:
|
||||
"""
|
||||
Check if this provider record has expired.
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if the record has expired
|
||||
|
||||
"""
|
||||
current_time = time.time()
|
||||
return (current_time - self.timestamp) >= PROVIDER_RECORD_EXPIRATION_INTERVAL
|
||||
|
||||
def should_republish(self) -> bool:
|
||||
"""
|
||||
Check if this provider record should be republished.
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if the record should be republished
|
||||
|
||||
"""
|
||||
current_time = time.time()
|
||||
return (current_time - self.timestamp) >= PROVIDER_RECORD_REPUBLISH_INTERVAL
|
||||
|
||||
@property
|
||||
def peer_id(self) -> ID:
|
||||
"""Get the provider's peer ID."""
|
||||
return self.provider_info.peer_id
|
||||
|
||||
@property
|
||||
def addresses(self) -> list[Multiaddr]:
|
||||
"""Get the provider's addresses."""
|
||||
return self.provider_info.addrs
|
||||
|
||||
|
||||
class ProviderStore:
|
||||
"""
|
||||
Store for content provider records in the Kademlia DHT.
|
||||
|
||||
Maps content keys to provider records, with support for expiration.
|
||||
"""
|
||||
|
||||
def __init__(self, host: IHost, peer_routing: Any = None) -> None:
|
||||
"""
|
||||
Initialize a new provider store.
|
||||
|
||||
:param host: The libp2p host instance (optional)
|
||||
:param peer_routing: The peer routing instance (optional)
|
||||
"""
|
||||
# Maps content keys to a dict of provider records (peer_id -> record)
|
||||
self.providers: dict[bytes, dict[str, ProviderRecord]] = {}
|
||||
self.host = host
|
||||
self.peer_routing = peer_routing
|
||||
self.providing_keys: set[bytes] = set()
|
||||
self.local_peer_id = host.get_id()
|
||||
|
||||
async def _republish_provider_records(self) -> None:
|
||||
"""Republish all provider records for content this node is providing."""
|
||||
# First, republish keys we're actively providing
|
||||
for key in self.providing_keys:
|
||||
logger.debug(f"Republishing provider record for key {key.hex()}")
|
||||
await self.provide(key)
|
||||
|
||||
# Also check for any records that should be republished
|
||||
time.time()
|
||||
for key, providers in self.providers.items():
|
||||
for peer_id_str, record in providers.items():
|
||||
# Only republish records for our own peer
|
||||
if self.local_peer_id and str(self.local_peer_id) == peer_id_str:
|
||||
if record.should_republish():
|
||||
logger.debug(
|
||||
f"Republishing old provider record for key {key.hex()}"
|
||||
)
|
||||
await self.provide(key)
|
||||
|
||||
async def provide(self, key: bytes) -> bool:
|
||||
"""
|
||||
Advertise that this node can provide a piece of content.
|
||||
|
||||
Finds the k closest peers to the key and sends them ADD_PROVIDER messages.
|
||||
|
||||
:param key: The content key (multihash) to advertise
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if the advertisement was successful
|
||||
|
||||
"""
|
||||
if not self.host or not self.peer_routing:
|
||||
logger.error("Host or peer_routing not initialized, cannot provide content")
|
||||
return False
|
||||
|
||||
# Add to local provider store
|
||||
local_addrs = []
|
||||
for addr in self.host.get_addrs():
|
||||
local_addrs.append(addr)
|
||||
|
||||
local_peer_info = PeerInfo(self.host.get_id(), local_addrs)
|
||||
self.add_provider(key, local_peer_info)
|
||||
|
||||
# Track that we're providing this key
|
||||
self.providing_keys.add(key)
|
||||
|
||||
# Find the k closest peers to the key
|
||||
closest_peers = await self.peer_routing.find_closest_peers_network(key)
|
||||
logger.debug(
|
||||
"Found %d peers close to key %s for provider advertisement",
|
||||
len(closest_peers),
|
||||
key.hex(),
|
||||
)
|
||||
|
||||
# Send ADD_PROVIDER messages to these ALPHA peers in parallel.
|
||||
success_count = 0
|
||||
for i in range(0, len(closest_peers), ALPHA):
|
||||
batch = closest_peers[i : i + ALPHA]
|
||||
results: list[bool] = [False] * len(batch)
|
||||
|
||||
async def send_one(
|
||||
idx: int, peer_id: ID, results: list[bool] = results
|
||||
) -> None:
|
||||
if peer_id == self.local_peer_id:
|
||||
return
|
||||
try:
|
||||
with trio.move_on_after(QUERY_TIMEOUT):
|
||||
success = await self._send_add_provider(peer_id, key)
|
||||
results[idx] = success
|
||||
if not success:
|
||||
logger.warning(f"Failed to send ADD_PROVIDER to {peer_id}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Error sending ADD_PROVIDER to {peer_id}: {e}")
|
||||
|
||||
async with trio.open_nursery() as nursery:
|
||||
for idx, peer_id in enumerate(batch):
|
||||
nursery.start_soon(send_one, idx, peer_id, results)
|
||||
success_count += sum(results)
|
||||
|
||||
logger.info(f"Successfully advertised to {success_count} peers")
|
||||
return success_count > 0
|
||||
|
||||
async def _send_add_provider(self, peer_id: ID, key: bytes) -> bool:
|
||||
"""
|
||||
Send ADD_PROVIDER message to a specific peer.
|
||||
|
||||
:param peer_id: The peer to send the message to
|
||||
:param key: The content key being provided
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if the message was successfully sent and acknowledged
|
||||
|
||||
"""
|
||||
try:
|
||||
result = False
|
||||
# Open a stream to the peer
|
||||
stream = await self.host.new_stream(peer_id, [TProtocol(PROTOCOL_ID)])
|
||||
|
||||
# Get our addresses to include in the message
|
||||
addrs = []
|
||||
for addr in self.host.get_addrs():
|
||||
addrs.append(addr.to_bytes())
|
||||
|
||||
# Create the ADD_PROVIDER message
|
||||
message = Message()
|
||||
message.type = Message.MessageType.ADD_PROVIDER
|
||||
message.key = key
|
||||
|
||||
# Add our provider info
|
||||
provider = message.providerPeers.add()
|
||||
provider.id = self.local_peer_id.to_bytes()
|
||||
provider.addrs.extend(addrs)
|
||||
|
||||
# Serialize and send the message
|
||||
proto_bytes = message.SerializeToString()
|
||||
await stream.write(varint.encode(len(proto_bytes)))
|
||||
await stream.write(proto_bytes)
|
||||
logger.debug(f"Sent ADD_PROVIDER to {peer_id} for key {key.hex()}")
|
||||
# Read response length prefix
|
||||
length_bytes = b""
|
||||
while True:
|
||||
logger.debug("Reading response length prefix in add provider")
|
||||
b = await stream.read(1)
|
||||
if not b:
|
||||
return False
|
||||
length_bytes += b
|
||||
if b[0] & 0x80 == 0:
|
||||
break
|
||||
|
||||
response_length = varint.decode_bytes(length_bytes)
|
||||
# Read response data
|
||||
response_bytes = b""
|
||||
remaining = response_length
|
||||
while remaining > 0:
|
||||
chunk = await stream.read(remaining)
|
||||
if not chunk:
|
||||
return False
|
||||
response_bytes += chunk
|
||||
remaining -= len(chunk)
|
||||
|
||||
# Parse response
|
||||
response = Message()
|
||||
response.ParseFromString(response_bytes)
|
||||
|
||||
# Check response type
|
||||
response.type == Message.MessageType.ADD_PROVIDER
|
||||
if response.type:
|
||||
result = True
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Error sending ADD_PROVIDER to {peer_id}: {e}")
|
||||
|
||||
finally:
|
||||
await stream.close()
|
||||
return result
|
||||
|
||||
async def find_providers(self, key: bytes, count: int = 20) -> list[PeerInfo]:
|
||||
"""
|
||||
Find content providers for a given key.
|
||||
|
||||
:param key: The content key to look for
|
||||
:param count: Maximum number of providers to return
|
||||
|
||||
Returns
|
||||
-------
|
||||
List[PeerInfo]
|
||||
List of content providers
|
||||
|
||||
"""
|
||||
if not self.host or not self.peer_routing:
|
||||
logger.error("Host or peer_routing not initialized, cannot find providers")
|
||||
return []
|
||||
|
||||
# Check local provider store first
|
||||
local_providers = self.get_providers(key)
|
||||
if local_providers:
|
||||
logger.debug(
|
||||
f"Found {len(local_providers)} providers locally for {key.hex()}"
|
||||
)
|
||||
return local_providers[:count]
|
||||
logger.debug("local providers are %s", local_providers)
|
||||
|
||||
# Find the closest peers to the key
|
||||
closest_peers = await self.peer_routing.find_closest_peers_network(key)
|
||||
logger.debug(
|
||||
f"Searching {len(closest_peers)} peers for providers of {key.hex()}"
|
||||
)
|
||||
|
||||
# Query these peers for providers in batches of ALPHA, in parallel, with timeout
|
||||
all_providers = []
|
||||
for i in range(0, len(closest_peers), ALPHA):
|
||||
batch = closest_peers[i : i + ALPHA]
|
||||
batch_results: list[list[PeerInfo]] = [[] for _ in batch]
|
||||
|
||||
async def get_one(
|
||||
idx: int,
|
||||
peer_id: ID,
|
||||
batch_results: list[list[PeerInfo]] = batch_results,
|
||||
) -> None:
|
||||
if peer_id == self.local_peer_id:
|
||||
return
|
||||
try:
|
||||
with trio.move_on_after(QUERY_TIMEOUT):
|
||||
providers = await self._get_providers_from_peer(peer_id, key)
|
||||
if providers:
|
||||
for provider in providers:
|
||||
self.add_provider(key, provider)
|
||||
batch_results[idx] = providers
|
||||
else:
|
||||
logger.debug(f"No providers found at peer {peer_id}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to get providers from {peer_id}: {e}")
|
||||
|
||||
async with trio.open_nursery() as nursery:
|
||||
for idx, peer_id in enumerate(batch):
|
||||
nursery.start_soon(get_one, idx, peer_id, batch_results)
|
||||
|
||||
for providers in batch_results:
|
||||
all_providers.extend(providers)
|
||||
if len(all_providers) >= count:
|
||||
return all_providers[:count]
|
||||
|
||||
return all_providers[:count]
|
||||
|
||||
    async def _get_providers_from_peer(self, peer_id: ID, key: bytes) -> list[PeerInfo]:
        """
        Get content providers from a specific peer.

        :param peer_id: The peer to query
        :param key: The content key to look for

        Returns
        -------
        List[PeerInfo]
            List of provider information

        """
        providers: list[PeerInfo] = []
        try:
            # Open a stream to the peer
            stream = await self.host.new_stream(peer_id, [TProtocol(PROTOCOL_ID)])

            try:
                # Create the GET_PROVIDERS message
                message = Message()
                message.type = Message.MessageType.GET_PROVIDERS
                message.key = key

                # Serialize and send the message
                proto_bytes = message.SerializeToString()
                await stream.write(varint.encode(len(proto_bytes)))
                await stream.write(proto_bytes)

                # Read response length prefix
                length_bytes = b""
                while True:
                    b = await stream.read(1)
                    if not b:
                        return []
                    length_bytes += b
                    if b[0] & 0x80 == 0:
                        break

                response_length = varint.decode_bytes(length_bytes)
                # Read response data
                response_bytes = b""
                remaining = response_length
                while remaining > 0:
                    chunk = await stream.read(remaining)
                    if not chunk:
                        return []
                    response_bytes += chunk
                    remaining -= len(chunk)

                # Parse response
                response = Message()
                response.ParseFromString(response_bytes)

                # Check response type
                if response.type != Message.MessageType.GET_PROVIDERS:
                    return []

                # Extract provider information
                providers = []
                for provider_proto in response.providerPeers:
                    try:
                        # Create peer ID from bytes
                        provider_id = ID(provider_proto.id)

                        # Convert addresses to Multiaddr
                        addrs = []
                        for addr_bytes in provider_proto.addrs:
                            try:
                                addrs.append(Multiaddr(addr_bytes))
                            except Exception:
                                pass  # Skip invalid addresses

                        # Create PeerInfo and add to result
                        providers.append(PeerInfo(provider_id, addrs))
                    except Exception as e:
                        logger.warning(f"Failed to parse provider info: {e}")

            finally:
                await stream.close()
            return providers

        except Exception as e:
            logger.warning(f"Error getting providers from {peer_id}: {e}")
            return []

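    # The length-prefix loops above read one byte at a time until the varint
    # continuation bit (0x80) is clear. A minimal sketch of the same framing
    # as a reusable helper (hypothetical, not part of this diff):
    #
    #     async def read_varint_frame(stream) -> bytes:
    #         length_bytes = b""
    #         while True:
    #             b = await stream.read(1)
    #             if not b:
    #                 raise EOFError("stream closed while reading length")
    #             length_bytes += b
    #             if b[0] & 0x80 == 0:
    #                 break
    #         size = varint.decode_bytes(length_bytes)
    #         data = b""
    #         while len(data) < size:
    #             chunk = await stream.read(size - len(data))
    #             if not chunk:
    #                 raise EOFError("stream closed while reading frame")
    #             data += chunk
    #         return data
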
    def add_provider(self, key: bytes, provider: PeerInfo) -> None:
        """
        Add a provider for a given content key.

        :param key: The content key
        :param provider: The provider's peer information

        Returns
        -------
        None

        """
        # Initialize providers for this key if needed
        if key not in self.providers:
            self.providers[key] = {}

        # Add or update the provider record
        peer_id_str = str(provider.peer_id)  # Use string representation as dict key
        self.providers[key][peer_id_str] = ProviderRecord(
            provider_info=provider, timestamp=time.time()
        )
        logger.debug(f"Added provider {provider.peer_id} for key {key.hex()}")

    def get_providers(self, key: bytes) -> list[PeerInfo]:
        """
        Get all providers for a given content key.

        :param key: The content key

        Returns
        -------
        List[PeerInfo]
            List of providers for the key

        """
        if key not in self.providers:
            return []

        # Collect valid provider records (not expired)
        result = []
        current_time = time.time()
        expired_peers = []

        for peer_id_str, record in self.providers[key].items():
            # Check if the record has expired
            if current_time - record.timestamp > PROVIDER_RECORD_EXPIRATION_INTERVAL:
                expired_peers.append(peer_id_str)
                continue

            # Use addresses only if they haven't expired
            addresses = []
            if current_time - record.timestamp <= PROVIDER_ADDRESS_TTL:
                addresses = record.addresses

            # Create PeerInfo and add to results
            result.append(PeerInfo(record.peer_id, addresses))

        # Clean up expired records
        for peer_id in expired_peers:
            del self.providers[key][peer_id]

        # Remove the key if no providers are left
        if not self.providers[key]:
            del self.providers[key]

        return result

    def cleanup_expired(self) -> None:
        """Remove expired provider records."""
        current_time = time.time()
        expired_keys = []

        for key, providers in self.providers.items():
            expired_providers = []

            for peer_id_str, record in providers.items():
                if (
                    current_time - record.timestamp
                    > PROVIDER_RECORD_EXPIRATION_INTERVAL
                ):
                    expired_providers.append(peer_id_str)
                    logger.debug(
                        f"Removing expired provider {peer_id_str} for key {key.hex()}"
                    )

            # Remove expired providers
            for peer_id in expired_providers:
                del providers[peer_id]

            # Track empty keys for removal
            if not providers:
                expired_keys.append(key)

        # Remove empty keys
        for key in expired_keys:
            del self.providers[key]
            logger.debug(f"Removed key with no providers: {key.hex()}")

    def get_provided_keys(self, peer_id: ID) -> list[bytes]:
        """
        Get all content keys provided by a specific peer.

        :param peer_id: The peer ID to look for

        Returns
        -------
        List[bytes]
            List of content keys provided by the peer

        """
        peer_id_str = str(peer_id)
        result = []

        for key, providers in self.providers.items():
            if peer_id_str in providers:
                result.append(key)

        return result

    def size(self) -> int:
        """
        Get the total number of provider records in the store.

        Returns
        -------
        int
            Total number of provider records across all keys

        """
        total = 0
        for providers in self.providers.values():
            total += len(providers)
        return total

601
libp2p/kad_dht/routing_table.py
Normal file
@ -0,0 +1,601 @@
"""
|
||||
Kademlia DHT routing table implementation.
|
||||
"""
|
||||
|
||||
from collections import (
|
||||
OrderedDict,
|
||||
)
|
||||
import logging
|
||||
import time
|
||||
|
||||
import trio
|
||||
|
||||
from libp2p.abc import (
|
||||
IHost,
|
||||
)
|
||||
from libp2p.custom_types import (
|
||||
TProtocol,
|
||||
)
|
||||
from libp2p.kad_dht.utils import xor_distance
|
||||
from libp2p.peer.id import (
|
||||
ID,
|
||||
)
|
||||
from libp2p.peer.peerinfo import (
|
||||
PeerInfo,
|
||||
)
|
||||
|
||||
from .pb.kademlia_pb2 import (
|
||||
Message,
|
||||
)
|
||||
|
||||
# logger = logging.getLogger("libp2p.kademlia.routing_table")
|
||||
logger = logging.getLogger("kademlia-example.routing_table")
|
||||
|
||||
# Default parameters
|
||||
BUCKET_SIZE = 20 # k in the Kademlia paper
|
||||
MAXIMUM_BUCKETS = 256 # Maximum number of buckets (for 256-bit keys)
|
||||
PEER_REFRESH_INTERVAL = 60 # Interval to refresh peers in seconds
|
||||
STALE_PEER_THRESHOLD = 3600 # Time in seconds after which a peer is considered stale
|
||||
|
||||
|
||||
class KBucket:
|
||||
"""
|
||||
A k-bucket implementation for the Kademlia DHT.
|
||||
|
||||
Each k-bucket stores up to k (BUCKET_SIZE) peers, sorted by least-recently seen.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
host: IHost,
|
||||
bucket_size: int = BUCKET_SIZE,
|
||||
min_range: int = 0,
|
||||
max_range: int = 2**256,
|
||||
):
|
||||
"""
|
||||
Initialize a new k-bucket.
|
||||
|
||||
:param host: The host this bucket belongs to
|
||||
:param bucket_size: Maximum number of peers to store in the bucket
|
||||
:param min_range: Lower boundary of the bucket's key range (inclusive)
|
||||
:param max_range: Upper boundary of the bucket's key range (exclusive)
|
||||
|
||||
"""
|
||||
self.bucket_size = bucket_size
|
||||
self.host = host
|
||||
self.min_range = min_range
|
||||
self.max_range = max_range
|
||||
# Store PeerInfo objects along with last-seen timestamp
|
||||
self.peers: OrderedDict[ID, tuple[PeerInfo, float]] = OrderedDict()
|
||||
|
||||
def peer_ids(self) -> list[ID]:
|
||||
"""Get all peer IDs in the bucket."""
|
||||
return list(self.peers.keys())
|
||||
|
||||
def peer_infos(self) -> list[PeerInfo]:
|
||||
"""Get all PeerInfo objects in the bucket."""
|
||||
return [info for info, _ in self.peers.values()]
|
||||
|
||||
def get_oldest_peer(self) -> ID | None:
|
||||
"""Get the least-recently seen peer."""
|
||||
if not self.peers:
|
||||
return None
|
||||
return next(iter(self.peers.keys()))
|
||||
|
||||
    async def add_peer(self, peer_info: PeerInfo) -> bool:
        """
        Add a peer to the bucket. Returns True if the peer was added or updated,
        False if the bucket is full.
        """
        current_time = time.time()
        peer_id = peer_info.peer_id

        # If peer is already in the bucket, move it to the end (most recently seen)
        if peer_id in self.peers:
            self.refresh_peer_last_seen(peer_id)
            return True

        # If bucket has space, add the peer
        if len(self.peers) < self.bucket_size:
            self.peers[peer_id] = (peer_info, current_time)
            return True

        # If the bucket is full, consider replacing the least-recently seen peer
        oldest_peer_id = self.get_oldest_peer()
        if oldest_peer_id is None:
            logger.warning("No oldest peer found when bucket is full")
            return False

        # Check if the old peer is responsive to a ping request
        try:
            # Try to ping the oldest peer, not the new peer
            response = await self._ping_peer(oldest_peer_id)
            if response:
                # If the old peer is still alive, we will not add the new peer
                logger.debug(
                    "Old peer %s is still alive, cannot add new peer %s",
                    oldest_peer_id,
                    peer_id,
                )
                return False
        except Exception as e:
            # If the old peer is unresponsive, we can replace it with the new peer
            logger.debug(
                "Old peer %s is unresponsive, replacing with new peer %s: %s",
                oldest_peer_id,
                peer_id,
                str(e),
            )
            self.peers.popitem(last=False)  # Remove oldest peer
            self.peers[peer_id] = (peer_info, current_time)
            return True

        # If we got here, the oldest peer responded but we couldn't add the new peer
        return False

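    # Design note: preferring a responsive old peer over a new candidate is
    # the standard Kademlia eviction policy -- long-lived peers are the most
    # likely to stay online, which keeps routing tables stable under churn.
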
    def remove_peer(self, peer_id: ID) -> bool:
        """
        Remove a peer from the bucket.
        Returns True if the peer was in the bucket, False otherwise.
        """
        if peer_id in self.peers:
            del self.peers[peer_id]
            return True
        return False

    def has_peer(self, peer_id: ID) -> bool:
        """Check if the peer is in the bucket."""
        return peer_id in self.peers

    def get_peer_info(self, peer_id: ID) -> PeerInfo | None:
        """Get the PeerInfo for a given peer ID if it exists in the bucket."""
        if peer_id in self.peers:
            return self.peers[peer_id][0]
        return None

    def size(self) -> int:
        """Get the number of peers in the bucket."""
        return len(self.peers)

    def get_stale_peers(self, stale_threshold_seconds: int = 3600) -> list[ID]:
        """
        Get peers that haven't been pinged recently.

        :param stale_threshold_seconds: Time in seconds after which a peer
            is considered stale

        Returns
        -------
        list[ID]
            List of peer IDs that need to be refreshed

        """
        current_time = time.time()
        stale_peers = []

        for peer_id, (_, last_seen) in self.peers.items():
            if current_time - last_seen > stale_threshold_seconds:
                stale_peers.append(peer_id)

        return stale_peers

    async def _periodic_peer_refresh(self) -> None:
        """Background task to periodically refresh peers."""
        try:
            while True:
                await trio.sleep(PEER_REFRESH_INTERVAL)  # Check every minute

                # Find stale peers (not pinged in the last hour)
                stale_peers = self.get_stale_peers(
                    stale_threshold_seconds=STALE_PEER_THRESHOLD
                )
                if stale_peers:
                    logger.debug(f"Found {len(stale_peers)} stale peers to refresh")

                for peer_id in stale_peers:
                    try:
                        # Try to ping the peer
                        logger.debug("Pinging stale peer %s", peer_id)
                        response = await self._ping_peer(peer_id)
                        if response:
                            # Update the last seen time
                            self.refresh_peer_last_seen(peer_id)
                            logger.debug(f"Successfully refreshed peer {peer_id}")
                        else:
                            # If the ping fails, remove the peer
                            logger.debug(f"Failed to ping peer {peer_id}")
                            self.remove_peer(peer_id)
                            logger.info(f"Removed unresponsive peer {peer_id}")
                    except Exception as e:
                        # If the ping raises, remove the peer
                        logger.debug(
                            "Failed to ping peer %s: %s",
                            peer_id,
                            e,
                        )
                        self.remove_peer(peer_id)
                        logger.info(f"Removed unresponsive peer {peer_id}")
        except trio.Cancelled:
            logger.debug("Peer refresh task cancelled")
        except Exception as e:
            logger.error(f"Error in peer refresh task: {e}", exc_info=True)

    async def _ping_peer(self, peer_id: ID) -> bool:
        """
        Ping a peer using a protobuf message to check whether it is still
        alive, and update its last-seen time.

        :param peer_id: The ID of the peer to ping

        Returns
        -------
        bool
            True if ping successful, False otherwise

        """
        result = False
        # Get peer info directly from the bucket
        peer_info = self.get_peer_info(peer_id)
        if not peer_info:
            raise ValueError(f"Peer {peer_id} not in bucket")

        # Default protocol ID for Kademlia DHT
        protocol_id = TProtocol("/ipfs/kad/1.0.0")

        try:
            # Open a stream to the peer with the DHT protocol
            stream = await self.host.new_stream(peer_id, [protocol_id])

            try:
                # Create ping protobuf message
                ping_msg = Message()
                ping_msg.type = Message.PING  # Use correct enum

                # Serialize and send with length prefix (4 bytes big-endian)
                msg_bytes = ping_msg.SerializeToString()
                logger.debug(
                    f"Sending PING message to {peer_id}, size: {len(msg_bytes)} bytes"
                )
                await stream.write(len(msg_bytes).to_bytes(4, byteorder="big"))
                await stream.write(msg_bytes)

                # Wait for response with timeout
                with trio.move_on_after(2):  # 2 second timeout
                    # Read response length (4 bytes)
                    length_bytes = await stream.read(4)
                    if not length_bytes or len(length_bytes) < 4:
                        logger.warning(f"Peer {peer_id} disconnected during ping")
                        return False

                    msg_len = int.from_bytes(length_bytes, byteorder="big")
                    if (
                        msg_len <= 0 or msg_len > 1024 * 1024
                    ):  # Sanity check on message size
                        logger.warning(
                            f"Invalid message length from {peer_id}: {msg_len}"
                        )
                        return False

                    logger.debug(
                        f"Receiving response from {peer_id}, size: {msg_len} bytes"
                    )

                    # Read full message
                    response_bytes = await stream.read(msg_len)
                    if not response_bytes:
                        logger.warning(f"Failed to read response from {peer_id}")
                        return False

                    # Parse protobuf response
                    response = Message()
                    try:
                        response.ParseFromString(response_bytes)
                    except Exception as e:
                        logger.warning(
                            f"Failed to parse protobuf response from {peer_id}: {e}"
                        )
                        return False

                    if response.type == Message.PING:
                        # Update the last seen timestamp for this peer
                        logger.debug(f"Successfully pinged peer {peer_id}")
                        result = True
                        return result

                    else:
                        logger.warning(
                            f"Unexpected response type from {peer_id}: {response.type}"
                        )
                        return False

                # If we get here, the ping timed out
                logger.warning(f"Ping to peer {peer_id} timed out")
                return False

            finally:
                await stream.close()
            return result

        except Exception as e:
            logger.error(f"Error pinging peer {peer_id}: {str(e)}")
            return False

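    # Note: unlike the varint-framed GET_VALUE/GET_PROVIDERS exchanges
    # elsewhere in this changeset, this ping path frames messages with a fixed
    # 4-byte big-endian length prefix, so both ends must agree on the framing.
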
    def refresh_peer_last_seen(self, peer_id: ID) -> bool:
        """
        Update the last-seen timestamp for a peer in the bucket.

        :param peer_id: The ID of the peer to refresh

        Returns
        -------
        bool
            True if the peer was found and refreshed, False otherwise

        """
        if peer_id in self.peers:
            # Get current peer info and update the timestamp
            peer_info, _ = self.peers[peer_id]
            current_time = time.time()
            self.peers[peer_id] = (peer_info, current_time)
            # Move to end of ordered dict to mark as most recently seen
            self.peers.move_to_end(peer_id)
            return True

        return False

    def key_in_range(self, key: bytes) -> bool:
        """
        Check if a key is in the range of this bucket.

        :param key: The key to check (bytes)

        Returns
        -------
        bool
            True if the key is in range, False otherwise

        """
        key_int = int.from_bytes(key, byteorder="big")
        return self.min_range <= key_int < self.max_range

    def split(self) -> tuple["KBucket", "KBucket"]:
        """
        Split the bucket into two buckets.

        Returns
        -------
        tuple
            (lower_bucket, upper_bucket)

        """
        midpoint = (self.min_range + self.max_range) // 2
        lower_bucket = KBucket(self.host, self.bucket_size, self.min_range, midpoint)
        upper_bucket = KBucket(self.host, self.bucket_size, midpoint, self.max_range)

        # Redistribute peers
        for peer_id, (peer_info, timestamp) in self.peers.items():
            peer_key = int.from_bytes(peer_id.to_bytes(), byteorder="big")
            if peer_key < midpoint:
                lower_bucket.peers[peer_id] = (peer_info, timestamp)
            else:
                upper_bucket.peers[peer_id] = (peer_info, timestamp)

        return lower_bucket, upper_bucket


class RoutingTable:
    """
    The Kademlia routing table maintains information on which peers to contact for any
    given peer ID in the network.
    """

    def __init__(self, local_id: ID, host: IHost) -> None:
        """
        Initialize the routing table.

        :param local_id: The ID of the local node.
        :param host: The host this routing table belongs to.

        """
        self.local_id = local_id
        self.host = host
        self.buckets = [KBucket(host, BUCKET_SIZE)]

    async def add_peer(self, peer_obj: PeerInfo | ID) -> bool:
        """
        Add a peer to the routing table.

        :param peer_obj: Either a PeerInfo object or a peer ID to add

        Returns
        -------
        bool: True if the peer was added or updated, False otherwise

        """
        peer_id = None
        peer_info = None

        try:
            # Handle different types of input
            if isinstance(peer_obj, PeerInfo):
                # Already have a PeerInfo object
                peer_info = peer_obj
                peer_id = peer_obj.peer_id
            else:
                # Assume it's a peer ID
                peer_id = peer_obj
                # Try to get addresses from the peerstore if available
                try:
                    addrs = self.host.get_peerstore().addrs(peer_id)
                    if addrs:
                        # Create a PeerInfo object
                        peer_info = PeerInfo(peer_id, addrs)
                    else:
                        logger.debug(
                            "No addresses found for peer %s in peerstore, skipping",
                            peer_id,
                        )
                        return False
                except Exception as peerstore_error:
                    # Handle the case where the peer is not in the peerstore yet
                    logger.debug(
                        "Peer %s not found in peerstore: %s, skipping",
                        peer_id,
                        str(peerstore_error),
                    )
                    return False

            # Don't add ourselves
            if peer_id == self.local_id:
                return False

            # Find the right bucket for this peer
            bucket = self.find_bucket(peer_id)

            # Try to add to the bucket
            success = await bucket.add_peer(peer_info)
            if success:
                logger.debug(f"Successfully added peer {peer_id} to routing table")
            return success

        except Exception as e:
            logger.debug(f"Error adding peer {peer_obj} to routing table: {e}")
            return False

    def remove_peer(self, peer_id: ID) -> bool:
        """
        Remove a peer from the routing table.

        :param peer_id: The ID of the peer to remove

        Returns
        -------
        bool: True if the peer was removed, False otherwise

        """
        bucket = self.find_bucket(peer_id)
        return bucket.remove_peer(peer_id)

    def find_bucket(self, peer_id: ID) -> KBucket:
        """
        Find the bucket that would contain the given peer ID.

        :param peer_id: The peer ID to locate a bucket for

        Returns
        -------
        KBucket: The bucket for this peer

        """
        for bucket in self.buckets:
            if bucket.key_in_range(peer_id.to_bytes()):
                return bucket

        return self.buckets[0]

    def find_local_closest_peers(self, key: bytes, count: int = 20) -> list[ID]:
        """
        Find the closest peers to a given key.

        :param key: The key to find closest peers to (bytes)
        :param count: Maximum number of peers to return

        Returns
        -------
        List[ID]: List of peer IDs closest to the key

        """
        # Get all peers from all buckets
        all_peers = []
        for bucket in self.buckets:
            all_peers.extend(bucket.peer_ids())

        # Sort by XOR distance to the key
        all_peers.sort(key=lambda p: xor_distance(p.to_bytes(), key))

        return all_peers[:count]

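    # Example (hypothetical usage sketch): finding the three peers closest to
    # a content key, assuming `table` is a populated RoutingTable and
    # create_key_from_binary() comes from libp2p.kad_dht.utils:
    #
    #     key = create_key_from_binary(b"some content")
    #     closest = table.find_local_closest_peers(key, count=3)
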
    def get_peer_ids(self) -> list[ID]:
        """
        Get all peer IDs in the routing table.

        Returns
        -------
        List[ID]: List of all peer IDs

        """
        peers = []
        for bucket in self.buckets:
            peers.extend(bucket.peer_ids())
        return peers

    def get_peer_info(self, peer_id: ID) -> PeerInfo | None:
        """
        Get the peer info for a specific peer.

        :param peer_id: The ID of the peer to get info for

        Returns
        -------
        PeerInfo: The peer info, or None if not found

        """
        bucket = self.find_bucket(peer_id)
        return bucket.get_peer_info(peer_id)

    def peer_in_table(self, peer_id: ID) -> bool:
        """
        Check if a peer is in the routing table.

        :param peer_id: The ID of the peer to check

        Returns
        -------
        bool: True if the peer is in the routing table, False otherwise

        """
        bucket = self.find_bucket(peer_id)
        return bucket.has_peer(peer_id)

    def size(self) -> int:
        """
        Get the number of peers in the routing table.

        Returns
        -------
        int: Number of peers

        """
        count = 0
        for bucket in self.buckets:
            count += bucket.size()
        return count

    def get_stale_peers(self, stale_threshold_seconds: int = 3600) -> list[ID]:
        """
        Get all stale peers from all buckets.

        :param stale_threshold_seconds: Time in seconds after which a peer
            is considered stale

        Returns
        -------
        list[ID]
            List of stale peer IDs

        """
        stale_peers = []
        for bucket in self.buckets:
            stale_peers.extend(bucket.get_stale_peers(stale_threshold_seconds))
        return stale_peers

    def cleanup_routing_table(self) -> None:
        """
        Clean up the routing table by removing all data.

        This is useful for resetting the routing table during tests or
        reinitialization.
        """
        self.buckets = [KBucket(self.host, BUCKET_SIZE)]
        logger.info("Routing table cleaned up, all data removed.")
117
libp2p/kad_dht/utils.py
Normal file
@ -0,0 +1,117 @@
"""
|
||||
Utility functions for Kademlia DHT implementation.
|
||||
"""
|
||||
|
||||
import base58
|
||||
import multihash
|
||||
|
||||
from libp2p.peer.id import (
|
||||
ID,
|
||||
)
|
||||
|
||||
|
||||
def create_key_from_binary(binary_data: bytes) -> bytes:
|
||||
"""
|
||||
Creates a key for the DHT by hashing binary data with SHA-256.
|
||||
|
||||
params: binary_data: The binary data to hash.
|
||||
|
||||
Returns
|
||||
-------
|
||||
bytes: The resulting key.
|
||||
|
||||
"""
|
||||
return multihash.digest(binary_data, "sha2-256").digest
|
||||
|
||||
|
||||
def xor_distance(key1: bytes, key2: bytes) -> int:
    """
    Calculate the XOR distance between two keys.

    :param key1: First key (bytes)
    :param key2: Second key (bytes)

    Returns
    -------
    int: The XOR distance between the keys

    """
    # Ensure the inputs are bytes
    if not isinstance(key1, bytes) or not isinstance(key2, bytes):
        raise TypeError("Both key1 and key2 must be bytes objects")

    # Convert to integers
    k1 = int.from_bytes(key1, byteorder="big")
    k2 = int.from_bytes(key2, byteorder="big")

    # Calculate XOR distance
    return k1 ^ k2


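# Worked example: xor_distance(b"\x01", b"\x03") == 0b01 ^ 0b11 == 2, and any
# key is at distance 0 from itself -- the symmetry and uniqueness of XOR are
# what make it usable as Kademlia's distance metric.

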
def bytes_to_base58(data: bytes) -> str:
    """
    Convert bytes to a base58-encoded string.

    :param data: Input bytes

    Returns
    -------
    str: Base58 encoded string

    """
    return base58.b58encode(data).decode("utf-8")


def sort_peer_ids_by_distance(target_key: bytes, peer_ids: list[ID]) -> list[ID]:
    """
    Sort a list of peer IDs by their distance to the target key.

    :param target_key: The target key to measure distance from
    :param peer_ids: List of peer IDs to sort

    Returns
    -------
    List[ID]: Sorted list of peer IDs from closest to furthest

    """

    def get_distance(peer_id: ID) -> int:
        # Hash the peer ID bytes to get a key for distance calculation
        peer_hash = multihash.digest(peer_id.to_bytes(), "sha2-256").digest
        return xor_distance(target_key, peer_hash)

    return sorted(peer_ids, key=get_distance)


def shared_prefix_len(first: bytes, second: bytes) -> int:
    """
    Calculate the number of prefix bits shared by two byte sequences.

    :param first: First byte sequence
    :param second: Second byte sequence

    Returns
    -------
    int: Number of shared prefix bits

    """
    # Compare each byte to find the first bit difference
    common_length = 0
    for i in range(min(len(first), len(second))):
        byte_first = first[i]
        byte_second = second[i]

        if byte_first == byte_second:
            common_length += 8
        else:
            # Find the specific bit where they differ
            xor = byte_first ^ byte_second
            # Count leading zeros in the xor result
            for j in range(7, -1, -1):
                if (xor >> j) & 1 == 1:
                    return common_length + (7 - j)

            # This shouldn't be reached if xor != 0
            return common_length + 8

    return common_length
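

# Worked example: shared_prefix_len(b"\xf0", b"\xf8") == 4, since
# 0xf0 = 0b11110000 and 0xf8 = 0b11111000 agree on the first four bits and
# first differ at the fifth (xor == 0b00001000).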
393
libp2p/kad_dht/value_store.py
Normal file
@ -0,0 +1,393 @@
"""
|
||||
Value store implementation for Kademlia DHT.
|
||||
|
||||
Provides a way to store and retrieve key-value pairs with optional expiration.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
|
||||
import varint
|
||||
|
||||
from libp2p.abc import (
|
||||
IHost,
|
||||
)
|
||||
from libp2p.custom_types import (
|
||||
TProtocol,
|
||||
)
|
||||
from libp2p.peer.id import (
|
||||
ID,
|
||||
)
|
||||
|
||||
from .pb.kademlia_pb2 import (
|
||||
Message,
|
||||
)
|
||||
|
||||
# logger = logging.getLogger("libp2p.kademlia.value_store")
|
||||
logger = logging.getLogger("kademlia-example.value_store")
|
||||
|
||||
# Default time to live for values in seconds (24 hours)
|
||||
DEFAULT_TTL = 24 * 60 * 60
|
||||
PROTOCOL_ID = TProtocol("/ipfs/kad/1.0.0")
|
||||
|
||||
|
||||
class ValueStore:
|
||||
"""
|
||||
Store for key-value pairs in a Kademlia DHT.
|
||||
|
||||
Values are stored with a timestamp and optional expiration time.
|
||||
"""
|
||||
|
||||
def __init__(self, host: IHost, local_peer_id: ID):
|
||||
"""
|
||||
Initialize an empty value store.
|
||||
|
||||
:param host: The libp2p host instance.
|
||||
:param local_peer_id: The local peer ID to ignore in peer requests.
|
||||
|
||||
"""
|
||||
# Store format: {key: (value, validity)}
|
||||
self.store: dict[bytes, tuple[bytes, float]] = {}
|
||||
# Store references to the host and local peer ID for making requests
|
||||
self.host = host
|
||||
self.local_peer_id = local_peer_id
|
||||
|
||||
def put(self, key: bytes, value: bytes, validity: float = 0.0) -> None:
|
||||
"""
|
||||
Store a value in the DHT.
|
||||
|
||||
:param key: The key to store the value under
|
||||
:param value: The value to store
|
||||
:param validity: validity in seconds before the value expires.
|
||||
Defaults to `DEFAULT_TTL` if set to 0.0.
|
||||
|
||||
Returns
|
||||
-------
|
||||
None
|
||||
|
||||
"""
|
||||
if validity == 0.0:
|
||||
validity = time.time() + DEFAULT_TTL
|
||||
logger.debug(
|
||||
"Storing value for key %s... with validity %s", key.hex(), validity
|
||||
)
|
||||
self.store[key] = (value, validity)
|
||||
logger.debug(f"Stored value for key {key.hex()}")
|
||||
|
||||
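    # Example (hypothetical usage sketch): storing and reading back a value
    # locally, assuming `store` is a ValueStore instance:
    #
    #     store.put(b"my-key", b"my-value")             # expires after DEFAULT_TTL
    #     store.put(b"other", b"v", time.time() + 60)   # expires in 60 seconds
    #     assert store.get(b"my-key") == b"my-value"
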
    async def _store_at_peer(self, peer_id: ID, key: bytes, value: bytes) -> bool:
        """
        Store a value at a specific peer.

        :param peer_id: The ID of the peer to store the value at
        :param key: The key to store
        :param value: The value to store

        Returns
        -------
        bool
            True if the value was successfully stored, False otherwise

        """
        result = False
        stream = None
        try:
            # Don't try to store at ourselves
            if self.local_peer_id and peer_id == self.local_peer_id:
                result = True
                return result

            if not self.host:
                logger.error("Host not initialized, cannot store value at peer")
                return False

            logger.debug(f"Storing value for key {key.hex()} at peer {peer_id}")

            # Open a stream to the peer
            stream = await self.host.new_stream(peer_id, [PROTOCOL_ID])
            logger.debug(f"Opened stream to peer {peer_id}")

            # Create the PUT_VALUE message with protobuf
            message = Message()
            message.type = Message.MessageType.PUT_VALUE

            # Set message fields
            message.key = key
            message.record.key = key
            message.record.value = value
            message.record.timeReceived = str(time.time())

            # Serialize and send the protobuf message with length prefix
            proto_bytes = message.SerializeToString()
            await stream.write(varint.encode(len(proto_bytes)))
            await stream.write(proto_bytes)
            logger.debug("Sent PUT_VALUE protobuf message with varint length")

            # Read varint-prefixed response length
            length_bytes = b""
            while True:
                logger.debug("Reading varint length prefix for response...")
                b = await stream.read(1)
                if not b:
                    logger.warning("Connection closed while reading varint length")
                    return False
                length_bytes += b
                if b[0] & 0x80 == 0:
                    break
            logger.debug(f"Received varint length bytes: {length_bytes.hex()}")
            response_length = varint.decode_bytes(length_bytes)
            logger.debug("Response length: %d bytes", response_length)
            # Read response data
            response_bytes = b""
            remaining = response_length
            while remaining > 0:
                chunk = await stream.read(remaining)
                if not chunk:
                    logger.debug(
                        f"Connection closed by peer {peer_id} while reading data"
                    )
                    return False
                response_bytes += chunk
                remaining -= len(chunk)

            # Parse protobuf response
            response = Message()
            response.ParseFromString(response_bytes)

            # Check if the response is valid
            if response.type == Message.MessageType.PUT_VALUE:
                if response.key:
                    result = True
            return result

        except Exception as e:
            logger.warning(f"Failed to store value at peer {peer_id}: {e}")
            return False

        finally:
            if stream:
                await stream.close()
        return result

    def get(self, key: bytes) -> bytes | None:
        """
        Retrieve a value from the DHT.

        :param key: The key to look up

        Returns
        -------
        Optional[bytes]
            The stored value, or None if not found or expired

        """
        logger.debug("Retrieving value for key %s...", key.hex()[:8])
        if key not in self.store:
            return None

        value, validity = self.store[key]
        logger.debug(
            "Found value for key %s... with validity %s",
            key.hex(),
            validity,
        )
        # Check if the value has expired
        if validity is not None and validity < time.time():
            logger.debug(
                "Value for key %s... has expired, removing it",
                key.hex()[:8],
            )
            self.remove(key)
            return None

        return value

    async def _get_from_peer(self, peer_id: ID, key: bytes) -> bytes | None:
        """
        Retrieve a value from a specific peer.

        :param peer_id: The ID of the peer to retrieve the value from
        :param key: The key to retrieve

        Returns
        -------
        Optional[bytes]
            The value if found, None otherwise

        """
        stream = None
        try:
            # Don't try to get from ourselves
            if peer_id == self.local_peer_id:
                return None

            logger.debug(f"Getting value for key {key.hex()} from peer {peer_id}")

            # Open a stream to the peer
            stream = await self.host.new_stream(peer_id, [TProtocol(PROTOCOL_ID)])
            logger.debug(f"Opened stream to peer {peer_id} for GET_VALUE")

            # Create the GET_VALUE message using protobuf
            message = Message()
            message.type = Message.MessageType.GET_VALUE
            message.key = key

            # Serialize and send the protobuf message
            proto_bytes = message.SerializeToString()
            await stream.write(varint.encode(len(proto_bytes)))
            await stream.write(proto_bytes)

            # Read response length
            length_bytes = b""
            while True:
                b = await stream.read(1)
                if not b:
                    logger.warning("Connection closed while reading length")
                    return None
                length_bytes += b
                if b[0] & 0x80 == 0:
                    break
            response_length = varint.decode_bytes(length_bytes)
            # Read response data
            response_bytes = b""
            remaining = response_length
            while remaining > 0:
                chunk = await stream.read(remaining)
                if not chunk:
                    logger.debug(
                        f"Connection closed by peer {peer_id} while reading data"
                    )
                    return None
                response_bytes += chunk
                remaining -= len(chunk)

            # Parse protobuf response
            try:
                response = Message()
                response.ParseFromString(response_bytes)
                logger.debug(
                    f"Received protobuf response from peer"
                    f" {peer_id}, type: {response.type}"
                )

                # Process protobuf response
                if (
                    response.type == Message.MessageType.GET_VALUE
                    and response.HasField("record")
                    and response.record.value
                ):
                    logger.debug(
                        f"Received value for key {key.hex()} from peer {peer_id}"
                    )
                    return response.record.value

                # Handle the case where the value is not found but closer
                # peers are returned
                else:
                    logger.debug(
                        f"Value not found for key {key.hex()} from peer {peer_id},"
                        f" received {len(response.closerPeers)} closer peers"
                    )
                    return None

            except Exception as proto_err:
                logger.warning(f"Failed to parse as protobuf: {proto_err}")

            return None

        except Exception as e:
            logger.warning(f"Failed to get value from peer {peer_id}: {e}")
            return None

        finally:
            if stream:
                await stream.close()

    def remove(self, key: bytes) -> bool:
        """
        Remove a value from the DHT.

        :param key: The key to remove

        Returns
        -------
        bool
            True if the key was found and removed, False otherwise

        """
        if key in self.store:
            del self.store[key]
            logger.debug(f"Removed value for key {key.hex()[:8]}...")
            return True
        return False

    def has(self, key: bytes) -> bool:
        """
        Check if a key exists in the store and hasn't expired.

        :param key: The key to check

        Returns
        -------
        bool
            True if the key exists and hasn't expired, False otherwise

        """
        if key not in self.store:
            return False

        _, validity = self.store[key]
        if validity is not None and time.time() > validity:
            self.remove(key)
            return False

        return True

    def cleanup_expired(self) -> int:
        """
        Remove all expired values from the store.

        Returns
        -------
        int
            The number of expired values that were removed

        """
        current_time = time.time()
        expired_keys = [
            key for key, (_, validity) in self.store.items() if current_time > validity
        ]

        for key in expired_keys:
            del self.store[key]

        if expired_keys:
            logger.debug(f"Cleaned up {len(expired_keys)} expired values")

        return len(expired_keys)

    def get_keys(self) -> list[bytes]:
        """
        Get all non-expired keys in the store.

        Returns
        -------
        list[bytes]
            List of keys

        """
        # Clean up expired values first
        self.cleanup_expired()
        return list(self.store.keys())

    def size(self) -> int:
        """
        Get the number of items in the store (after removing expired entries).

        Returns
        -------
        int
            Number of items

        """
        self.cleanup_expired()
        return len(self.store)
@ -187,7 +187,7 @@ class Swarm(Service, INetworkService):
        # Per, https://discuss.libp2p.io/t/multistream-security/130, we first secure
        # the conn and then mux the conn
        try:
            secured_conn = await self.upgrader.upgrade_security(raw_conn, peer_id, True)
            secured_conn = await self.upgrader.upgrade_security(raw_conn, True, peer_id)
        except SecurityUpgradeFailure as error:
            logger.debug("failed to upgrade security for peer %s", peer_id)
            await raw_conn.close()
@ -257,10 +257,7 @@ class Swarm(Service, INetworkService):
            # Per, https://discuss.libp2p.io/t/multistream-security/130, we first
            # secure the conn and then mux the conn
            try:
                # FIXME: This dummy `ID(b"")` for the remote peer is useless.
                secured_conn = await self.upgrader.upgrade_security(
                    raw_conn, ID(b""), False
                )
                secured_conn = await self.upgrader.upgrade_security(raw_conn, False)
            except SecurityUpgradeFailure as error:
                logger.debug("failed to upgrade security for peer at %s", maddr)
                await raw_conn.close()

@ -516,82 +516,99 @@ class GossipSub(IPubsubRouter, Service):
            peers_to_prune[peer].append(topic)
        return peers_to_graft, peers_to_prune

    def fanout_heartbeat(self) -> None:
        # Note: the comments here are the exact pseudocode from the spec
        for topic in list(self.fanout):
            if (
                self.pubsub is not None
                and topic not in self.pubsub.peer_topics
                and self.time_since_last_publish.get(topic, 0) + self.time_to_live
                < int(time.time())
    def _handle_topic_heartbeat(
        self,
        topic: str,
        current_peers: set[ID],
        is_fanout: bool = False,
        peers_to_gossip: DefaultDict[ID, dict[str, list[str]]] | None = None,
    ) -> tuple[set[ID], bool]:
        """
        Helper method to handle heartbeat for a single topic,
        supporting both fanout and gossip.

        :param topic: The topic to handle
        :param current_peers: Current set of peers in the topic
        :param is_fanout: Whether this is a fanout topic (affects expiration check)
        :param peers_to_gossip: Optional dictionary to store peers to gossip to
        :return: Tuple of (updated_peers, should_remove_topic)
        """
        if self.pubsub is None:
            raise NoPubsubAttached

        # Skip if no peers have subscribed to the topic
        if topic not in self.pubsub.peer_topics:
            return current_peers, False

        # For fanout topics, check if we should remove the topic
        if is_fanout:
            if self.time_since_last_publish.get(topic, 0) + self.time_to_live < int(
                time.time()
            ):
                # Remove topic from fanout
                return set(), True

        # Check if peers are still in the topic and remove the ones that are not
        in_topic_peers: set[ID] = {
            peer for peer in current_peers if peer in self.pubsub.peer_topics[topic]
        }

        # If we need more peers to reach target degree
        if len(in_topic_peers) < self.degree:
            # Select additional peers from peers.gossipsub[topic]
            selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
                topic,
                self.degree - len(in_topic_peers),
                in_topic_peers,
            )
            # Add the selected peers
            in_topic_peers.update(selected_peers)

        # Handle gossip if requested
        if peers_to_gossip is not None:
            msg_ids = self.mcache.window(topic)
            if msg_ids:
                # Select D peers from peers.gossipsub[topic] excluding current peers
                peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
                    topic, self.degree, current_peers
                )
                msg_id_strs = [str(msg_id) for msg_id in msg_ids]
                for peer in peers_to_emit_ihave_to:
                    peers_to_gossip[peer][topic] = msg_id_strs

        return in_topic_peers, False

    def fanout_heartbeat(self) -> None:
        """
        Maintain fanout topics by:
        1. Removing expired topics
        2. Removing peers that are no longer in the topic
        3. Adding new peers if needed to maintain the target degree
        """
        for topic in list(self.fanout):
            updated_peers, should_remove = self._handle_topic_heartbeat(
                topic, self.fanout[topic], is_fanout=True
            )
            if should_remove:
                del self.fanout[topic]
            else:
                # Check if fanout peers are still in the topic and remove the ones that are not  # noqa: E501
                # ref: https://github.com/libp2p/go-libp2p-pubsub/blob/01b9825fbee1848751d90a8469e3f5f43bac8466/gossipsub.go#L498-L504  # noqa: E501

                in_topic_fanout_peers: list[ID] = []
                if self.pubsub is not None:
                    in_topic_fanout_peers = [
                        peer
                        for peer in self.fanout[topic]
                        if peer in self.pubsub.peer_topics[topic]
                    ]
                self.fanout[topic] = set(in_topic_fanout_peers)
                num_fanout_peers_in_topic = len(self.fanout[topic])

                # If |fanout[topic]| < D
                if num_fanout_peers_in_topic < self.degree:
                    # Select D - |fanout[topic]| peers from peers.gossipsub[topic] - fanout[topic]  # noqa: E501
                    selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
                        topic,
                        self.degree - num_fanout_peers_in_topic,
                        self.fanout[topic],
                    )
                    # Add the peers to fanout[topic]
                    self.fanout[topic].update(selected_peers)
                self.fanout[topic] = updated_peers

    def gossip_heartbeat(self) -> DefaultDict[ID, dict[str, list[str]]]:
        peers_to_gossip: DefaultDict[ID, dict[str, list[str]]] = defaultdict(dict)

        # Handle mesh topics
        for topic in self.mesh:
            msg_ids = self.mcache.window(topic)
            if msg_ids:
                if self.pubsub is None:
                    raise NoPubsubAttached
                # Get all pubsub peers in a topic and only add them if they are
                # gossipsub peers too
                if topic in self.pubsub.peer_topics:
                    # Select D peers from peers.gossipsub[topic]
                    peers_to_emit_ihave_to = (
                        self._get_in_topic_gossipsub_peers_from_minus(
                            topic, self.degree, self.mesh[topic]
                        )
                    )
            self._handle_topic_heartbeat(
                topic, self.mesh[topic], peers_to_gossip=peers_to_gossip
            )

                    msg_id_strs = [str(msg_id) for msg_id in msg_ids]
                    for peer in peers_to_emit_ihave_to:
                        peers_to_gossip[peer][topic] = msg_id_strs

        # TODO: Refactor and Dedup. This section is roughly the same as the above.
        # Do the same for fanout, for all topics not already hit in mesh
        # Handle fanout topics that aren't in mesh
        for topic in self.fanout:
            msg_ids = self.mcache.window(topic)
            if msg_ids:
                if self.pubsub is None:
                    raise NoPubsubAttached
                # Get all pubsub peers in topic and only add if they are
                # gossipsub peers also
                if topic in self.pubsub.peer_topics:
                    # Select D peers from peers.gossipsub[topic]
                    peers_to_emit_ihave_to = (
                        self._get_in_topic_gossipsub_peers_from_minus(
                            topic, self.degree, self.fanout[topic]
                        )
                    )
                    msg_id_strs = [str(msg) for msg in msg_ids]
                    for peer in peers_to_emit_ihave_to:
                        peers_to_gossip[peer][topic] = msg_id_strs
            if topic not in self.mesh:
                self._handle_topic_heartbeat(
                    topic, self.fanout[topic], peers_to_gossip=peers_to_gossip
                )

        return peers_to_gossip

    @staticmethod

@ -621,16 +621,22 @@ class Pubsub(Service, IPubsub):
            logger.debug("Fail to message peer %s: stream closed", peer_id)
            self._handle_dead_peer(peer_id)

    async def publish(self, topic_id: str, data: bytes) -> None:
    async def publish(self, topic_id: str | list[str], data: bytes) -> None:
        """
        Publish data to a topic.
        Publish data to a topic or multiple topics.

        :param topic_id: topic which we are going to publish the data to
        :param topic_id: topic (str) or topics (list[str]) to publish the data to
        :param data: data which we are publishing
        """
        # Handle both single topic (str) and multiple topics (list[str])
        if isinstance(topic_id, str):
            topic_ids = [topic_id]
        else:
            topic_ids = topic_id

        msg = rpc_pb2.Message(
            data=data,
            topicIDs=[topic_id],
            topicIDs=topic_ids,
            # Origin is ourself.
            from_id=self.my_id.to_bytes(),
            seqno=self._next_seqno(),
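
# Example (hypothetical usage sketch) of the widened publish() signature,
# assuming `pubsub` is a started Pubsub instance:
#
#     await pubsub.publish("topic-a", b"hello")               # single topic
#     await pubsub.publish(["topic-a", "topic-b"], b"hello")  # both topics
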
28
libp2p/relay/__init__.py
Normal file
@ -0,0 +1,28 @@
"""
|
||||
Relay module for libp2p.
|
||||
|
||||
This package includes implementations of circuit relay protocols
|
||||
for enabling connectivity between peers behind NATs or firewalls.
|
||||
"""
|
||||
|
||||
# Import the circuit_v2 module to make it accessible
|
||||
# through the relay package
|
||||
from libp2p.relay.circuit_v2 import (
|
||||
PROTOCOL_ID,
|
||||
CircuitV2Protocol,
|
||||
CircuitV2Transport,
|
||||
RelayDiscovery,
|
||||
RelayLimits,
|
||||
RelayResourceManager,
|
||||
Reservation,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"CircuitV2Protocol",
|
||||
"CircuitV2Transport",
|
||||
"PROTOCOL_ID",
|
||||
"RelayDiscovery",
|
||||
"RelayLimits",
|
||||
"RelayResourceManager",
|
||||
"Reservation",
|
||||
]
|
||||
32
libp2p/relay/circuit_v2/__init__.py
Normal file
@ -0,0 +1,32 @@
"""
|
||||
Circuit Relay v2 implementation for libp2p.
|
||||
|
||||
This package implements the Circuit Relay v2 protocol as specified in:
|
||||
https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md
|
||||
"""
|
||||
|
||||
from .discovery import (
|
||||
RelayDiscovery,
|
||||
)
|
||||
from .protocol import (
|
||||
PROTOCOL_ID,
|
||||
CircuitV2Protocol,
|
||||
)
|
||||
from .resources import (
|
||||
RelayLimits,
|
||||
RelayResourceManager,
|
||||
Reservation,
|
||||
)
|
||||
from .transport import (
|
||||
CircuitV2Transport,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"CircuitV2Protocol",
|
||||
"PROTOCOL_ID",
|
||||
"RelayLimits",
|
||||
"Reservation",
|
||||
"RelayResourceManager",
|
||||
"CircuitV2Transport",
|
||||
"RelayDiscovery",
|
||||
]
|
||||
92
libp2p/relay/circuit_v2/config.py
Normal file
@ -0,0 +1,92 @@
"""
|
||||
Configuration management for Circuit Relay v2.
|
||||
|
||||
This module handles configuration for relay roles, resource limits,
|
||||
and discovery settings.
|
||||
"""
|
||||
|
||||
from dataclasses import (
|
||||
dataclass,
|
||||
field,
|
||||
)
|
||||
|
||||
from libp2p.peer.peerinfo import (
|
||||
PeerInfo,
|
||||
)
|
||||
|
||||
from .resources import (
|
||||
RelayLimits,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RelayConfig:
|
||||
"""Configuration for Circuit Relay v2."""
|
||||
|
||||
# Role configuration
|
||||
enable_hop: bool = False # Whether to act as a relay (hop)
|
||||
enable_stop: bool = True # Whether to accept relayed connections (stop)
|
||||
enable_client: bool = True # Whether to use relays for dialing
|
||||
|
||||
# Resource limits
|
||||
limits: RelayLimits | None = None
|
||||
|
||||
# Discovery configuration
|
||||
bootstrap_relays: list[PeerInfo] = field(default_factory=list)
|
||||
min_relays: int = 3
|
||||
max_relays: int = 20
|
||||
discovery_interval: int = 300 # seconds
|
||||
|
||||
# Connection configuration
|
||||
reservation_ttl: int = 3600 # seconds
|
||||
max_circuit_duration: int = 3600 # seconds
|
||||
max_circuit_bytes: int = 1024 * 1024 * 1024 # 1GB
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
"""Initialize default values."""
|
||||
if self.limits is None:
|
||||
self.limits = RelayLimits(
|
||||
duration=self.max_circuit_duration,
|
||||
data=self.max_circuit_bytes,
|
||||
max_circuit_conns=8,
|
||||
max_reservations=4,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class HopConfig:
|
||||
"""Configuration specific to relay (hop) nodes."""
|
||||
|
||||
# Resource limits per IP
|
||||
max_reservations_per_ip: int = 8
|
||||
max_circuits_per_ip: int = 16
|
||||
|
||||
# Rate limiting
|
||||
reservation_rate_per_ip: int = 4 # per minute
|
||||
circuit_rate_per_ip: int = 8 # per minute
|
||||
|
||||
# Resource quotas
|
||||
max_circuits_total: int = 64
|
||||
max_reservations_total: int = 32
|
||||
|
||||
# Bandwidth limits
|
||||
max_bandwidth_per_circuit: int = 1024 * 1024 # 1MB/s
|
||||
max_bandwidth_total: int = 10 * 1024 * 1024 # 10MB/s
|
||||
|
||||
|
||||
@dataclass
|
||||
class ClientConfig:
|
||||
"""Configuration specific to relay clients."""
|
||||
|
||||
# Relay selection
|
||||
min_relay_score: float = 0.5
|
||||
max_relay_latency: float = 1.0 # seconds
|
||||
|
||||
# Auto-relay settings
|
||||
enable_auto_relay: bool = True
|
||||
auto_relay_timeout: int = 30 # seconds
|
||||
max_auto_relay_attempts: int = 3
|
||||
|
||||
# Reservation management
|
||||
reservation_refresh_threshold: float = 0.8 # Refresh at 80% of TTL
|
||||
max_concurrent_reservations: int = 2
|
||||
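

# Example (hypothetical usage sketch): enabling the hop role with tighter
# circuit limits; all field names are the ones defined above:
#
#     config = RelayConfig(
#         enable_hop=True,
#         max_circuit_duration=600,             # 10 minutes
#         max_circuit_bytes=64 * 1024 * 1024,   # 64MB
#     )
#     assert config.limits is not None          # filled in by __post_init__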
537
libp2p/relay/circuit_v2/discovery.py
Normal file
@ -0,0 +1,537 @@
"""
|
||||
Discovery module for Circuit Relay v2.
|
||||
|
||||
This module handles discovering and tracking relay nodes in the network.
|
||||
"""
|
||||
|
||||
from dataclasses import (
|
||||
dataclass,
|
||||
)
|
||||
import logging
|
||||
import time
|
||||
from typing import (
|
||||
Any,
|
||||
Protocol as TypingProtocol,
|
||||
cast,
|
||||
runtime_checkable,
|
||||
)
|
||||
|
||||
import trio
|
||||
|
||||
from libp2p.abc import (
|
||||
IHost,
|
||||
)
|
||||
from libp2p.custom_types import (
|
||||
TProtocol,
|
||||
)
|
||||
from libp2p.peer.id import (
|
||||
ID,
|
||||
)
|
||||
from libp2p.tools.async_service import (
|
||||
Service,
|
||||
)
|
||||
|
||||
from .pb.circuit_pb2 import (
|
||||
HopMessage,
|
||||
)
|
||||
from .protocol import (
|
||||
PROTOCOL_ID,
|
||||
)
|
||||
from .protocol_buffer import (
|
||||
StatusCode,
|
||||
)
|
||||
|
||||
logger = logging.getLogger("libp2p.relay.circuit_v2.discovery")
|
||||
|
||||
# Constants
|
||||
MAX_RELAYS_TO_TRACK = 10
|
||||
DEFAULT_DISCOVERY_INTERVAL = 60 # seconds
|
||||
STREAM_TIMEOUT = 10 # seconds
|
||||
|
||||
|
||||
# Extended interfaces for type checking
|
||||
@runtime_checkable
|
||||
class IHostWithMultiselect(TypingProtocol):
|
||||
"""Extended host interface with multiselect attribute."""
|
||||
|
||||
@property
|
||||
def multiselect(self) -> Any:
|
||||
"""Get the multiselect component."""
|
||||
...
|
||||
|
||||
|
||||
@dataclass
|
||||
class RelayInfo:
|
||||
"""Information about a discovered relay."""
|
||||
|
||||
peer_id: ID
|
||||
discovered_at: float
|
||||
last_seen: float
|
||||
has_reservation: bool = False
|
||||
reservation_expires_at: float | None = None
|
||||
reservation_data_limit: int | None = None
|
||||
|
||||
|
||||
class RelayDiscovery(Service):
    """
    Discovery service for Circuit Relay v2 nodes.

    This service discovers and keeps track of available relay nodes, and optionally
    makes reservations with them.
    """

    def __init__(
        self,
        host: IHost,
        auto_reserve: bool = False,
        discovery_interval: int = DEFAULT_DISCOVERY_INTERVAL,
        max_relays: int = MAX_RELAYS_TO_TRACK,
    ) -> None:
        """
        Initialize the discovery service.

        Parameters
        ----------
        host : IHost
            The libp2p host this discovery service is running on
        auto_reserve : bool
            Whether to automatically make reservations with discovered relays
        discovery_interval : int
            How often to run discovery, in seconds
        max_relays : int
            Maximum number of relays to track

        """
        super().__init__()
        self.host = host
        self.auto_reserve = auto_reserve
        self.discovery_interval = discovery_interval
        self.max_relays = max_relays
        self._discovered_relays: dict[ID, RelayInfo] = {}
        self._protocol_cache: dict[
            ID, set[str]
        ] = {}  # Cache protocol info to reduce queries
        self.event_started = trio.Event()
        self.is_running = False

    async def run(self, *, task_status: Any = trio.TASK_STATUS_IGNORED) -> None:
        """Run the discovery service."""
        try:
            self.is_running = True
            self.event_started.set()
            task_status.started()

            # Main discovery loop
            async with trio.open_nursery() as nursery:
                # Run initial discovery
                nursery.start_soon(self.discover_relays)

                # Set up periodic discovery
                while True:
                    await trio.sleep(self.discovery_interval)
                    if not self.manager.is_running:
                        break
                    nursery.start_soon(self.discover_relays)

                    # Clean up expired relays and reservations
                    await self._cleanup_expired()

        finally:
            self.is_running = False

    async def discover_relays(self) -> None:
        r"""
        Discover relay nodes in the network.

        This method queries the network for peers that support the
        Circuit Relay v2 protocol.
        """
        logger.debug("Starting relay discovery")

        try:
            # Get connected peers
            connected_peers = self.host.get_connected_peers()
            logger.debug(
                "Checking %d connected peers for relay support", len(connected_peers)
            )

            # Check each peer for relay protocol support
            for peer_id in connected_peers:
                if peer_id == self.host.get_id():
                    continue  # Skip ourselves

                if peer_id in self._discovered_relays:
                    # Update last seen time for the existing relay
                    self._discovered_relays[peer_id].last_seen = time.time()
                    continue

                # Check if the peer supports the relay protocol
                with trio.move_on_after(5):  # Don't wait too long for protocol info
                    if await self._supports_relay_protocol(peer_id):
                        await self._add_relay(peer_id)

            # Limit the number of relays we track
            if len(self._discovered_relays) > self.max_relays:
                # Sort by last seen time and keep only the most recent ones
                sorted_relays = sorted(
                    self._discovered_relays.items(),
                    key=lambda x: x[1].last_seen,
                    reverse=True,
                )
                to_remove = sorted_relays[self.max_relays :]
                for peer_id, _ in to_remove:
                    del self._discovered_relays[peer_id]

            logger.debug(
                "Discovery completed, tracking %d relays", len(self._discovered_relays)
            )

        except Exception as e:
            logger.error("Error during relay discovery: %s", str(e))

async def _supports_relay_protocol(self, peer_id: ID) -> bool:
|
||||
"""
|
||||
Check if a peer supports the relay protocol.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
peer_id : ID
|
||||
The ID of the peer to check
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if the peer supports the relay protocol, False otherwise
|
||||
|
||||
"""
|
||||
# Check cache first
|
||||
if peer_id in self._protocol_cache:
|
||||
return PROTOCOL_ID in self._protocol_cache[peer_id]
|
||||
|
||||
# Method 1: Try peerstore
|
||||
result = await self._check_via_peerstore(peer_id)
|
||||
if result is not None:
|
||||
return result
|
||||
|
||||
# Method 2: Try direct stream connection
|
||||
result = await self._check_via_direct_connection(peer_id)
|
||||
if result is not None:
|
||||
return result
|
||||
|
||||
# Method 3: Try protocols from mux
|
||||
result = await self._check_via_mux(peer_id)
|
||||
if result is not None:
|
||||
return result
|
||||
|
||||
# Default: Cannot determine, assume false
|
||||
return False
|
||||
|
||||
async def _check_via_peerstore(self, peer_id: ID) -> bool | None:
|
||||
"""Check protocol support via peerstore."""
|
||||
try:
|
||||
peerstore = self.host.get_peerstore()
|
||||
proto_getter = peerstore.get_protocols
|
||||
|
||||
if not callable(proto_getter):
|
||||
return None
|
||||
|
||||
try:
|
||||
# Try to get protocols
|
||||
proto_result = proto_getter(peer_id)
|
||||
|
||||
# Get protocols list
|
||||
protocols_list = []
|
||||
if hasattr(proto_result, "__await__"):
|
||||
protocols_list = await cast(Any, proto_result)
|
||||
else:
|
||||
protocols_list = proto_result
|
||||
|
||||
# Check result
|
||||
if protocols_list is not None:
|
||||
protocols = set(protocols_list)
|
||||
self._protocol_cache[peer_id] = protocols
|
||||
return PROTOCOL_ID in protocols
|
||||
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.debug("Error getting protocols: %s", str(e))
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.debug("Error accessing peerstore: %s", str(e))
|
||||
return None
|
||||
|
||||
async def _check_via_direct_connection(self, peer_id: ID) -> bool | None:
|
||||
"""Check protocol support via direct connection."""
|
||||
try:
|
||||
with trio.fail_after(STREAM_TIMEOUT):
|
||||
stream = await self.host.new_stream(peer_id, [PROTOCOL_ID])
|
||||
if stream:
|
||||
await stream.close()
|
||||
self._protocol_cache[peer_id] = {PROTOCOL_ID}
|
||||
return True
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Failed to open relay protocol stream to %s: %s", peer_id, str(e)
|
||||
)
|
||||
return None
|
||||
|
||||
async def _check_via_mux(self, peer_id: ID) -> bool | None:
|
||||
"""Check protocol support via mux protocols."""
|
||||
try:
|
||||
if not (hasattr(self.host, "get_mux") and self.host.get_mux() is not None):
|
||||
return None
|
||||
|
||||
mux = self.host.get_mux()
|
||||
if not hasattr(mux, "protocols"):
|
||||
return None
|
||||
|
||||
peer_protocols = set()
|
||||
# Get protocols from mux with proper type safety
|
||||
available_protocols = []
|
||||
if hasattr(mux, "get_protocols"):
|
||||
# Get protocols with proper typing
|
||||
mux_protocols = mux.get_protocols()
|
||||
if isinstance(mux_protocols, (list, tuple)):
|
||||
available_protocols = list(mux_protocols)
|
||||
|
||||
for protocol in available_protocols:
|
||||
try:
|
||||
with trio.fail_after(2): # Quick check
|
||||
# Ensure we have a proper protocol object
|
||||
# Use string representation since we can't use isinstance
|
||||
is_tprotocol = str(type(protocol)) == str(type(TProtocol))
|
||||
protocol_obj = (
|
||||
protocol if is_tprotocol else TProtocol(str(protocol))
|
||||
)
|
||||
stream = await self.host.new_stream(peer_id, [protocol_obj])
|
||||
if stream:
|
||||
peer_protocols.add(str(protocol_obj))
|
||||
await stream.close()
|
||||
except Exception:
|
||||
pass # Ignore errors when closing the stream
|
||||
|
||||
self._protocol_cache[peer_id] = peer_protocols
|
||||
protocol_str = str(PROTOCOL_ID)
|
||||
for protocol in peer_protocols:
|
||||
if protocol == protocol_str:
|
||||
return True
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.debug("Error checking protocols via mux: %s", str(e))
|
||||
return None
|
||||
|
||||
async def _add_relay(self, peer_id: ID) -> None:
|
||||
"""
|
||||
Add a peer as a relay and optionally make a reservation.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
peer_id : ID
|
||||
The ID of the peer to add as a relay
|
||||
|
||||
"""
|
||||
now = time.time()
|
||||
relay_info = RelayInfo(
|
||||
peer_id=peer_id,
|
||||
discovered_at=now,
|
||||
last_seen=now,
|
||||
)
|
||||
self._discovered_relays[peer_id] = relay_info
|
||||
logger.debug("Added relay %s to discovered relays", peer_id)
|
||||
|
||||
# If auto-reserve is enabled, make a reservation with this relay
|
||||
if self.auto_reserve:
|
||||
await self.make_reservation(peer_id)
|
||||
|
||||
async def make_reservation(self, peer_id: ID) -> bool:
|
||||
"""
|
||||
Make a reservation with a relay.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
peer_id : ID
|
||||
The ID of the relay to make a reservation with
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if reservation succeeded, False otherwise
|
||||
|
||||
"""
|
||||
if peer_id not in self._discovered_relays:
|
||||
logger.error("Cannot make reservation with unknown relay %s", peer_id)
|
||||
return False
|
||||
|
||||
stream = None
|
||||
try:
|
||||
logger.debug("Making reservation with relay %s", peer_id)
|
||||
|
||||
# Open a stream to the relay with timeout
|
||||
try:
|
||||
with trio.fail_after(STREAM_TIMEOUT):
|
||||
stream = await self.host.new_stream(peer_id, [PROTOCOL_ID])
|
||||
if not stream:
|
||||
logger.error("Failed to open stream to relay %s", peer_id)
|
||||
return False
|
||||
except trio.TooSlowError:
|
||||
logger.error("Timeout opening stream to relay %s", peer_id)
|
||||
return False
|
||||
|
||||
try:
|
||||
# Create and send reservation request
|
||||
request = HopMessage(
|
||||
type=HopMessage.RESERVE,
|
||||
peer=self.host.get_id().to_bytes(),
|
||||
)
|
||||
|
||||
with trio.fail_after(STREAM_TIMEOUT):
|
||||
await stream.write(request.SerializeToString())
|
||||
|
||||
# Wait for response
|
||||
response_bytes = await stream.read()
|
||||
if not response_bytes:
|
||||
logger.error("No response received from relay %s", peer_id)
|
||||
return False
|
||||
|
||||
# Parse response
|
||||
response = HopMessage()
|
||||
response.ParseFromString(response_bytes)
|
||||
|
||||
# Check if reservation was successful
|
||||
if response.type == HopMessage.RESERVE and response.HasField(
|
||||
"status"
|
||||
):
|
||||
# Access status code directly from protobuf object
|
||||
status_code = getattr(response.status, "code", StatusCode.OK)
|
||||
|
||||
if status_code == StatusCode.OK:
|
||||
# Update relay info with reservation details
|
||||
relay_info = self._discovered_relays[peer_id]
|
||||
relay_info.has_reservation = True
|
||||
|
||||
if response.HasField("reservation") and response.HasField(
|
||||
"limit"
|
||||
):
|
||||
relay_info.reservation_expires_at = (
|
||||
response.reservation.expire
|
||||
)
|
||||
relay_info.reservation_data_limit = response.limit.data
|
||||
|
||||
logger.debug(
|
||||
"Successfully made reservation with relay %s", peer_id
|
||||
)
|
||||
return True
|
||||
|
||||
# Reservation failed
|
||||
error_message = "Unknown error"
|
||||
if response.HasField("status"):
|
||||
# Access message directly from protobuf object
|
||||
error_message = getattr(response.status, "message", "")
|
||||
|
||||
logger.warning(
|
||||
"Reservation request rejected by relay %s: %s",
|
||||
peer_id,
|
||||
error_message,
|
||||
)
|
||||
return False
|
||||
|
||||
except trio.TooSlowError:
|
||||
logger.error(
|
||||
"Timeout during reservation process with relay %s", peer_id
|
||||
)
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error making reservation with relay %s: %s", peer_id, str(e))
|
||||
return False
|
||||
finally:
|
||||
# Always close the stream
|
||||
if stream:
|
||||
try:
|
||||
await stream.close()
|
||||
except Exception:
|
||||
pass # Ignore errors when closing the stream
|
||||
|
||||
return False
|
||||
|
||||
async def _cleanup_expired(self) -> None:
|
||||
"""Clean up expired relays and reservations."""
|
||||
now = time.time()
|
||||
to_remove = []
|
||||
|
||||
for peer_id, relay_info in self._discovered_relays.items():
|
||||
# Check if relay hasn't been seen in a while (3x discovery interval)
|
||||
if now - relay_info.last_seen > self.discovery_interval * 3:
|
||||
to_remove.append(peer_id)
|
||||
continue
|
||||
|
||||
# Check if reservation has expired
|
||||
if (
|
||||
relay_info.has_reservation
|
||||
and relay_info.reservation_expires_at
|
||||
and now > relay_info.reservation_expires_at
|
||||
):
|
||||
relay_info.has_reservation = False
|
||||
relay_info.reservation_expires_at = None
|
||||
relay_info.reservation_data_limit = None
|
||||
|
||||
# If auto-reserve is enabled, try to renew
|
||||
if self.auto_reserve:
|
||||
await self.make_reservation(peer_id)
|
||||
|
||||
# Remove expired relays
|
||||
for peer_id in to_remove:
|
||||
del self._discovered_relays[peer_id]
|
||||
if peer_id in self._protocol_cache:
|
||||
del self._protocol_cache[peer_id]
|
||||
|
||||
def get_relays(self) -> list[ID]:
|
||||
"""
|
||||
Get a list of discovered relay peer IDs.
|
||||
|
||||
Returns
|
||||
-------
|
||||
list[ID]
|
||||
List of discovered relay peer IDs
|
||||
|
||||
"""
|
||||
return list(self._discovered_relays.keys())
|
||||
|
||||
def get_relay_info(self, peer_id: ID) -> RelayInfo | None:
|
||||
"""
|
||||
Get information about a specific relay.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
peer_id : ID
|
||||
The ID of the relay to get information about
|
||||
|
||||
Returns
|
||||
-------
|
||||
Optional[RelayInfo]
|
||||
Information about the relay, or None if not found
|
||||
|
||||
"""
|
||||
return self._discovered_relays.get(peer_id)
|
||||
|
||||
def get_relay(self) -> ID | None:
|
||||
"""
|
||||
Get a single relay peer ID for connection purposes.
|
||||
Prioritizes relays with active reservations.
|
||||
|
||||
Returns
|
||||
-------
|
||||
Optional[ID]
|
||||
ID of a discovered relay, or None if no relays found
|
||||
|
||||
"""
|
||||
if not self._discovered_relays:
|
||||
return None
|
||||
|
||||
# First try to find a relay with an active reservation
|
||||
for peer_id, relay_info in self._discovered_relays.items():
|
||||
if relay_info and relay_info.has_reservation:
|
||||
return peer_id
|
||||
|
||||
return next(iter(self._discovered_relays.keys()), None)
|
||||
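A minimal usage sketch for the discovery service above: `host` is assumed to be an already-running IHost with some open connections, and `background_trio_service` is assumed to be the async-service helper exported by libp2p.tools.async_service; neither is part of this diff.

import trio

from libp2p.tools.async_service import background_trio_service
from libp2p.relay.circuit_v2.discovery import RelayDiscovery


async def find_relay(host):
    # Discovery scans peers the host is already connected to, so connect first.
    discovery = RelayDiscovery(host, auto_reserve=True, discovery_interval=30)
    async with background_trio_service(discovery):
        await discovery.event_started.wait()
        await trio.sleep(35)  # give at least one discovery pass time to finish
        return discovery.get_relay()  # prefers relays with active reservations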
16
libp2p/relay/circuit_v2/pb/__init__.py
Normal file
@ -0,0 +1,16 @@
"""
Protocol buffer package for circuit_v2.

Contains generated protobuf code for circuit_v2 relay protocol.
"""

# Import the classes to be accessible directly from the package
from .circuit_pb2 import (
    HopMessage,
    Limit,
    Reservation,
    Status,
    StopMessage,
)

__all__ = ["HopMessage", "Limit", "Reservation", "Status", "StopMessage"]
55
libp2p/relay/circuit_v2/pb/circuit.proto
Normal file
@ -0,0 +1,55 @@
syntax = "proto3";

package circuit.pb.v2;

// Circuit v2 message types
message HopMessage {
  enum Type {
    RESERVE = 0;
    CONNECT = 1;
    STATUS = 2;
  }

  Type type = 1;
  bytes peer = 2;
  Reservation reservation = 3;
  Limit limit = 4;
  Status status = 5;
}

message StopMessage {
  enum Type {
    CONNECT = 0;
    STATUS = 1;
  }

  Type type = 1;
  bytes peer = 2;
  Status status = 3;
}

message Reservation {
  bytes voucher = 1;
  bytes signature = 2;
  int64 expire = 3;
}

message Limit {
  int64 duration = 1;
  int64 data = 2;
}

message Status {
  enum Code {
    OK = 0;
    RESERVATION_REFUSED = 100;
    RESOURCE_LIMIT_EXCEEDED = 101;
    PERMISSION_DENIED = 102;
    CONNECTION_FAILED = 200;
    DIAL_REFUSED = 201;
    STOP_FAILED = 300;
    MALFORMED_MESSAGE = 400;
  }
  Code code = 1;
  string message = 2;
}
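To illustrate the wire format defined above, a RESERVE request round-trips through the generated classes like this (a standalone sketch, not part of the diff; the peer bytes are a placeholder):

from libp2p.relay.circuit_v2.pb.circuit_pb2 import HopMessage

msg = HopMessage(type=HopMessage.RESERVE, peer=b"<peer-id-bytes>")
wire = msg.SerializeToString()

parsed = HopMessage()
parsed.ParseFromString(wire)
assert parsed.type == HopMessage.RESERVE
assert not parsed.HasField("status")  # sub-messages stay unset until assigned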
37
libp2p/relay/circuit_v2/pb/circuit_pb2.py
Normal file
@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# NO CHECKED-IN PROTOBUF GENCODE
# source: libp2p/relay/circuit_v2/pb/circuit.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(libp2p/relay/circuit_v2/pb/circuit.proto\x12\rcircuit.pb.v2\"\xf3\x01\n\nHopMessage\x12,\n\x04type\x18\x01 \x01(\x0e\x32\x1e.circuit.pb.v2.HopMessage.Type\x12\x0c\n\x04peer\x18\x02 \x01(\x0c\x12/\n\x0breservation\x18\x03 \x01(\x0b\x32\x1a.circuit.pb.v2.Reservation\x12#\n\x05limit\x18\x04 \x01(\x0b\x32\x14.circuit.pb.v2.Limit\x12%\n\x06status\x18\x05 \x01(\x0b\x32\x15.circuit.pb.v2.Status\",\n\x04Type\x12\x0b\n\x07RESERVE\x10\x00\x12\x0b\n\x07\x43ONNECT\x10\x01\x12\n\n\x06STATUS\x10\x02\"\x92\x01\n\x0bStopMessage\x12-\n\x04type\x18\x01 \x01(\x0e\x32\x1f.circuit.pb.v2.StopMessage.Type\x12\x0c\n\x04peer\x18\x02 \x01(\x0c\x12%\n\x06status\x18\x03 \x01(\x0b\x32\x15.circuit.pb.v2.Status\"\x1f\n\x04Type\x12\x0b\n\x07\x43ONNECT\x10\x00\x12\n\n\x06STATUS\x10\x01\"A\n\x0bReservation\x12\x0f\n\x07voucher\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\x12\x0e\n\x06\x65xpire\x18\x03 \x01(\x03\"\'\n\x05Limit\x12\x10\n\x08\x64uration\x18\x01 \x01(\x03\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x03\"\xf6\x01\n\x06Status\x12(\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1a.circuit.pb.v2.Status.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"\xb0\x01\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\x17\n\x13RESERVATION_REFUSED\x10\x64\x12\x1b\n\x17RESOURCE_LIMIT_EXCEEDED\x10\x65\x12\x15\n\x11PERMISSION_DENIED\x10\x66\x12\x16\n\x11\x43ONNECTION_FAILED\x10\xc8\x01\x12\x11\n\x0c\x44IAL_REFUSED\x10\xc9\x01\x12\x10\n\x0bSTOP_FAILED\x10\xac\x02\x12\x16\n\x11MALFORMED_MESSAGE\x10\x90\x03\x62\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.relay.circuit_v2.pb.circuit_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  _HOPMESSAGE._serialized_start=60
  _HOPMESSAGE._serialized_end=303
  _HOPMESSAGE_TYPE._serialized_start=259
  _HOPMESSAGE_TYPE._serialized_end=303
  _STOPMESSAGE._serialized_start=306
  _STOPMESSAGE._serialized_end=452
  _STOPMESSAGE_TYPE._serialized_start=421
  _STOPMESSAGE_TYPE._serialized_end=452
  _RESERVATION._serialized_start=454
  _RESERVATION._serialized_end=519
  _LIMIT._serialized_start=521
  _LIMIT._serialized_end=560
  _STATUS._serialized_start=563
  _STATUS._serialized_end=809
  _STATUS_CODE._serialized_start=633
  _STATUS_CODE._serialized_end=809
# @@protoc_insertion_point(module_scope)
184
libp2p/relay/circuit_v2/pb/circuit_pb2.pyi
Normal file
@ -0,0 +1,184 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""

import builtins
import google.protobuf.descriptor
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import sys
import typing

if sys.version_info >= (3, 10):
    import typing as typing_extensions
else:
    import typing_extensions

DESCRIPTOR: google.protobuf.descriptor.FileDescriptor

@typing.final
class HopMessage(google.protobuf.message.Message):
    """Circuit v2 message types"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class _Type:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[HopMessage._Type.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        RESERVE: HopMessage._Type.ValueType  # 0
        CONNECT: HopMessage._Type.ValueType  # 1
        STATUS: HopMessage._Type.ValueType  # 2

    class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
    RESERVE: HopMessage.Type.ValueType  # 0
    CONNECT: HopMessage.Type.ValueType  # 1
    STATUS: HopMessage.Type.ValueType  # 2

    TYPE_FIELD_NUMBER: builtins.int
    PEER_FIELD_NUMBER: builtins.int
    RESERVATION_FIELD_NUMBER: builtins.int
    LIMIT_FIELD_NUMBER: builtins.int
    STATUS_FIELD_NUMBER: builtins.int
    type: global___HopMessage.Type.ValueType
    peer: builtins.bytes
    @property
    def reservation(self) -> global___Reservation: ...
    @property
    def limit(self) -> global___Limit: ...
    @property
    def status(self) -> global___Status: ...
    def __init__(
        self,
        *,
        type: global___HopMessage.Type.ValueType = ...,
        peer: builtins.bytes = ...,
        reservation: global___Reservation | None = ...,
        limit: global___Limit | None = ...,
        status: global___Status | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing.Literal["limit", b"limit", "reservation", b"reservation", "status", b"status"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing.Literal["limit", b"limit", "peer", b"peer", "reservation", b"reservation", "status", b"status", "type", b"type"]) -> None: ...

global___HopMessage = HopMessage

@typing.final
class StopMessage(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class _Type:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[StopMessage._Type.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        CONNECT: StopMessage._Type.ValueType  # 0
        STATUS: StopMessage._Type.ValueType  # 1

    class Type(_Type, metaclass=_TypeEnumTypeWrapper): ...
    CONNECT: StopMessage.Type.ValueType  # 0
    STATUS: StopMessage.Type.ValueType  # 1

    TYPE_FIELD_NUMBER: builtins.int
    PEER_FIELD_NUMBER: builtins.int
    STATUS_FIELD_NUMBER: builtins.int
    type: global___StopMessage.Type.ValueType
    peer: builtins.bytes
    @property
    def status(self) -> global___Status: ...
    def __init__(
        self,
        *,
        type: global___StopMessage.Type.ValueType = ...,
        peer: builtins.bytes = ...,
        status: global___Status | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing.Literal["status", b"status"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing.Literal["peer", b"peer", "status", b"status", "type", b"type"]) -> None: ...

global___StopMessage = StopMessage

@typing.final
class Reservation(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    VOUCHER_FIELD_NUMBER: builtins.int
    SIGNATURE_FIELD_NUMBER: builtins.int
    EXPIRE_FIELD_NUMBER: builtins.int
    voucher: builtins.bytes
    signature: builtins.bytes
    expire: builtins.int
    def __init__(
        self,
        *,
        voucher: builtins.bytes = ...,
        signature: builtins.bytes = ...,
        expire: builtins.int = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing.Literal["expire", b"expire", "signature", b"signature", "voucher", b"voucher"]) -> None: ...

global___Reservation = Reservation

@typing.final
class Limit(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    DURATION_FIELD_NUMBER: builtins.int
    DATA_FIELD_NUMBER: builtins.int
    duration: builtins.int
    data: builtins.int
    def __init__(
        self,
        *,
        duration: builtins.int = ...,
        data: builtins.int = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing.Literal["data", b"data", "duration", b"duration"]) -> None: ...

global___Limit = Limit

@typing.final
class Status(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class _Code:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _CodeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[Status._Code.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        OK: Status._Code.ValueType  # 0
        RESERVATION_REFUSED: Status._Code.ValueType  # 100
        RESOURCE_LIMIT_EXCEEDED: Status._Code.ValueType  # 101
        PERMISSION_DENIED: Status._Code.ValueType  # 102
        CONNECTION_FAILED: Status._Code.ValueType  # 200
        DIAL_REFUSED: Status._Code.ValueType  # 201
        STOP_FAILED: Status._Code.ValueType  # 300
        MALFORMED_MESSAGE: Status._Code.ValueType  # 400

    class Code(_Code, metaclass=_CodeEnumTypeWrapper): ...
    OK: Status.Code.ValueType  # 0
    RESERVATION_REFUSED: Status.Code.ValueType  # 100
    RESOURCE_LIMIT_EXCEEDED: Status.Code.ValueType  # 101
    PERMISSION_DENIED: Status.Code.ValueType  # 102
    CONNECTION_FAILED: Status.Code.ValueType  # 200
    DIAL_REFUSED: Status.Code.ValueType  # 201
    STOP_FAILED: Status.Code.ValueType  # 300
    MALFORMED_MESSAGE: Status.Code.ValueType  # 400

    CODE_FIELD_NUMBER: builtins.int
    MESSAGE_FIELD_NUMBER: builtins.int
    code: global___Status.Code.ValueType
    message: builtins.str
    def __init__(
        self,
        *,
        code: global___Status.Code.ValueType = ...,
        message: builtins.str = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing.Literal["code", b"code", "message", b"message"]) -> None: ...

global___Status = Status
800
libp2p/relay/circuit_v2/protocol.py
Normal file
@ -0,0 +1,800 @@
"""
Circuit Relay v2 protocol implementation.

This module implements the Circuit Relay v2 protocol as specified in:
https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md
"""

import logging
import time
from typing import (
    Any,
    Protocol as TypingProtocol,
    cast,
    runtime_checkable,
)

import trio

from libp2p.abc import (
    IHost,
    INetStream,
)
from libp2p.custom_types import (
    TProtocol,
)
from libp2p.io.abc import (
    ReadWriteCloser,
)
from libp2p.peer.id import (
    ID,
)
from libp2p.stream_muxer.mplex.exceptions import (
    MplexStreamEOF,
    MplexStreamReset,
)
from libp2p.tools.async_service import (
    Service,
)

from .pb.circuit_pb2 import (
    HopMessage,
    Limit,
    Reservation,
    Status as PbStatus,
    StopMessage,
)
from .protocol_buffer import (
    StatusCode,
    create_status,
)
from .resources import (
    RelayLimits,
    RelayResourceManager,
)

logger = logging.getLogger("libp2p.relay.circuit_v2")

PROTOCOL_ID = TProtocol("/libp2p/circuit/relay/2.0.0")
STOP_PROTOCOL_ID = TProtocol("/libp2p/circuit/relay/2.0.0/stop")

# Default limits for relay resources
DEFAULT_RELAY_LIMITS = RelayLimits(
    duration=60 * 60,  # 1 hour
    data=1024 * 1024 * 1024,  # 1GB
    max_circuit_conns=8,
    max_reservations=4,
)

# Stream operation timeouts
STREAM_READ_TIMEOUT = 15  # seconds
STREAM_WRITE_TIMEOUT = 15  # seconds
STREAM_CLOSE_TIMEOUT = 10  # seconds
MAX_READ_RETRIES = 5  # Maximum number of read retries


# Extended interfaces for type checking
@runtime_checkable
class IHostWithStreamHandlers(TypingProtocol):
    """Extended host interface with stream handler methods."""

    def remove_stream_handler(self, protocol_id: TProtocol) -> None:
        """Remove a stream handler for a protocol."""
        ...


@runtime_checkable
class INetStreamWithExtras(TypingProtocol):
    """Extended net stream interface with additional methods."""

    def get_remote_peer_id(self) -> ID:
        """Get the remote peer ID."""
        ...

    def is_open(self) -> bool:
        """Check if the stream is open."""
        ...

    def is_closed(self) -> bool:
        """Check if the stream is closed."""
        ...


class CircuitV2Protocol(Service):
    """
    CircuitV2Protocol implements the Circuit Relay v2 protocol.

    This protocol allows peers to establish connections through relay nodes
    when direct connections are not possible (e.g., due to NAT).
    """

    def __init__(
        self,
        host: IHost,
        limits: RelayLimits | None = None,
        allow_hop: bool = False,
    ) -> None:
        """
        Initialize a Circuit Relay v2 protocol instance.

        Parameters
        ----------
        host : IHost
            The libp2p host instance
        limits : RelayLimits | None
            Resource limits for the relay
        allow_hop : bool
            Whether to allow this node to act as a relay

        """
        super().__init__()
        self.host = host
        self.limits = limits or DEFAULT_RELAY_LIMITS
        self.allow_hop = allow_hop
        self.resource_manager = RelayResourceManager(self.limits)
        self._active_relays: dict[ID, tuple[INetStream, INetStream | None]] = {}
        self.event_started = trio.Event()

    async def run(self, *, task_status: Any = trio.TASK_STATUS_IGNORED) -> None:
        """Run the protocol service."""
        try:
            # Register protocol handlers
            if self.allow_hop:
                logger.debug("Registering stream handlers for relay protocol")
                self.host.set_stream_handler(PROTOCOL_ID, self._handle_hop_stream)
                self.host.set_stream_handler(STOP_PROTOCOL_ID, self._handle_stop_stream)
                logger.debug("Stream handlers registered successfully")

            # Signal that we're ready
            self.event_started.set()
            task_status.started()
            logger.debug("Protocol service started")

            # Wait for service to be stopped
            await self.manager.wait_finished()
        finally:
            # Clean up any active relay connections
            for src_stream, dst_stream in self._active_relays.values():
                await self._close_stream(src_stream)
                await self._close_stream(dst_stream)
            self._active_relays.clear()

            # Unregister protocol handlers
            if self.allow_hop:
                try:
                    # Cast host to extended interface with remove_stream_handler
                    host_with_handlers = cast(IHostWithStreamHandlers, self.host)
                    host_with_handlers.remove_stream_handler(PROTOCOL_ID)
                    host_with_handlers.remove_stream_handler(STOP_PROTOCOL_ID)
                except Exception as e:
                    logger.error("Error unregistering stream handlers: %s", str(e))

    async def _close_stream(self, stream: INetStream | None) -> None:
        """Helper function to safely close a stream."""
        if stream is None:
            return

        try:
            with trio.fail_after(STREAM_CLOSE_TIMEOUT):
                await stream.close()
        except Exception:
            try:
                await stream.reset()
            except Exception:
                pass

    async def _read_stream_with_retry(
        self,
        stream: INetStream,
        max_retries: int = MAX_READ_RETRIES,
    ) -> bytes | None:
        """
        Helper function to read from a stream with retries.

        Parameters
        ----------
        stream : INetStream
            The stream to read from
        max_retries : int
            Maximum number of read retries

        Returns
        -------
        bytes | None
            The data read from the stream, or None if the stream is closed/reset

        Raises
        ------
        trio.TooSlowError
            If read timeout occurs after all retries
        Exception
            For other unexpected errors

        """
        retries = 0
        last_error: Any = None
        backoff_time = 0.2  # Base backoff time in seconds

        while retries < max_retries:
            try:
                with trio.fail_after(STREAM_READ_TIMEOUT):
                    # Try reading with timeout
                    logger.debug(
                        "Attempting to read from stream (attempt %d/%d)",
                        retries + 1,
                        max_retries,
                    )
                    data = await stream.read()
                    if not data:  # EOF
                        logger.debug("Stream EOF detected")
                        return None

                    logger.debug("Successfully read %d bytes from stream", len(data))
                    return data
            except trio.WouldBlock:
                # Retry with backoff if the stream would block
                retries += 1
                logger.debug(
                    "Stream would block (attempt %d/%d), retrying...",
                    retries,
                    max_retries,
                )
                await trio.sleep(backoff_time * retries)  # Linearly increasing backoff
                continue
            except (MplexStreamEOF, MplexStreamReset):
                # Stream closed/reset - no point retrying
                logger.debug("Stream closed/reset during read")
                return None
            except trio.TooSlowError as e:
                last_error = e
                retries += 1
                logger.debug(
                    "Read timeout (attempt %d/%d), retrying...", retries, max_retries
                )
                if retries < max_retries:
                    # Wait longer before retry with increasing backoff
                    await trio.sleep(backoff_time * retries)
                    continue
            except Exception as e:
                logger.error("Unexpected error reading from stream: %s", str(e))
                last_error = e
                retries += 1
                if retries < max_retries:
                    await trio.sleep(backoff_time * retries)
                    continue
                raise

        if last_error:
            if isinstance(last_error, trio.TooSlowError):
                logger.error("Read timed out after %d retries", max_retries)
            raise last_error

        return None

    async def _handle_hop_stream(self, stream: INetStream) -> None:
        """
        Handle incoming HOP streams.

        This handler processes relay requests from other peers.
        """
        remote_id = "unknown peer"
        try:
            # Try to get peer ID first
            try:
                # Cast to extended interface with get_remote_peer_id
                stream_with_peer_id = cast(INetStreamWithExtras, stream)
                remote_peer_id = stream_with_peer_id.get_remote_peer_id()
                remote_id = str(remote_peer_id)
            except Exception:
                # Fall back to address if peer ID not available
                remote_addr = stream.get_remote_address()
                remote_id = f"peer at {remote_addr}" if remote_addr else "unknown peer"

            logger.debug("Handling hop stream from %s", remote_id)

            # First, handle the read timeout gracefully
            try:
                with trio.fail_after(
                    STREAM_READ_TIMEOUT * 2
                ):  # Double the timeout for reading
                    msg_bytes = await stream.read()
                    if not msg_bytes:
                        logger.error(
                            "Empty read from hop stream from %s",
                            remote_id,
                        )
                        # Create a proto Status directly
                        pb_status = PbStatus()
                        pb_status.code = cast(Any, int(StatusCode.MALFORMED_MESSAGE))
                        pb_status.message = "Empty message received"

                        response = HopMessage(
                            type=HopMessage.STATUS,
                            status=pb_status,
                        )
                        await stream.write(response.SerializeToString())
                        await trio.sleep(0.5)  # Longer wait to ensure message is sent
                        return
            except trio.TooSlowError:
                logger.error(
                    "Timeout reading from hop stream from %s",
                    remote_id,
                )
                # Create a proto Status directly
                pb_status = PbStatus()
                pb_status.code = cast(Any, int(StatusCode.CONNECTION_FAILED))
                pb_status.message = "Stream read timeout"

                response = HopMessage(
                    type=HopMessage.STATUS,
                    status=pb_status,
                )
                await stream.write(response.SerializeToString())
                await trio.sleep(0.5)  # Longer wait to ensure the message is sent
                return
            except Exception as e:
                logger.error(
                    "Error reading from hop stream from %s: %s",
                    remote_id,
                    str(e),
                )
                # Create a proto Status directly
                pb_status = PbStatus()
                pb_status.code = cast(Any, int(StatusCode.MALFORMED_MESSAGE))
                pb_status.message = f"Read error: {str(e)}"

                response = HopMessage(
                    type=HopMessage.STATUS,
                    status=pb_status,
                )
                await stream.write(response.SerializeToString())
                await trio.sleep(0.5)  # Longer wait to ensure the message is sent
                return

            # Parse the message
            try:
                hop_msg = HopMessage()
                hop_msg.ParseFromString(msg_bytes)
            except Exception as e:
                logger.error(
                    "Error parsing hop message from %s: %s",
                    remote_id,
                    str(e),
                )
                # Create a proto Status directly
                pb_status = PbStatus()
                pb_status.code = cast(Any, int(StatusCode.MALFORMED_MESSAGE))
                pb_status.message = f"Parse error: {str(e)}"

                response = HopMessage(
                    type=HopMessage.STATUS,
                    status=pb_status,
                )
                await stream.write(response.SerializeToString())
                await trio.sleep(0.5)  # Longer wait to ensure the message is sent
                return

            # Process based on message type
            if hop_msg.type == HopMessage.RESERVE:
                logger.debug("Handling RESERVE message from %s", remote_id)
                await self._handle_reserve(stream, hop_msg)
                # For RESERVE requests, let the client close the stream
                return
            elif hop_msg.type == HopMessage.CONNECT:
                logger.debug("Handling CONNECT message from %s", remote_id)
                await self._handle_connect(stream, hop_msg)
            else:
                logger.error("Invalid message type %d from %s", hop_msg.type, remote_id)
                # Send an error response using the _send_status helper
                await self._send_status(
                    stream,
                    StatusCode.MALFORMED_MESSAGE,
                    f"Invalid message type: {hop_msg.type}",
                )

        except Exception as e:
            logger.error(
                "Unexpected error handling hop stream from %s: %s", remote_id, str(e)
            )
            try:
                # Send an error response using the _send_status helper
                await self._send_status(
                    stream,
                    StatusCode.MALFORMED_MESSAGE,
                    f"Internal error: {str(e)}",
                )
            except Exception as e2:
                logger.error(
                    "Failed to send error response to %s: %s", remote_id, str(e2)
                )

    async def _handle_stop_stream(self, stream: INetStream) -> None:
        """
        Handle incoming STOP streams.

        This handler processes incoming relay connections from the destination side.
        """
        try:
            # Read the incoming message with timeout
            with trio.fail_after(STREAM_READ_TIMEOUT):
                msg_bytes = await stream.read()
                stop_msg = StopMessage()
                stop_msg.ParseFromString(msg_bytes)

            if stop_msg.type != StopMessage.CONNECT:
                await self._send_stop_status(
                    stream,
                    StatusCode.MALFORMED_MESSAGE,
                    "Invalid message type",
                )
                await self._close_stream(stream)
                return

            # Get the source stream from active relays
            peer_id = ID(stop_msg.peer)
            if peer_id not in self._active_relays:
                await self._send_stop_status(
                    stream,
                    StatusCode.CONNECTION_FAILED,
                    "No pending relay connection",
                )
                await self._close_stream(stream)
                return

            src_stream, _ = self._active_relays[peer_id]
            self._active_relays[peer_id] = (src_stream, stream)

            # Send success status to both sides
            await self._send_status(
                src_stream,
                StatusCode.OK,
                "Connection established",
            )
            await self._send_stop_status(
                stream,
                StatusCode.OK,
                "Connection established",
            )

            # Start relaying data
            async with trio.open_nursery() as nursery:
                nursery.start_soon(self._relay_data, src_stream, stream, peer_id)
                nursery.start_soon(self._relay_data, stream, src_stream, peer_id)

        except trio.TooSlowError:
            logger.error("Timeout reading from stop stream")
            await self._send_stop_status(
                stream,
                StatusCode.CONNECTION_FAILED,
                "Stream read timeout",
            )
            await self._close_stream(stream)
        except Exception as e:
            logger.error("Error handling stop stream: %s", str(e))
            try:
                await self._send_stop_status(
                    stream,
                    StatusCode.MALFORMED_MESSAGE,
                    str(e),
                )
                await self._close_stream(stream)
            except Exception:
                pass

    async def _handle_reserve(self, stream: INetStream, msg: Any) -> None:
        """Handle a reservation request."""
        peer_id = None
        try:
            peer_id = ID(msg.peer)
            logger.debug("Handling reservation request from peer %s", peer_id)

            # Check if we can accept more reservations
            if not self.resource_manager.can_accept_reservation(peer_id):
                logger.debug("Reservation limit exceeded for peer %s", peer_id)
                # Send status message with STATUS type
                status = create_status(
                    code=StatusCode.RESOURCE_LIMIT_EXCEEDED,
                    message="Reservation limit exceeded",
                )

                status_msg = HopMessage(
                    type=HopMessage.STATUS,
                    status=status,
                )
                await stream.write(status_msg.SerializeToString())
                return

            # Accept reservation
            logger.debug("Accepting reservation from peer %s", peer_id)
            ttl = self.resource_manager.reserve(peer_id)

            # Send reservation success response
            with trio.fail_after(STREAM_WRITE_TIMEOUT):
                status = create_status(
                    code=StatusCode.OK, message="Reservation accepted"
                )

                response = HopMessage(
                    type=HopMessage.STATUS,
                    status=status,
                    reservation=Reservation(
                        expire=int(time.time() + ttl),
                        voucher=b"",  # We don't use vouchers yet
                        signature=b"",  # We don't use signatures yet
                    ),
                    limit=Limit(
                        duration=self.limits.duration,
                        data=self.limits.data,
                    ),
                )

                # Log the response message details for debugging
                logger.debug(
                    "Sending reservation response: type=%s, status=%s, ttl=%d",
                    response.type,
                    getattr(response.status, "code", "unknown"),
                    ttl,
                )

                # Send the response with increased timeout
                await stream.write(response.SerializeToString())

                # Add a small wait to ensure the message is fully sent
                await trio.sleep(0.1)

            logger.debug("Reservation response sent successfully")

        except Exception as e:
            logger.error("Error handling reservation request: %s", str(e))
            if cast(INetStreamWithExtras, stream).is_open():
                try:
                    # Send error response
                    await self._send_status(
                        stream,
                        StatusCode.INTERNAL_ERROR,
                        f"Failed to process reservation: {str(e)}",
                    )
                except Exception as send_err:
                    logger.error("Failed to send error response: %s", str(send_err))
        finally:
            # Always close the stream when done with reservation
            if cast(INetStreamWithExtras, stream).is_open():
                try:
                    with trio.fail_after(STREAM_CLOSE_TIMEOUT):
                        await stream.close()
                except Exception as close_err:
                    logger.error("Error closing stream: %s", str(close_err))

    async def _handle_connect(self, stream: INetStream, msg: Any) -> None:
        """Handle a connect request."""
        peer_id = ID(msg.peer)
        dst_stream: INetStream | None = None

        # Verify reservation if provided
        if msg.HasField("reservation"):
            if not self.resource_manager.verify_reservation(peer_id, msg.reservation):
                await self._send_status(
                    stream,
                    StatusCode.PERMISSION_DENIED,
                    "Invalid reservation",
                )
                await stream.reset()
                return

        # Check resource limits
        if not self.resource_manager.can_accept_connection(peer_id):
            await self._send_status(
                stream,
                StatusCode.RESOURCE_LIMIT_EXCEEDED,
                "Connection limit exceeded",
            )
            await stream.reset()
            return

        try:
            # Store the source stream with properly typed None
            self._active_relays[peer_id] = (stream, None)

            # Try to connect to the destination with timeout
            with trio.fail_after(STREAM_READ_TIMEOUT):
                dst_stream = await self.host.new_stream(peer_id, [STOP_PROTOCOL_ID])
                if not dst_stream:
                    raise ConnectionError("Could not connect to destination")

                # Send STOP CONNECT message
                stop_msg = StopMessage(
                    type=StopMessage.CONNECT,
                    # Cast to extended interface with get_remote_peer_id
                    peer=cast(INetStreamWithExtras, stream)
                    .get_remote_peer_id()
                    .to_bytes(),
                )
                await dst_stream.write(stop_msg.SerializeToString())

                # Wait for response from destination
                resp_bytes = await dst_stream.read()
                resp = StopMessage()
                resp.ParseFromString(resp_bytes)

            # Handle status attributes from the response
            if resp.HasField("status"):
                # Get code and message attributes with defaults
                status_code = getattr(resp.status, "code", StatusCode.OK)
                status_msg = getattr(resp.status, "message", "Unknown error")
            else:
                status_code = StatusCode.OK
                status_msg = "No status provided"

            if status_code != StatusCode.OK:
                raise ConnectionError(
                    f"Destination rejected connection: {status_msg}"
                )

            # Update active relays with destination stream
            self._active_relays[peer_id] = (stream, dst_stream)

            # Update reservation connection count
            reservation = self.resource_manager._reservations.get(peer_id)
            if reservation:
                reservation.active_connections += 1

            # Send success status
            await self._send_status(
                stream,
                StatusCode.OK,
                "Connection established",
            )

            # Start relaying data
            async with trio.open_nursery() as nursery:
                nursery.start_soon(self._relay_data, stream, dst_stream, peer_id)
                nursery.start_soon(self._relay_data, dst_stream, stream, peer_id)

        except (trio.TooSlowError, ConnectionError) as e:
            logger.error("Error establishing relay connection: %s", str(e))
            await self._send_status(
                stream,
                StatusCode.CONNECTION_FAILED,
                str(e),
            )
            if peer_id in self._active_relays:
                del self._active_relays[peer_id]
                # Clean up reservation connection count on failure
                reservation = self.resource_manager._reservations.get(peer_id)
                if reservation:
                    reservation.active_connections -= 1
            await stream.reset()
            if dst_stream and not cast(INetStreamWithExtras, dst_stream).is_closed():
                await dst_stream.reset()
        except Exception as e:
            logger.error("Unexpected error in connect handler: %s", str(e))
            await self._send_status(
                stream,
                StatusCode.CONNECTION_FAILED,
                "Internal error",
            )
            if peer_id in self._active_relays:
                del self._active_relays[peer_id]
            await stream.reset()
            if dst_stream and not cast(INetStreamWithExtras, dst_stream).is_closed():
                await dst_stream.reset()

    async def _relay_data(
        self,
        src_stream: INetStream,
        dst_stream: INetStream,
        peer_id: ID,
    ) -> None:
        """
        Relay data between two streams.

        Parameters
        ----------
        src_stream : INetStream
            Source stream to read from
        dst_stream : INetStream
            Destination stream to write to
        peer_id : ID
            ID of the peer being relayed

        """
        try:
            while True:
                # Read data with retries
                data = await self._read_stream_with_retry(src_stream)
                if not data:
                    logger.info("Source stream closed/reset")
                    break

                # Write data with timeout
                try:
                    with trio.fail_after(STREAM_WRITE_TIMEOUT):
                        await dst_stream.write(data)
                except trio.TooSlowError:
                    logger.error("Timeout writing to destination stream")
                    break
                except Exception as e:
                    logger.error("Error writing to destination stream: %s", str(e))
                    break

                # Update resource usage
                reservation = self.resource_manager._reservations.get(peer_id)
                if reservation:
                    reservation.data_used += len(data)
                    if reservation.data_used >= reservation.limits.data:
                        logger.warning("Data limit exceeded for peer %s", peer_id)
                        break

        except Exception as e:
            logger.error("Error relaying data: %s", str(e))
        finally:
            # Clean up streams and remove from active relays
            await src_stream.reset()
            await dst_stream.reset()
            if peer_id in self._active_relays:
                del self._active_relays[peer_id]

    async def _send_status(
        self,
        stream: ReadWriteCloser,
        code: int,
        message: str,
    ) -> None:
        """Send a status message."""
        try:
            logger.debug("Sending status message with code %s: %s", code, message)
            with trio.fail_after(STREAM_WRITE_TIMEOUT * 2):  # Double the timeout
                # Create a proto Status directly
                pb_status = PbStatus()
                pb_status.code = cast(
                    Any, int(code)
                )  # Cast to Any to avoid type errors
                pb_status.message = message

                status_msg = HopMessage(
                    type=HopMessage.STATUS,
                    status=pb_status,
                )

                msg_bytes = status_msg.SerializeToString()
                logger.debug("Status message serialized (%d bytes)", len(msg_bytes))

                await stream.write(msg_bytes)
                logger.debug("Status message sent, waiting for processing")

                # Wait longer to ensure the message is sent
                await trio.sleep(1.5)
                logger.debug("Status message sending completed")
        except trio.TooSlowError:
            logger.error(
                "Timeout sending status message: code=%s, message=%s", code, message
            )
        except Exception as e:
            logger.error("Error sending status message: %s", str(e))

    async def _send_stop_status(
        self,
        stream: ReadWriteCloser,
        code: int,
        message: str,
    ) -> None:
        """Send a status message on a STOP stream."""
        try:
            logger.debug("Sending stop status message with code %s: %s", code, message)
            with trio.fail_after(STREAM_WRITE_TIMEOUT * 2):  # Double the timeout
                # Create a proto Status directly
                pb_status = PbStatus()
                pb_status.code = cast(
                    Any, int(code)
                )  # Cast to Any to avoid type errors
                pb_status.message = message

                status_msg = StopMessage(
                    type=StopMessage.STATUS,
                    status=pb_status,
                )
                await stream.write(status_msg.SerializeToString())
                await trio.sleep(0.5)  # Ensure message is sent
        except Exception as e:
            logger.error("Error sending stop status message: %s", str(e))
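A hedged sketch of enabling the hop role with this protocol class; `host` and the `background_trio_service` helper are assumptions outside this diff:

import trio

from libp2p.tools.async_service import background_trio_service
from libp2p.relay.circuit_v2.protocol import CircuitV2Protocol


async def serve_as_relay(host):
    # allow_hop=True registers the HOP and STOP stream handlers on the host.
    protocol = CircuitV2Protocol(host, allow_hop=True)
    async with background_trio_service(protocol):
        await protocol.event_started.wait()
        await trio.sleep_forever()  # relay circuits until cancelled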
55
libp2p/relay/circuit_v2/protocol_buffer.py
Normal file
@ -0,0 +1,55 @@
"""
Protocol buffer wrapper classes for Circuit Relay v2.

This module provides wrapper classes for protocol buffer generated objects
to make them easier to work with in type-checked code.
"""

from enum import (
    IntEnum,
)
from typing import (
    Any,
)

from .pb.circuit_pb2 import Status as PbStatus


# Define Status codes as an Enum for better type safety and organization
class StatusCode(IntEnum):
    OK = 0
    RESERVATION_REFUSED = 100
    RESOURCE_LIMIT_EXCEEDED = 101
    PERMISSION_DENIED = 102
    CONNECTION_FAILED = 200
    DIAL_REFUSED = 201
    STOP_FAILED = 300
    MALFORMED_MESSAGE = 400
    INTERNAL_ERROR = 500


def create_status(code: int = StatusCode.OK, message: str = "") -> Any:
    """
    Create a protocol buffer Status object.

    Parameters
    ----------
    code : int
        The status code
    message : str
        The status message

    Returns
    -------
    Any
        The protocol buffer Status object

    """
    # Create status object
    pb_obj = PbStatus()

    # Convert the integer status code to the protobuf enum value type
    pb_obj.code = PbStatus.Code.ValueType(code)
    pb_obj.message = message

    return pb_obj
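For example, this helper builds the statuses that protocol.py attaches to its responses (a standalone sketch, not part of the diff):

from libp2p.relay.circuit_v2.pb.circuit_pb2 import HopMessage
from libp2p.relay.circuit_v2.protocol_buffer import StatusCode, create_status

status = create_status(StatusCode.RESOURCE_LIMIT_EXCEEDED, "Reservation limit exceeded")
response = HopMessage(type=HopMessage.STATUS, status=status)
assert response.status.code == StatusCode.RESOURCE_LIMIT_EXCEEDED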
254
libp2p/relay/circuit_v2/resources.py
Normal file
@ -0,0 +1,254 @@
|
||||
"""
|
||||
Resource management for Circuit Relay v2.
|
||||
|
||||
This module handles managing resources for relay operations,
|
||||
including reservations and connection limits.
|
||||
"""
|
||||
|
||||
from dataclasses import (
|
||||
dataclass,
|
||||
)
|
||||
import hashlib
|
||||
import os
|
||||
import time
|
||||
|
||||
from libp2p.peer.id import (
|
||||
ID,
|
||||
)
|
||||
|
||||
# Import the protobuf definitions
|
||||
from .pb.circuit_pb2 import Reservation as PbReservation
|
||||
|
||||
|
||||
@dataclass
|
||||
class RelayLimits:
|
||||
"""Configuration for relay resource limits."""
|
||||
|
||||
duration: int # Maximum duration of a relay connection in seconds
|
||||
data: int # Maximum data transfer allowed in bytes
|
||||
max_circuit_conns: int # Maximum number of concurrent circuit connections
|
||||
max_reservations: int # Maximum number of active reservations
|
||||
|
||||
|
||||
class Reservation:
|
||||
"""Represents a relay reservation."""
|
||||
|
||||
def __init__(self, peer_id: ID, limits: RelayLimits):
|
||||
"""
|
||||
Initialize a new reservation.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
peer_id : ID
|
||||
The peer ID this reservation is for
|
||||
limits : RelayLimits
|
||||
The resource limits for this reservation
|
||||
|
||||
"""
|
||||
self.peer_id = peer_id
|
||||
self.limits = limits
|
||||
self.created_at = time.time()
|
||||
self.expires_at = self.created_at + limits.duration
|
||||
self.data_used = 0
|
||||
self.active_connections = 0
|
||||
self.voucher = self._generate_voucher()
|
||||
|
||||
def _generate_voucher(self) -> bytes:
|
||||
"""
|
||||
Generate a unique cryptographically secure voucher for this reservation.
|
||||
|
||||
Returns
|
||||
-------
|
||||
bytes
|
||||
A secure voucher token
|
||||
|
||||
"""
|
||||
# Create a random token using a combination of:
|
||||
# - Random bytes for unpredictability
|
||||
# - Peer ID to bind it to the specific peer
|
||||
# - Timestamp for uniqueness
|
||||
# - Hash everything for a fixed size output
|
||||
random_bytes = os.urandom(16) # 128 bits of randomness
|
||||
timestamp = str(int(self.created_at * 1000000)).encode()
|
||||
peer_bytes = self.peer_id.to_bytes()
|
||||
|
||||
# Combine all elements and hash them
|
||||
h = hashlib.sha256()
|
||||
h.update(random_bytes)
|
||||
h.update(timestamp)
|
||||
h.update(peer_bytes)
|
||||
|
||||
return h.digest()
|
||||
|
||||
def is_expired(self) -> bool:
|
||||
"""Check if the reservation has expired."""
|
||||
return time.time() > self.expires_at
|
||||
|
||||
def can_accept_connection(self) -> bool:
|
||||
"""Check if a new connection can be accepted."""
|
||||
return (
|
||||
not self.is_expired()
|
||||
and self.active_connections < self.limits.max_circuit_conns
|
||||
and self.data_used < self.limits.data
|
||||
)
|
||||
|
||||
def to_proto(self) -> PbReservation:
|
||||
"""Convert the reservation to its protobuf representation."""
|
||||
# TODO: For production use, implement proper signature generation
|
||||
# The signature should be created by signing the voucher with the
|
||||
# peer's private key. The current implementation with an empty signature
|
||||
# is intended for development and testing only.
|
||||
return PbReservation(
|
||||
expire=int(self.expires_at),
|
||||
voucher=self.voucher,
|
||||
signature=b"",
|
||||
)


class RelayResourceManager:
    """
    Manages resources and reservations for relay operations.

    This class handles:
    - Tracking active reservations
    - Enforcing resource limits
    - Managing connection quotas
    """

    def __init__(self, limits: RelayLimits):
        """
        Initialize the resource manager.

        Parameters
        ----------
        limits : RelayLimits
            The resource limits to enforce

        """
        self.limits = limits
        self._reservations: dict[ID, Reservation] = {}

    def can_accept_reservation(self, peer_id: ID) -> bool:
        """
        Check if a new reservation can be accepted for the given peer.

        Parameters
        ----------
        peer_id : ID
            The peer ID requesting the reservation

        Returns
        -------
        bool
            True if the reservation can be accepted

        """
        # Clean expired reservations
        self._clean_expired()

        # Check if the peer already has a valid reservation
        existing = self._reservations.get(peer_id)
        if existing and not existing.is_expired():
            return True

        # Check if we're at the reservation limit
        return len(self._reservations) < self.limits.max_reservations

    def create_reservation(self, peer_id: ID) -> Reservation:
        """
        Create a new reservation for the given peer.

        Parameters
        ----------
        peer_id : ID
            The peer ID to create the reservation for

        Returns
        -------
        Reservation
            The newly created reservation

        """
        reservation = Reservation(peer_id, self.limits)
        self._reservations[peer_id] = reservation
        return reservation

    def verify_reservation(self, peer_id: ID, proto_res: PbReservation) -> bool:
        """
        Verify a reservation from a protobuf message.

        Parameters
        ----------
        peer_id : ID
            The peer ID the reservation is for
        proto_res : PbReservation
            The protobuf reservation message

        Returns
        -------
        bool
            True if the reservation is valid

        """
        # TODO: Implement voucher and signature verification
        reservation = self._reservations.get(peer_id)
        return (
            reservation is not None
            and not reservation.is_expired()
            # Compare against the truncated expiry: to_proto() stores
            # int(expires_at), so comparing the raw float would almost
            # never match.
            and int(reservation.expires_at) == proto_res.expire
        )
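
A companion sketch of the voucher and signature check the TODO above leaves open, assuming the relay's public key exposes py-libp2p's PublicKey.verify(data, signature) -> bool; verify_signed_reservation is a hypothetical helper, not part of this diff:

    def verify_signed_reservation(
        proto_res: PbReservation, relay_pubkey: PublicKey
    ) -> bool:
        # Reject unsigned reservations, then check that the voucher was
        # actually signed by the relay's key.
        if not proto_res.signature:
            return False
        return relay_pubkey.verify(proto_res.voucher, proto_res.signature)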

    def can_accept_connection(self, peer_id: ID) -> bool:
        """
        Check if a new connection can be accepted for the given peer.

        Parameters
        ----------
        peer_id : ID
            The peer ID requesting the connection

        Returns
        -------
        bool
            True if the connection can be accepted

        """
        reservation = self._reservations.get(peer_id)
        return reservation is not None and reservation.can_accept_connection()

    def _clean_expired(self) -> None:
        """Remove expired reservations."""
        now = time.time()
        expired = [
            peer_id
            for peer_id, res in self._reservations.items()
            if now > res.expires_at
        ]
        for peer_id in expired:
            del self._reservations[peer_id]

    def reserve(self, peer_id: ID) -> int:
        """
        Create or update a reservation for a peer and return the TTL.

        Parameters
        ----------
        peer_id : ID
            The peer ID to reserve for

        Returns
        -------
        int
            The TTL of the reservation in seconds

        """
        # Check for an existing reservation
        existing = self._reservations.get(peer_id)
        if existing and not existing.is_expired():
            # Return the remaining time for the existing reservation
            return max(0, int(existing.expires_at - time.time()))

        # Create a new reservation
        self.create_reservation(peer_id)
        return self.limits.duration
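
A brief usage sketch of the reservation flow; the RelayLimits keyword arguments and some_peer_id are illustrative assumptions, not values from this diff:

    limits = RelayLimits(
        duration=3600,        # reservation TTL in seconds
        data=1 << 20,         # per-reservation data budget in bytes
        max_circuit_conns=4,  # concurrent circuits per reservation
        max_reservations=16,  # total reservations the relay will hold
    )
    manager = RelayResourceManager(limits)
    if manager.can_accept_reservation(some_peer_id):
        ttl = manager.reserve(some_peer_id)  # seconds until expiry
        assert manager.can_accept_connection(some_peer_id)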
427
libp2p/relay/circuit_v2/transport.py
Normal file
@ -0,0 +1,427 @@
"""
|
||||
Transport implementation for Circuit Relay v2.
|
||||
|
||||
This module implements the transport layer for Circuit Relay v2,
|
||||
allowing peers to establish connections through relay nodes.
|
||||
"""
|
||||
|
||||
from collections.abc import Awaitable, Callable
|
||||
import logging
|
||||
|
||||
import multiaddr
|
||||
import trio
|
||||
|
||||
from libp2p.abc import (
|
||||
IHost,
|
||||
IListener,
|
||||
INetStream,
|
||||
ITransport,
|
||||
ReadWriteCloser,
|
||||
)
|
||||
from libp2p.network.connection.raw_connection import (
|
||||
RawConnection,
|
||||
)
|
||||
from libp2p.peer.id import (
|
||||
ID,
|
||||
)
|
||||
from libp2p.peer.peerinfo import (
|
||||
PeerInfo,
|
||||
)
|
||||
from libp2p.tools.async_service import (
|
||||
Service,
|
||||
)
|
||||
|
||||
from .config import (
|
||||
ClientConfig,
|
||||
RelayConfig,
|
||||
)
|
||||
from .discovery import (
|
||||
RelayDiscovery,
|
||||
)
|
||||
from .pb.circuit_pb2 import (
|
||||
HopMessage,
|
||||
StopMessage,
|
||||
)
|
||||
from .protocol import (
|
||||
PROTOCOL_ID,
|
||||
CircuitV2Protocol,
|
||||
)
|
||||
from .protocol_buffer import (
|
||||
StatusCode,
|
||||
)
|
||||
|
||||
logger = logging.getLogger("libp2p.relay.circuit_v2.transport")
|
||||
|
||||
|
||||
class CircuitV2Transport(ITransport):
|
||||
"""
|
||||
CircuitV2Transport implements the transport interface for Circuit Relay v2.
|
||||
|
||||
This transport allows peers to establish connections through relay nodes
|
||||
when direct connections are not possible.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
host: IHost,
|
||||
protocol: CircuitV2Protocol,
|
||||
config: RelayConfig,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize the Circuit v2 transport.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
host : IHost
|
||||
The libp2p host this transport is running on
|
||||
protocol : CircuitV2Protocol
|
||||
The Circuit v2 protocol instance
|
||||
config : RelayConfig
|
||||
Relay configuration
|
||||
|
||||
"""
|
||||
self.host = host
|
||||
self.protocol = protocol
|
||||
self.config = config
|
||||
self.client_config = ClientConfig()
|
||||
self.discovery = RelayDiscovery(
|
||||
host=host,
|
||||
auto_reserve=config.enable_client,
|
||||
discovery_interval=config.discovery_interval,
|
||||
max_relays=config.max_relays,
|
||||
)
|
||||
|
||||
async def dial(
|
||||
self,
|
||||
maddr: multiaddr.Multiaddr,
|
||||
) -> RawConnection:
|
||||
"""
|
||||
Dial a peer using the multiaddr.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
maddr : multiaddr.Multiaddr
|
||||
The multiaddr to dial
|
||||
|
||||
Returns
|
||||
-------
|
||||
RawConnection
|
||||
The established connection
|
||||
|
||||
Raises
|
||||
------
|
||||
ConnectionError
|
||||
If the connection cannot be established
|
||||
|
||||
"""
|
||||
# Extract peer ID from multiaddr - P_P2P code is 0x01A5 (421)
|
||||
peer_id_str = maddr.value_for_protocol("p2p")
|
||||
if not peer_id_str:
|
||||
raise ConnectionError("Multiaddr does not contain peer ID")
|
||||
|
||||
peer_id = ID.from_base58(peer_id_str)
|
||||
peer_info = PeerInfo(peer_id, [maddr])
|
||||
|
||||
# Use the internal dial_peer_info method
|
||||
return await self.dial_peer_info(peer_info)
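
A short usage sketch of dialing through the transport; the address and peer ID below are placeholders, and transport is assumed to be an already constructed CircuitV2Transport:

    target = multiaddr.Multiaddr(
        "/ip4/203.0.113.7/tcp/4001/p2p/QmTargetPeerIdPlaceholder"
    )
    conn = await transport.dial(target)  # RawConnection routed via a relay

Relay selection happens inside dial_peer_info when no relay_peer_id is given, so callers only need the target's address.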

    async def dial_peer_info(
        self,
        peer_info: PeerInfo,
        *,
        relay_peer_id: ID | None = None,
    ) -> RawConnection:
        """
        Dial a peer through a relay.

        Parameters
        ----------
        peer_info : PeerInfo
            The peer to dial
        relay_peer_id : Optional[ID], optional
            Optional specific relay peer to use

        Returns
        -------
        RawConnection
            The established connection

        Raises
        ------
        ConnectionError
            If the connection cannot be established

        """
        # If no specific relay is provided, try to find one
        if relay_peer_id is None:
            relay_peer_id = await self._select_relay(peer_info)
            if not relay_peer_id:
                raise ConnectionError("No suitable relay found")

        # Get a stream to the relay
        relay_stream = await self.host.new_stream(relay_peer_id, [PROTOCOL_ID])
        if not relay_stream:
            raise ConnectionError(f"Could not open stream to relay {relay_peer_id}")

        try:
            # First try to make a reservation if client mode is enabled
            if self.config.enable_client:
                success = await self._make_reservation(relay_stream, relay_peer_id)
                if not success:
                    logger.warning(
                        "Failed to make reservation with relay %s", relay_peer_id
                    )

            # Send the HOP CONNECT message
            hop_msg = HopMessage(
                type=HopMessage.CONNECT,
                peer=peer_info.peer_id.to_bytes(),
            )
            await relay_stream.write(hop_msg.SerializeToString())

            # Read the response
            resp_bytes = await relay_stream.read()
            resp = HopMessage()
            resp.ParseFromString(resp_bytes)

            # Access the status attributes directly
            status_code = getattr(resp.status, "code", StatusCode.OK)
            status_msg = getattr(resp.status, "message", "Unknown error")

            if status_code != StatusCode.OK:
                raise ConnectionError(f"Relay connection failed: {status_msg}")

            # Create a raw connection from the stream
            return RawConnection(stream=relay_stream, initiator=True)

        except Exception as e:
            await relay_stream.close()
            raise ConnectionError(f"Failed to establish relay connection: {str(e)}")

    async def _select_relay(self, peer_info: PeerInfo) -> ID | None:
        """
        Select an appropriate relay for the given peer.

        Parameters
        ----------
        peer_info : PeerInfo
            The peer to connect to

        Returns
        -------
        Optional[ID]
            Selected relay peer ID, or None if no suitable relay was found

        """
        # Try to find a relay
        attempts = 0
        while attempts < self.client_config.max_auto_relay_attempts:
            # Get a relay from the list of discovered relays
            relays = self.discovery.get_relays()
            if relays:
                # TODO: Implement more sophisticated relay selection
                # For now, just return the first available relay
                return relays[0]

            # Wait and retry discovery
            await trio.sleep(1)
            attempts += 1

        return None
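
One possible refinement for the selection TODO above, sketched here, is preferring relays we already have an open connection to, with a random fallback; the host.get_network().connections mapping is an assumption of this sketch, not something this diff establishes:

    import random

    def _pick_relay(self, relays: list[ID]) -> ID:
        # Prefer already-connected relays to avoid a fresh dial before
        # the HOP exchange; otherwise pick any discovered relay.
        connected = set(self.host.get_network().connections.keys())
        usable = [r for r in relays if r in connected]
        return random.choice(usable or relays)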

    async def _make_reservation(
        self,
        stream: INetStream,
        relay_peer_id: ID,
    ) -> bool:
        """
        Make a reservation with a relay.

        Parameters
        ----------
        stream : INetStream
            Stream to the relay
        relay_peer_id : ID
            The relay's peer ID

        Returns
        -------
        bool
            True if the reservation was successful

        """
        try:
            # Send the reservation request
            reserve_msg = HopMessage(
                type=HopMessage.RESERVE,
                peer=self.host.get_id().to_bytes(),
            )
            await stream.write(reserve_msg.SerializeToString())

            # Read the response
            resp_bytes = await stream.read()
            resp = HopMessage()
            resp.ParseFromString(resp_bytes)

            # Access the status attributes directly
            status_code = getattr(resp.status, "code", StatusCode.OK)
            status_msg = getattr(resp.status, "message", "Unknown error")

            if status_code != StatusCode.OK:
                logger.warning(
                    "Reservation failed with relay %s: %s",
                    relay_peer_id,
                    status_msg,
                )
                return False

            # Store reservation info
            # TODO: Implement reservation storage and refresh mechanism
            return True

        except Exception as e:
            logger.error("Error making reservation: %s", str(e))
            return False
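
A sketch of the refresh mechanism the TODO above defers; REFRESH_MARGIN and _refresh_reservation_loop are hypothetical names, and the loop assumes the caller schedules it as a background task (e.g. in a trio nursery):

    REFRESH_MARGIN = 60  # seconds before expiry at which to renew (assumed value)

    async def _refresh_reservation_loop(self, relay_peer_id: ID, ttl: int) -> None:
        # Renew the reservation shortly before it lapses; give up on failure.
        while True:
            await trio.sleep(max(1, ttl - REFRESH_MARGIN))
            stream = await self.host.new_stream(relay_peer_id, [PROTOCOL_ID])
            if stream is None:
                return
            ok = await self._make_reservation(stream, relay_peer_id)
            await stream.close()
            if not ok:
                logger.warning("Could not refresh reservation with %s", relay_peer_id)
                return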

    def create_listener(
        self,
        handler_function: Callable[[ReadWriteCloser], Awaitable[None]],
    ) -> IListener:
        """
        Create a listener for incoming relay connections.

        Parameters
        ----------
        handler_function : Callable[[ReadWriteCloser], Awaitable[None]]
            The handler function for new connections

        Returns
        -------
        IListener
            The created listener

        """
        return CircuitV2Listener(self.host, self.protocol, self.config)


class CircuitV2Listener(Service, IListener):
    """Listener for incoming relay connections."""

    def __init__(
        self,
        host: IHost,
        protocol: CircuitV2Protocol,
        config: RelayConfig,
    ) -> None:
        """
        Initialize the Circuit v2 listener.

        Parameters
        ----------
        host : IHost
            The libp2p host this listener is running on
        protocol : CircuitV2Protocol
            The Circuit v2 protocol instance
        config : RelayConfig
            Relay configuration

        """
        super().__init__()
        self.host = host
        self.protocol = protocol
        self.config = config
        # Store listening addresses as Multiaddr objects
        self.multiaddrs: list[multiaddr.Multiaddr] = []

    async def handle_incoming_connection(
        self,
        stream: INetStream,
        remote_peer_id: ID,
    ) -> RawConnection:
        """
        Handle an incoming relay connection.

        Parameters
        ----------
        stream : INetStream
            The incoming stream
        remote_peer_id : ID
            The remote peer's ID

        Returns
        -------
        RawConnection
            The established connection

        Raises
        ------
        ConnectionError
            If the connection cannot be established

        """
        if not self.config.enable_stop:
            raise ConnectionError("Stop role is not enabled")

        try:
            # Read the STOP message
            msg_bytes = await stream.read()
            stop_msg = StopMessage()
            stop_msg.ParseFromString(msg_bytes)

            if stop_msg.type != StopMessage.CONNECT:
                raise ConnectionError("Invalid STOP message type")

            # Create a raw connection
            return RawConnection(stream=stream, initiator=False)

        except Exception as e:
            await stream.close()
            raise ConnectionError(f"Failed to handle incoming connection: {str(e)}")

    async def run(self) -> None:
        """Run the listener service."""
        # Implementation would go here

    async def listen(self, maddr: multiaddr.Multiaddr, nursery: trio.Nursery) -> bool:
        """
        Start listening on the given multiaddr.

        Parameters
        ----------
        maddr : multiaddr.Multiaddr
            The multiaddr to listen on
        nursery : trio.Nursery
            The nursery to run tasks in

        Returns
        -------
        bool
            True if listening successfully started

        """
        # Convert a string to a Multiaddr if needed
        addr = (
            maddr
            if isinstance(maddr, multiaddr.Multiaddr)
            else multiaddr.Multiaddr(maddr)
        )
        self.multiaddrs.append(addr)
        return True

    def get_addrs(self) -> tuple[multiaddr.Multiaddr, ...]:
        """
        Get the listening addresses.

        Returns
        -------
        tuple[multiaddr.Multiaddr, ...]
            Tuple of listening multiaddresses

        """
        return tuple(self.multiaddrs)

    async def close(self) -> None:
        """Close the listener."""
        self.multiaddrs.clear()
        await self.manager.stop()

@ -87,14 +87,16 @@ async def connect(node1: IHost, node2: IHost) -> None:
    addr = node2.get_addrs()[0]
    info = info_from_p2p_addr(addr)

    # Add retry logic for more robust connection
    # Add retry logic for more robust connection with timeout
    max_retries = 3
    retry_delay = 0.2
    last_error = None

    for attempt in range(max_retries):
        try:
            await node1.connect(info)
            # Use a timeout for each connection attempt
            with trio.move_on_after(5):  # 5 second timeout
                await node1.connect(info)

            # Verify the connection is established in both directions
            if (

@ -48,11 +48,16 @@ class TransportUpgrader:
    # TODO: Figure out what to do with this function.

    async def upgrade_security(
        self, raw_conn: IRawConnection, peer_id: ID, is_initiator: bool
        self,
        raw_conn: IRawConnection,
        is_initiator: bool,
        peer_id: ID | None = None,
    ) -> ISecureConn:
        """Upgrade conn to a secured connection."""
        try:
            if is_initiator:
                if peer_id is None:
                    raise ValueError("peer_id must be provided for outbound connection")
                return await self.security_multistream.secure_outbound(
                    raw_conn, peer_id
                )