feat: modernise py-libp2p (#618)

* fix pyproject.toml, add ruff

* rm lock

* make progress

* add poetry lock ignore

* fix type issues

* fix tcp type errors

* fix text example - type error - wrong args

* add setuptools to dev

* test ci

* fix docs build

* fix type issues for new_swarm & new_host

* fix types in gossipsub

* fix type issues in noise

* wip: factories

* revert factories

* fix more type issues

* more type fixes

* fix: add null checks for noise protocol initialization and key handling

* corrected argument errors for peer ID and Multiaddr in peer tests

* fix: noise - remove redundant type casts in BaseNoiseMsgReadWriter

* fix: update test_notify.py to use SwarmFactory.create_batch_and_listen, fix type hints, and comment out ClosedStream assertions
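
For context, create_batch_and_listen (shown in the factories diff further down) is an async context manager that yields a batch of swarms that are already listening. A minimal usage sketch; the import path, pytest-trio marker, and test body are assumptions, not code from this PR:

import pytest

from tests.factories import SwarmFactory  # import path assumed


@pytest.mark.trio  # assumes the suite runs async tests via pytest-trio
async def test_swarm_batch_listens() -> None:
    # The factory yields swarms that are already listening on local addresses.
    async with SwarmFactory.create_batch_and_listen(2) as swarms:
        assert len(swarms) == 2
        swarm_a, swarm_b = swarms
        # Connect swarm_a to swarm_b and assert on the resulting streams here.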

* Fix type checks for pubsub module

Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>

* Fix type checks for pubsub module tests

Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>

* noise: add checks for uninitialized protocol and key states in PatternXX

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* pubsub: add None checks for optional fields in FloodSub and Pubsub

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>
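
Both entries above apply the same defensive pattern that recurs throughout the diff below: fields typed as X | None are checked before use, so the type checker can narrow them and callers get a clear error instead of an AttributeError. A minimal sketch of the idea; the class and attribute names are illustrative, not the actual PatternXX or Pubsub fields:

from libp2p.crypto.keys import PublicKey
from libp2p.peer.id import ID


class HandshakeState:
    # Populated only after the handshake completes, hence the optional types.
    remote_peer_id: ID | None = None
    remote_pubkey: PublicKey | None = None

    def remote_identity(self) -> tuple[ID, PublicKey]:
        # Guard the optional fields so the checker narrows ID | None to ID.
        if self.remote_peer_id is None or self.remote_pubkey is None:
            raise RuntimeError("handshake not completed: remote identity unset")
        return self.remote_peer_id, self.remote_pubkey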

* Fix type hints and improve testing

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* remove redundant checks

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* fix build issues

* add optional to trio service

* fix types

* fix type errors

* Fix type errors

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* fixed more type checks in crypto and peer_data files

* wip: factories

* replaced union with optional

* fix: type-error in interp-utils and peerinfo

* replace pyright with pyrefly

* add pyrefly.toml

* wip: fix multiselect issues

* try typecheck

* base check

* mcache test fixes, typecheck ci update

* fix ci

* will this work

* minor fix

* use poetry

* fix workflow

* use cache, fix err

* fix pyrefly.toml

* fix pyrefly.toml

* fix cache in ci

* deploy commit

* add main baseline

* update to v5

* improve typecheck ci (#14)

* fix typo

* remove holepunching code (#16)

* fix gossipsub type errors (#17)

* fix: ensure initiator includes remote peer id in handshake (#15)

* fix ci (#19)

* typefix: custom_types | core/peerinfo/test_peer_info | io/abc | pubsub/floodsub | protocol_muxer/multiselect (#18)

* fix: Typefixes in PeerInfo  (#21)

* fix minor type issue (#22)

* fix type errors in pubsub (#24)

* fix: Minor typefixes in tests (#23)

* Fix failing tests for type-fixed test/pubsub (#8)

* move pyrefly & ruff to pyproject.toml & rm .project-template (#28)

* move the async_context file to tests/core

* move crypto test to crypto folder

* fix: some typefixes (#25)

* fix type errors

* fix type issues

* fix: update gRPC API usage in autonat_pb2_grpc.py (#31)

* md: typecheck ci

* rm comments

* clean up: address review suggestions

* use | None over Optional, per modern Python typing conventions (PEP 604)
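
The change is purely syntactic; both spellings denote the same type. An illustrative before/after, using a stand-in NewType rather than the project's own TProtocol:

from typing import NewType, Optional

TProtocol = NewType("TProtocol", str)  # stand-in for libp2p's protocol ID type


# Before: typing.Optional (and, in several factories, a bare `= None` default
# on a non-optional annotation, which the stricter type check flagged)
def old_style(security_protocol: Optional[TProtocol] = None) -> None: ...


# After: PEP 604 union syntax, as applied throughout the diff below
def new_style(security_protocol: TProtocol | None = None) -> None: ...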

* drop support for py3.9

* newsfragments

---------

Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>
Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>
Co-authored-by: acul71 <luca.pisani@birdo.net>
Co-authored-by: kaneki003 <sakshamchauhan707@gmail.com>
Co-authored-by: sukhman <sukhmansinghsaluja@gmail.com>
Co-authored-by: varun-r-mallya <varunrmallya@gmail.com>
Co-authored-by: varunrmallya <100590632+varun-r-mallya@users.noreply.github.com>
Co-authored-by: lla-dane <abhinavagarwalla6@gmail.com>
Co-authored-by: Collins <ArtemisfowlX@protonmail.com>
Co-authored-by: Abhinav Agarwalla <120122716+lla-dane@users.noreply.github.com>
Co-authored-by: guha-rahul <52607971+guha-rahul@users.noreply.github.com>
Co-authored-by: Sukhman Singh <63765293+sukhman-sukh@users.noreply.github.com>
Co-authored-by: acul71 <34693171+acul71@users.noreply.github.com>
Co-authored-by: pacrob <5199899+pacrob@users.noreply.github.com>
Author: Arush Kurundodi
Date: 2025-06-09 23:09:59 +05:30
Committed by: GitHub
Parent: d020bbc066
Commit: bdadec7519
111 changed files with 1537 additions and 1401 deletions

View File

@ -1,5 +1,6 @@
from collections.abc import (
AsyncIterator,
Callable,
Sequence,
)
from contextlib import (
@ -8,7 +9,6 @@ from contextlib import (
)
from typing import (
Any,
Callable,
cast,
)
@ -88,8 +88,10 @@ from libp2p.security.noise.messages import (
NoiseHandshakePayload,
make_handshake_payload_sig,
)
from libp2p.security.noise.transport import PROTOCOL_ID as NOISE_PROTOCOL_ID
from libp2p.security.noise.transport import Transport as NoiseTransport
from libp2p.security.noise.transport import (
PROTOCOL_ID as NOISE_PROTOCOL_ID,
Transport as NoiseTransport,
)
import libp2p.security.secio.transport as secio
from libp2p.stream_muxer.mplex.mplex import (
MPLEX_PROTOCOL_ID,
@ -134,7 +136,7 @@ class IDFactory(factory.Factory):
model = ID
peer_id_bytes = factory.LazyFunction(
lambda: generate_peer_id_from(default_key_pair_factory())
lambda: generate_peer_id_from(default_key_pair_factory()).to_bytes()
)
@ -177,7 +179,7 @@ def noise_transport_factory(key_pair: KeyPair) -> ISecureTransport:
def security_options_factory_factory(
protocol_id: TProtocol = None,
protocol_id: TProtocol | None = None,
) -> Callable[[KeyPair], TSecurityOptions]:
if protocol_id is None:
protocol_id = DEFAULT_SECURITY_PROTOCOL_ID
@ -217,8 +219,8 @@ def default_muxer_transport_factory() -> TMuxerOptions:
async def raw_conn_factory(
nursery: trio.Nursery,
) -> AsyncIterator[tuple[IRawConnection, IRawConnection]]:
conn_0 = None
conn_1 = None
conn_0: IRawConnection | None = None
conn_1: IRawConnection | None = None
event = trio.Event()
async def tcp_stream_handler(stream: ReadWriteCloser) -> None:
@ -233,6 +235,7 @@ async def raw_conn_factory(
listening_maddr = listener.get_addrs()[0]
conn_0 = await tcp_transport.dial(listening_maddr)
await event.wait()
assert conn_0 is not None and conn_1 is not None
yield conn_0, conn_1
@ -247,8 +250,8 @@ async def noise_conn_factory(
NoiseTransport, noise_transport_factory(create_secp256k1_key_pair())
)
local_secure_conn: ISecureConn = None
remote_secure_conn: ISecureConn = None
local_secure_conn: ISecureConn | None = None
remote_secure_conn: ISecureConn | None = None
async def upgrade_local_conn() -> None:
nonlocal local_secure_conn
@ -299,9 +302,9 @@ class SwarmFactory(factory.Factory):
@asynccontextmanager
async def create_and_listen(
cls,
key_pair: KeyPair = None,
security_protocol: TProtocol = None,
muxer_opt: TMuxerOptions = None,
key_pair: KeyPair | None = None,
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
) -> AsyncIterator[Swarm]:
# `factory.Factory.__init__` does *not* prepare a *default value* if we pass
# an argument explicitly with `None`. If an argument is `None`, we don't pass it
@ -323,8 +326,8 @@ class SwarmFactory(factory.Factory):
async def create_batch_and_listen(
cls,
number: int,
security_protocol: TProtocol = None,
muxer_opt: TMuxerOptions = None,
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
) -> AsyncIterator[tuple[Swarm, ...]]:
async with AsyncExitStack() as stack:
ctx_mgrs = [
@ -344,11 +347,11 @@ class HostFactory(factory.Factory):
class Params:
key_pair = factory.LazyFunction(default_key_pair_factory)
security_protocol: TProtocol = None
security_protocol: TProtocol | None = None
muxer_opt = factory.LazyFunction(default_muxer_transport_factory)
network = factory.LazyAttribute(
lambda o: SwarmFactory(
lambda o: SwarmFactory.build(
security_protocol=o.security_protocol, muxer_opt=o.muxer_opt
)
)
@ -358,8 +361,8 @@ class HostFactory(factory.Factory):
async def create_batch_and_listen(
cls,
number: int,
security_protocol: TProtocol = None,
muxer_opt: TMuxerOptions = None,
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
) -> AsyncIterator[tuple[BasicHost, ...]]:
async with SwarmFactory.create_batch_and_listen(
number, security_protocol=security_protocol, muxer_opt=muxer_opt
@ -377,7 +380,7 @@ class DummyRouter(IPeerRouting):
def _add_peer(self, peer_id: ID, addrs: list[Multiaddr]) -> None:
self._routing_table[peer_id] = PeerInfo(peer_id, addrs)
async def find_peer(self, peer_id: ID) -> PeerInfo:
async def find_peer(self, peer_id: ID) -> PeerInfo | None:
await trio.lowlevel.checkpoint()
return self._routing_table.get(peer_id, None)
@ -388,11 +391,11 @@ class RoutedHostFactory(factory.Factory):
class Params:
key_pair = factory.LazyFunction(default_key_pair_factory)
security_protocol: TProtocol = None
security_protocol: TProtocol | None = None
muxer_opt = factory.LazyFunction(default_muxer_transport_factory)
network = factory.LazyAttribute(
lambda o: HostFactory(
lambda o: HostFactory.build(
security_protocol=o.security_protocol, muxer_opt=o.muxer_opt
).get_network()
)
@ -403,8 +406,8 @@ class RoutedHostFactory(factory.Factory):
async def create_batch_and_listen(
cls,
number: int,
security_protocol: TProtocol = None,
muxer_opt: TMuxerOptions = None,
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
) -> AsyncIterator[tuple[RoutedHost, ...]]:
routing_table = DummyRouter()
async with HostFactory.create_batch_and_listen(
@ -447,8 +450,8 @@ class PubsubFactory(factory.Factory):
model = Pubsub
host = factory.SubFactory(HostFactory)
router = None
cache_size = None
router: IPubsubRouter | None = None
cache_size: int | None = None
strict_signing = False
@classmethod
@ -457,13 +460,15 @@ class PubsubFactory(factory.Factory):
cls,
host: IHost,
router: IPubsubRouter,
cache_size: int,
cache_size: int | None,
seen_ttl: int,
sweep_interval: int,
strict_signing: bool,
msg_id_constructor: Callable[[rpc_pb2.Message], bytes] = None,
msg_id_constructor: Callable[[rpc_pb2.Message], bytes] | None = None,
) -> AsyncIterator[Pubsub]:
pubsub = cls(
if msg_id_constructor is None:
msg_id_constructor = get_peer_and_seqno_msg_id
pubsub = Pubsub(
host=host,
router=router,
cache_size=cache_size,
@ -482,13 +487,13 @@ class PubsubFactory(factory.Factory):
cls,
number: int,
routers: Sequence[IPubsubRouter],
cache_size: int = None,
cache_size: int | None = None,
seen_ttl: int = 120,
sweep_interval: int = 60,
strict_signing: bool = False,
security_protocol: TProtocol = None,
muxer_opt: TMuxerOptions = None,
msg_id_constructor: Callable[[rpc_pb2.Message], bytes] = None,
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
msg_id_constructor: Callable[[rpc_pb2.Message], bytes] | None = None,
) -> AsyncIterator[tuple[Pubsub, ...]]:
async with HostFactory.create_batch_and_listen(
number, security_protocol=security_protocol, muxer_opt=muxer_opt
@ -516,16 +521,15 @@ class PubsubFactory(factory.Factory):
async def create_batch_with_floodsub(
cls,
number: int,
cache_size: int = None,
cache_size: int | None = None,
seen_ttl: int = 120,
sweep_interval: int = 60,
strict_signing: bool = False,
protocols: Sequence[TProtocol] = None,
security_protocol: TProtocol = None,
muxer_opt: TMuxerOptions = None,
msg_id_constructor: Callable[
[rpc_pb2.Message], bytes
] = get_peer_and_seqno_msg_id,
protocols: Sequence[TProtocol] | None = None,
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
msg_id_constructor: None
| (Callable[[rpc_pb2.Message], bytes]) = get_peer_and_seqno_msg_id,
) -> AsyncIterator[tuple[Pubsub, ...]]:
if protocols is not None:
floodsubs = FloodsubFactory.create_batch(number, protocols=list(protocols))
@ -550,9 +554,9 @@ class PubsubFactory(factory.Factory):
cls,
number: int,
*,
cache_size: int = None,
cache_size: int | None = None,
strict_signing: bool = False,
protocols: Sequence[TProtocol] = None,
protocols: Sequence[TProtocol] | None = None,
degree: int = GOSSIPSUB_PARAMS.degree,
degree_low: int = GOSSIPSUB_PARAMS.degree_low,
degree_high: int = GOSSIPSUB_PARAMS.degree_high,
@ -564,11 +568,10 @@ class PubsubFactory(factory.Factory):
heartbeat_initial_delay: float = GOSSIPSUB_PARAMS.heartbeat_initial_delay,
direct_connect_initial_delay: float = GOSSIPSUB_PARAMS.direct_connect_initial_delay, # noqa: E501
direct_connect_interval: int = GOSSIPSUB_PARAMS.direct_connect_interval,
security_protocol: TProtocol = None,
muxer_opt: TMuxerOptions = None,
msg_id_constructor: Callable[
[rpc_pb2.Message], bytes
] = get_peer_and_seqno_msg_id,
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
msg_id_constructor: None
| (Callable[[rpc_pb2.Message], bytes]) = get_peer_and_seqno_msg_id,
) -> AsyncIterator[tuple[Pubsub, ...]]:
if protocols is not None:
gossipsubs = GossipsubFactory.create_batch(
@ -605,6 +608,8 @@ class PubsubFactory(factory.Factory):
number,
gossipsubs,
cache_size,
120, # seen_ttl
60, # sweep_interval
strict_signing,
security_protocol=security_protocol,
muxer_opt=muxer_opt,
@ -618,7 +623,8 @@ class PubsubFactory(factory.Factory):
@asynccontextmanager
async def swarm_pair_factory(
security_protocol: TProtocol = None, muxer_opt: TMuxerOptions = None
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
) -> AsyncIterator[tuple[Swarm, Swarm]]:
async with SwarmFactory.create_batch_and_listen(
2, security_protocol=security_protocol, muxer_opt=muxer_opt
@ -629,7 +635,8 @@ async def swarm_pair_factory(
@asynccontextmanager
async def host_pair_factory(
security_protocol: TProtocol = None, muxer_opt: TMuxerOptions = None
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
) -> AsyncIterator[tuple[BasicHost, BasicHost]]:
async with HostFactory.create_batch_and_listen(
2, security_protocol=security_protocol, muxer_opt=muxer_opt
@ -640,7 +647,8 @@ async def host_pair_factory(
@asynccontextmanager
async def swarm_conn_pair_factory(
security_protocol: TProtocol = None, muxer_opt: TMuxerOptions = None
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
) -> AsyncIterator[tuple[SwarmConn, SwarmConn]]:
async with swarm_pair_factory(
security_protocol=security_protocol, muxer_opt=muxer_opt
@ -652,7 +660,7 @@ async def swarm_conn_pair_factory(
@asynccontextmanager
async def mplex_conn_pair_factory(
security_protocol: TProtocol = None,
security_protocol: TProtocol | None = None,
) -> AsyncIterator[tuple[Mplex, Mplex]]:
async with swarm_conn_pair_factory(
security_protocol=security_protocol,
@ -666,7 +674,7 @@ async def mplex_conn_pair_factory(
@asynccontextmanager
async def mplex_stream_pair_factory(
security_protocol: TProtocol = None,
security_protocol: TProtocol | None = None,
) -> AsyncIterator[tuple[MplexStream, MplexStream]]:
async with mplex_conn_pair_factory(
security_protocol=security_protocol
@ -684,7 +692,7 @@ async def mplex_stream_pair_factory(
@asynccontextmanager
async def yamux_conn_pair_factory(
security_protocol: TProtocol = None,
security_protocol: TProtocol | None = None,
) -> AsyncIterator[tuple[Yamux, Yamux]]:
async with swarm_conn_pair_factory(
security_protocol=security_protocol, muxer_opt=default_muxer_transport_factory()
@ -697,7 +705,7 @@ async def yamux_conn_pair_factory(
@asynccontextmanager
async def yamux_stream_pair_factory(
security_protocol: TProtocol = None,
security_protocol: TProtocol | None = None,
) -> AsyncIterator[tuple[YamuxStream, YamuxStream]]:
async with yamux_conn_pair_factory(
security_protocol=security_protocol
@ -715,11 +723,12 @@ async def yamux_stream_pair_factory(
@asynccontextmanager
async def net_stream_pair_factory(
security_protocol: TProtocol = None, muxer_opt: TMuxerOptions = None
security_protocol: TProtocol | None = None,
muxer_opt: TMuxerOptions | None = None,
) -> AsyncIterator[tuple[INetStream, INetStream]]:
protocol_id = TProtocol("/example/id/1")
stream_1: INetStream
stream_1: INetStream | None = None
# Just a proxy, we only care about the stream.
# Add a barrier to avoid stream being removed.
@ -736,5 +745,6 @@ async def net_stream_pair_factory(
hosts[1].set_stream_handler(protocol_id, handler)
stream_0 = await hosts[0].new_stream(hosts[1].get_id(), [protocol_id])
assert stream_1 is not None
yield stream_0, stream_1
event_handler_finished.set()

View File

@ -131,13 +131,13 @@ async def make_p2pd(
async with p2pc.listen():
peer_id, maddrs = await p2pc.identify()
listen_maddr: Multiaddr = None
listen_maddr: Multiaddr | None = None
for maddr in maddrs:
try:
ip = maddr.value_for_protocol(multiaddr.protocols.P_IP4)
ip = maddr.value_for_protocol(multiaddr.multiaddr.protocols.P_IP4)
# NOTE: Check if this `maddr` uses `tcp`.
maddr.value_for_protocol(multiaddr.protocols.P_TCP)
except multiaddr.exceptions.ProtocolLookupError:
maddr.value_for_protocol(multiaddr.multiaddr.protocols.P_TCP)
except multiaddr.multiaddr.exceptions.ProtocolLookupError:
continue
if ip == LOCALHOST_IP:
listen_maddr = maddr

View File

@ -14,27 +14,34 @@ TIMEOUT_DURATION = 30
class AbstractInterativeProcess(ABC):
@abstractmethod
async def start(self) -> None:
...
async def start(self) -> None: ...
@abstractmethod
async def close(self) -> None:
...
async def close(self) -> None: ...
class BaseInteractiveProcess(AbstractInterativeProcess):
proc: trio.Process = None
proc: trio.Process | None = None
cmd: str
args: list[str]
bytes_read: bytearray
patterns: Iterable[bytes] = None
patterns: Iterable[bytes] | None = None
event_ready: trio.Event
async def wait_until_ready(self) -> None:
if self.proc is None:
raise Exception("process is not defined")
if self.patterns is None:
raise Exception("patterns is not defined")
patterns_occurred = {pat: False for pat in self.patterns}
buffers = {pat: bytearray() for pat in self.patterns}
async def read_from_daemon_and_check() -> None:
if self.proc is None:
raise Exception("process is not defined")
if self.proc.stdout is None:
raise Exception("process stdout is None, cannot read output")
async for data in self.proc.stdout:
self.bytes_read.extend(data)
for pat, occurred in patterns_occurred.items():

View File

@ -5,11 +5,10 @@ from typing import (
from multiaddr import (
Multiaddr,
)
from p2pclient.libp2p_stubs.peer.id import ID as StubID
import trio
from libp2p.host.host_interface import (
IHost,
)
from libp2p.abc import IHost
from libp2p.peer.id import (
ID,
)
@ -58,7 +57,10 @@ async def connect(a: TDaemonOrHost, b: TDaemonOrHost) -> None:
b_peer_info = _get_peer_info(b)
if isinstance(a, Daemon):
await a.control.connect(b_peer_info.peer_id, b_peer_info.addrs)
# Convert internal libp2p ID to p2pclient stub ID .connect()
await a.control.connect(
StubID(b_peer_info.peer_id.to_bytes()), b_peer_info.addrs
)
else: # isinstance(b, IHost)
await a.connect(b_peer_info)
# Allow additional sleep for both side to establish the connection.

View File

@ -8,6 +8,7 @@ from contextlib import (
from libp2p.abc import (
IHost,
ISubscriptionAPI,
)
from libp2p.pubsub.pubsub import (
Pubsub,
@ -40,9 +41,11 @@ class DummyAccountNode(Service):
"""
pubsub: Pubsub
subscription: ISubscriptionAPI | None
def __init__(self, pubsub: Pubsub) -> None:
self.pubsub = pubsub
self.subscription = None
self.balances: dict[str, int] = {}
@property
@ -74,6 +77,10 @@ class DummyAccountNode(Service):
async def handle_incoming_msgs(self) -> None:
"""Handle all incoming messages on the CRYPTO_TOPIC from peers."""
while True:
if self.subscription is None:
raise RuntimeError(
"Subscription must be set before handling incoming messages"
)
incoming = await self.subscription.get()
msg_comps = incoming.data.decode("utf-8").split(",")

View File

@ -1,4 +1,5 @@
import logging
import logging.handlers
import os
from pathlib import (
Path,