feat: modernise py-libp2p (#618)

* fix pyproject.toml, add ruff

* rm lock

* make progress

* add poetry lock ignore

* fix type issues

* fix tcp type errors

* fix text example - type error - wrong args

* add setuptools to dev

* test ci

* fix docs build

* fix type issues for new_swarm & new_host

* fix types in gossipsub

* fix type issues in noise

* wip: factories

* revert factories

* fix more type issues

* more type fixes

* fix: add null checks for noise protocol initialization and key handling

* corrected argument errors in peerId and Multiaddr in peer tests

* fix: noise - remove redundant type casts in BaseNoiseMsgReadWriter

* fix: update test_notify.py to use SwarmFactory.create_batch_and_listen, fix type hints, and comment out ClosedStream assertions

* Fix type checks for pubsub module

Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>

* Fix type checks for pubsub module-tests

Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>

* noise: add checks for uninitialized protocol and key states in PatternXX

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* pubsub: add None checks for optional fields in FloodSub and Pubsub (see the None-check sketch after this list)

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* Fix type hints and improve testing

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* remove redundant checks

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* fix build issues

* add optional to trio service

* fix types

* fix type errors

* Fix type errors

Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>

* fixed more type checks in crypto and peer_data files

* wip: factories

* replaced union with optional

* fix: type-error in interp-utils and peerinfo

* replace pyright with pyrefly

* add pyrefly.toml

* wip: fix multiselect issues

* try typecheck

* base check

* mcache test fixes, typecheck ci update

* fix ci

* will this work

* minor fix

* use poetry

* fix workflow

* use cache, fix err

* fix pyrefly.toml

* fix pyrefly.toml

* fix cache in ci

* deploy commit

* add main baseline

* update to v5

* improve typecheck ci (#14)

* fix typo

* remove holepunching code (#16)

* fix gossipsub typeerrors (#17)

* fix: ensure initiator user includes remote peer id in handshake (#15)

* fix ci (#19)

* typefix: custom_types | core/peerinfo/test_peer_info | io/abc | pubsub/floodsub | protocol_muxer/multiselect (#18)

* fix: Typefixes in PeerInfo  (#21)

* fix minor type issue (#22)

* fix type errors in pubsub (#24)

* fix: Minor typefixes in tests (#23)

* Fix failing tests for type-fixed test/pubsub (#8)

* move pyrefly & ruff to pyproject.toml & rm .project-template (#28) (see the pyproject sketch after this list)

* move the async_context file to tests/core

* move crypto test to crypto folder

* fix: some typefixes (#25)

* fix type errors

* fix type issues

* fix: update gRPC API usage in autonat_pb2_grpc.py (#31)

* md: typecheck ci

* rm comments

* clean up: apply review suggestions

* use | None over Optional as per new Python standards (see the typing sketch after this list)

* drop support for py3.9

* newsfragments
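
As a minimal illustration of the Optional-to-`| None` migration noted above (the functions below are hypothetical and not part of the py-libp2p API; the PEP 604 syntax works natively at runtime because py3.9 support was dropped):

```python
from typing import Optional  # legacy spelling, still valid


def lookup_legacy(peer_id: str) -> Optional[str]:
    """Pre-migration style: Optional[str]."""
    return None


def lookup_modern(peer_id: str) -> str | None:
    """Post-migration style: PEP 604 union, valid on Python 3.10+."""
    return None
```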
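
A sketch of the None-check pattern applied in the Noise and pubsub fixes; the class and attribute names below are hypothetical stand-ins, not the actual py-libp2p types:

```python
class HandshakeState:
    """Hypothetical stand-in for a Noise handshake object with optional fields."""

    def __init__(self) -> None:
        # Populated only after the key exchange completes.
        self.remote_public_key: bytes | None = None

    def remote_key_or_raise(self) -> bytes:
        # Explicit narrowing: turns `bytes | None` into `bytes` for the type
        # checker and fails loudly if the handshake never finished.
        if self.remote_public_key is None:
            raise RuntimeError("handshake incomplete: remote public key unset")
        return self.remote_public_key
```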
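
A hedged sketch of the pyproject.toml direction described above (ruff configuration plus the Python floor bump); the exact values picked in this PR may differ:

```toml
[project]
requires-python = ">=3.10"  # py3.9 support dropped

[tool.ruff]
line-length = 88
target-version = "py310"

[tool.ruff.lint]
# Rule selection here is illustrative, not necessarily the PR's choice.
select = ["E", "F", "I", "UP"]
```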

---------

Signed-off-by: sukhman <sukhmansinghsaluja@gmail.com>
Signed-off-by: varun-r-mallya <varunrmallya@gmail.com>
Co-authored-by: acul71 <luca.pisani@birdo.net>
Co-authored-by: kaneki003 <sakshamchauhan707@gmail.com>
Co-authored-by: sukhman <sukhmansinghsaluja@gmail.com>
Co-authored-by: varun-r-mallya <varunrmallya@gmail.com>
Co-authored-by: varunrmallya <100590632+varun-r-mallya@users.noreply.github.com>
Co-authored-by: lla-dane <abhinavagarwalla6@gmail.com>
Co-authored-by: Collins <ArtemisfowlX@protonmail.com>
Co-authored-by: Abhinav Agarwalla <120122716+lla-dane@users.noreply.github.com>
Co-authored-by: guha-rahul <52607971+guha-rahul@users.noreply.github.com>
Co-authored-by: Sukhman Singh <63765293+sukhman-sukh@users.noreply.github.com>
Co-authored-by: acul71 <34693171+acul71@users.noreply.github.com>
Co-authored-by: pacrob <5199899+pacrob@users.noreply.github.com>
Author: Arush Kurundodi
Date: 2025-06-09 23:09:59 +05:30
Committed by: GitHub
Parent: d020bbc066
Commit: bdadec7519
111 changed files with 1537 additions and 1401 deletions


@@ -8,23 +8,20 @@ into network after network has already started listening
TODO: Add tests for closed_stream, listen_close when those
features are implemented in swarm
"""
import enum
import pytest
from multiaddr import Multiaddr
import trio
from libp2p.abc import (
INetConn,
INetStream,
INetwork,
INotifee,
)
from libp2p.tools.async_service import (
background_trio_service,
)
from libp2p.tools.constants import (
LISTEN_MADDR,
)
from libp2p.tools.utils import (
connect_swarm,
)
from libp2p.tools.utils import connect_swarm
from tests.utils.factories import (
SwarmFactory,
)
@@ -40,169 +37,94 @@ class Event(enum.Enum):
class MyNotifee(INotifee):
def __init__(self, events):
def __init__(self, events: list[Event]):
self.events = events
async def opened_stream(self, network, stream):
async def opened_stream(self, network: INetwork, stream: INetStream) -> None:
self.events.append(Event.OpenedStream)
async def closed_stream(self, network, stream):
async def closed_stream(self, network: INetwork, stream: INetStream) -> None:
# TODO: It is not implemented yet.
pass
async def connected(self, network, conn):
async def connected(self, network: INetwork, conn: INetConn) -> None:
self.events.append(Event.Connected)
async def disconnected(self, network, conn):
async def disconnected(self, network: INetwork, conn: INetConn) -> None:
self.events.append(Event.Disconnected)
async def listen(self, network, _multiaddr):
async def listen(self, network: INetwork, multiaddr: Multiaddr) -> None:
self.events.append(Event.Listen)
async def listen_close(self, network, _multiaddr):
async def listen_close(self, network: INetwork, multiaddr: Multiaddr) -> None:
# TODO: It is not implemented yet.
pass
@pytest.mark.trio
async def test_notify(security_protocol):
swarms = [SwarmFactory(security_protocol=security_protocol) for _ in range(2)]
events_0_0 = []
events_1_0 = []
events_0_without_listen = []
# Helper to wait for specific event
async def wait_for_event(events_list, expected_event, timeout=1.0):
start_time = trio.current_time()
while trio.current_time() - start_time < timeout:
if expected_event in events_list:
return True
await trio.sleep(0.01)
async def wait_for_event(events_list, event, timeout=1.0):
with trio.move_on_after(timeout):
while event not in events_list:
await trio.sleep(0.01)
return True
return False
# Run swarms.
async with background_trio_service(swarms[0]), background_trio_service(swarms[1]):
# Register events before listening
swarms[0].register_notifee(MyNotifee(events_0_0))
swarms[1].register_notifee(MyNotifee(events_1_0))
# Event lists for notifees
events_0_0 = []
events_0_1 = []
events_1_0 = []
events_1_1 = []
# Listen
async with trio.open_nursery() as nursery:
nursery.start_soon(swarms[0].listen, LISTEN_MADDR)
nursery.start_soon(swarms[1].listen, LISTEN_MADDR)
# Create two swarms, but do not listen yet
async with SwarmFactory.create_batch_and_listen(2) as swarms:
# Register notifees before listening
notifee_0_0 = MyNotifee(events_0_0)
notifee_0_1 = MyNotifee(events_0_1)
notifee_1_0 = MyNotifee(events_1_0)
notifee_1_1 = MyNotifee(events_1_1)
# Wait for Listen events
assert await wait_for_event(events_0_0, Event.Listen)
assert await wait_for_event(events_1_0, Event.Listen)
swarms[0].register_notifee(notifee_0_0)
swarms[0].register_notifee(notifee_0_1)
swarms[1].register_notifee(notifee_1_0)
swarms[1].register_notifee(notifee_1_1)
swarms[0].register_notifee(MyNotifee(events_0_without_listen))
# Connected
# Connect swarms
await connect_swarm(swarms[0], swarms[1])
assert await wait_for_event(events_0_0, Event.Connected)
assert await wait_for_event(events_1_0, Event.Connected)
assert await wait_for_event(events_0_without_listen, Event.Connected)
# OpenedStream: first
await swarms[0].new_stream(swarms[1].get_peer_id())
# OpenedStream: second
await swarms[0].new_stream(swarms[1].get_peer_id())
# OpenedStream: third, but different direction.
await swarms[1].new_stream(swarms[0].get_peer_id())
# Create a stream
stream = await swarms[0].new_stream(swarms[1].get_peer_id())
await stream.close()
# Clear any duplicate events that might have occurred
events_0_0.copy()
events_1_0.copy()
events_0_without_listen.copy()
# TODO: Check `ClosedStream` and `ListenClose` events after they are ready.
# Disconnected
# Close peer
await swarms[0].close_peer(swarms[1].get_peer_id())
assert await wait_for_event(events_0_0, Event.Disconnected)
assert await wait_for_event(events_1_0, Event.Disconnected)
assert await wait_for_event(events_0_without_listen, Event.Disconnected)
# Connected again, but different direction.
await connect_swarm(swarms[1], swarms[0])
# Wait for events
assert await wait_for_event(events_0_0, Event.Connected, 1.0)
assert await wait_for_event(events_0_0, Event.OpenedStream, 1.0)
# assert await wait_for_event(
# events_0_0, Event.ClosedStream, 1.0
# ) # Not implemented
assert await wait_for_event(events_0_0, Event.Disconnected, 1.0)
# Get the index of the first disconnected event
disconnect_idx_0_0 = events_0_0.index(Event.Disconnected)
disconnect_idx_1_0 = events_1_0.index(Event.Disconnected)
disconnect_idx_without_listen = events_0_without_listen.index(
Event.Disconnected
)
assert await wait_for_event(events_0_1, Event.Connected, 1.0)
assert await wait_for_event(events_0_1, Event.OpenedStream, 1.0)
# assert await wait_for_event(
# events_0_1, Event.ClosedStream, 1.0
# ) # Not implemented
assert await wait_for_event(events_0_1, Event.Disconnected, 1.0)
# Check for connected event after disconnect
assert await wait_for_event(
events_0_0[disconnect_idx_0_0 + 1 :], Event.Connected
)
assert await wait_for_event(
events_1_0[disconnect_idx_1_0 + 1 :], Event.Connected
)
assert await wait_for_event(
events_0_without_listen[disconnect_idx_without_listen + 1 :],
Event.Connected,
)
assert await wait_for_event(events_1_0, Event.Connected, 1.0)
assert await wait_for_event(events_1_0, Event.OpenedStream, 1.0)
# assert await wait_for_event(
# events_1_0, Event.ClosedStream, 1.0
# ) # Not implemented
assert await wait_for_event(events_1_0, Event.Disconnected, 1.0)
# Disconnected again, but different direction.
await swarms[1].close_peer(swarms[0].get_peer_id())
# Find index of the second connected event
second_connect_idx_0_0 = events_0_0.index(
Event.Connected, disconnect_idx_0_0 + 1
)
second_connect_idx_1_0 = events_1_0.index(
Event.Connected, disconnect_idx_1_0 + 1
)
second_connect_idx_without_listen = events_0_without_listen.index(
Event.Connected, disconnect_idx_without_listen + 1
)
# Check for second disconnected event
assert await wait_for_event(
events_0_0[second_connect_idx_0_0 + 1 :], Event.Disconnected
)
assert await wait_for_event(
events_1_0[second_connect_idx_1_0 + 1 :], Event.Disconnected
)
assert await wait_for_event(
events_0_without_listen[second_connect_idx_without_listen + 1 :],
Event.Disconnected,
)
# Verify the core sequence of events
expected_events_without_listen = [
Event.Connected,
Event.Disconnected,
Event.Connected,
Event.Disconnected,
]
# Filter events to check only pattern we care about
# (skipping OpenedStream which may vary)
filtered_events_0_0 = [
e
for e in events_0_0
if e in [Event.Listen, Event.Connected, Event.Disconnected]
]
filtered_events_1_0 = [
e
for e in events_1_0
if e in [Event.Listen, Event.Connected, Event.Disconnected]
]
filtered_events_without_listen = [
e
for e in events_0_without_listen
if e in [Event.Connected, Event.Disconnected]
]
# Check that the pattern matches
assert filtered_events_0_0[0] == Event.Listen, "First event should be Listen"
assert filtered_events_1_0[0] == Event.Listen, "First event should be Listen"
# Check pattern: Connected -> Disconnected -> Connected -> Disconnected
assert filtered_events_0_0[1:5] == expected_events_without_listen
assert filtered_events_1_0[1:5] == expected_events_without_listen
assert filtered_events_without_listen[:4] == expected_events_without_listen
assert await wait_for_event(events_1_1, Event.Connected, 1.0)
assert await wait_for_event(events_1_1, Event.OpenedStream, 1.0)
# assert await wait_for_event(
# events_1_1, Event.ClosedStream, 1.0
# ) # Not implemented
assert await wait_for_event(events_1_1, Event.Disconnected, 1.0)


@@ -13,6 +13,9 @@ from libp2p import (
from libp2p.network.exceptions import (
SwarmException,
)
from libp2p.network.swarm import (
Swarm,
)
from libp2p.tools.utils import (
connect_swarm,
)
@@ -166,12 +169,14 @@ async def test_swarm_multiaddr(security_protocol):
def test_new_swarm_defaults_to_tcp():
swarm = new_swarm()
assert isinstance(swarm, Swarm)
assert isinstance(swarm.transport, TCP)
def test_new_swarm_tcp_multiaddr_supported():
addr = Multiaddr("/ip4/127.0.0.1/tcp/9999")
swarm = new_swarm(listen_addrs=[addr])
assert isinstance(swarm, Swarm)
assert isinstance(swarm.transport, TCP)