Mirror of https://github.com/varun-r-mallya/py-libp2p.git, synced 2025-12-31 20:36:24 +00:00
Rewrite factories, get some of the tests running
@@ -4,12 +4,12 @@ import secrets
 import pytest
 
 from libp2p.host.ping import ID, PING_LENGTH
-from libp2p.tools.factories import pair_of_connected_hosts
+from libp2p.tools.factories import host_pair_factory
 
 
 @pytest.mark.asyncio
 async def test_ping_once():
-    async with pair_of_connected_hosts() as (host_a, host_b):
+    async with host_pair_factory() as (host_a, host_b):
         stream = await host_b.new_stream(host_a.get_id(), (ID,))
         some_ping = secrets.token_bytes(PING_LENGTH)
         await stream.write(some_ping)
@@ -23,7 +23,7 @@ SOME_PING_COUNT = 3
 
 @pytest.mark.asyncio
 async def test_ping_several():
-    async with pair_of_connected_hosts() as (host_a, host_b):
+    async with host_pair_factory() as (host_a, host_b):
         stream = await host_b.new_stream(host_a.get_id(), (ID,))
         for _ in range(SOME_PING_COUNT):
             some_ping = secrets.token_bytes(PING_LENGTH)
@@ -2,12 +2,12 @@ import pytest
 
 from libp2p.identity.identify.pb.identify_pb2 import Identify
 from libp2p.identity.identify.protocol import ID, _mk_identify_protobuf
-from libp2p.tools.factories import pair_of_connected_hosts
+from libp2p.tools.factories import host_pair_factory
 
 
 @pytest.mark.asyncio
 async def test_identify_protocol():
-    async with pair_of_connected_hosts() as (host_a, host_b):
+    async with host_pair_factory() as (host_a, host_b):
         stream = await host_b.new_stream(host_a.get_id(), (ID,))
         response = await stream.read()
         await stream.close()
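The ping and identify tests above now obtain their connected hosts by entering `host_pair_factory()` with `async with`. As a rough illustration of that pattern only (a minimal sketch with placeholder setup and teardown, not the factory implementation from this commit):

from contextlib import asynccontextmanager


@asynccontextmanager
async def host_pair_factory():
    # Placeholder setup: a real implementation would construct two libp2p
    # hosts, start them listening, and connect them to each other.
    host_a, host_b = object(), object()
    try:
        yield host_a, host_b
    finally:
        # Placeholder teardown: a real implementation would close both hosts
        # here, the cleanup that previously had to be done by hand.
        pass

Everything after the `yield` runs when the `async with` block exits, even if the test body raises, which is what lets the tests drop explicit cleanup code.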
@@ -11,26 +11,17 @@ from libp2p.tools.factories import (
 
 @pytest.fixture
 async def net_stream_pair(is_host_secure):
-    stream_0, host_0, stream_1, host_1 = await net_stream_pair_factory(is_host_secure)
-    try:
-        yield stream_0, stream_1
-    finally:
-        await asyncio.gather(*[host_0.close(), host_1.close()])
+    async with net_stream_pair_factory(is_host_secure) as net_stream_pair:
+        yield net_stream_pair
 
 
 @pytest.fixture
 async def swarm_pair(is_host_secure):
-    swarm_0, swarm_1 = await swarm_pair_factory(is_host_secure)
-    try:
-        yield swarm_0, swarm_1
-    finally:
-        await asyncio.gather(*[swarm_0.close(), swarm_1.close()])
+    async with swarm_pair_factory(is_host_secure) as swarms:
+        yield swarms
 
 
 @pytest.fixture
 async def swarm_conn_pair(is_host_secure):
-    conn_0, swarm_0, conn_1, swarm_1 = await swarm_conn_pair_factory(is_host_secure)
-    try:
-        yield conn_0, conn_1
-    finally:
-        await asyncio.gather(*[swarm_0.close(), swarm_1.close()])
+    async with swarm_conn_pair_factory(is_host_secure) as swarm_conn_pair:
+        yield swarm_conn_pair
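These fixtures now delegate both setup and teardown to the factory context managers. One convenient property of that style, shown here as a hypothetical sketch (the names and bodies below are illustrative assumptions, not code from this commit), is that factories compose by nesting `async with`, so teardown order follows automatically:

from contextlib import asynccontextmanager


@asynccontextmanager
async def swarm_pair_factory(is_host_secure):
    swarm_0, swarm_1 = object(), object()  # placeholder: build two listening swarms
    try:
        yield swarm_0, swarm_1
    finally:
        pass  # placeholder: close both swarms here


@asynccontextmanager
async def swarm_conn_pair_factory(is_host_secure):
    # Build on the simpler factory and add one more setup step on top of it.
    async with swarm_pair_factory(is_host_secure) as (swarm_0, swarm_1):
        conn_0, conn_1 = object(), object()  # placeholder: dial and collect the connections
        yield conn_0, conn_1
    # Leaving the outer block closes whatever the inner factory created.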
@@ -1,88 +1,83 @@
-import asyncio
-
+import trio
 import pytest
+from trio.testing import wait_all_tasks_blocked
 
 from libp2p.network.exceptions import SwarmException
 from libp2p.tools.factories import SwarmFactory
 from libp2p.tools.utils import connect_swarm
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_swarm_dial_peer(is_host_secure):
-    swarms = await SwarmFactory.create_batch_and_listen(is_host_secure, 3)
-    # Test: No addr found.
-    with pytest.raises(SwarmException):
-        await swarms[0].dial_peer(swarms[1].get_peer_id())
+    async with SwarmFactory.create_batch_and_listen(is_host_secure, 3) as swarms:
+        # Test: No addr found.
+        with pytest.raises(SwarmException):
+            await swarms[0].dial_peer(swarms[1].get_peer_id())
 
-    # Test: len(addr) in the peerstore is 0.
-    swarms[0].peerstore.add_addrs(swarms[1].get_peer_id(), [], 10000)
-    with pytest.raises(SwarmException):
-        await swarms[0].dial_peer(swarms[1].get_peer_id())
+        # Test: len(addr) in the peerstore is 0.
+        swarms[0].peerstore.add_addrs(swarms[1].get_peer_id(), [], 10000)
+        with pytest.raises(SwarmException):
+            await swarms[0].dial_peer(swarms[1].get_peer_id())
 
-    # Test: Succeed if addrs of the peer_id are present in the peerstore.
-    addrs = tuple(
-        addr
-        for transport in swarms[1].listeners.values()
-        for addr in transport.get_addrs()
-    )
-    swarms[0].peerstore.add_addrs(swarms[1].get_peer_id(), addrs, 10000)
-    await swarms[0].dial_peer(swarms[1].get_peer_id())
-    assert swarms[0].get_peer_id() in swarms[1].connections
-    assert swarms[1].get_peer_id() in swarms[0].connections
+        # Test: Succeed if addrs of the peer_id are present in the peerstore.
+        addrs = tuple(
+            addr
+            for transport in swarms[1].listeners.values()
+            for addr in transport.get_addrs()
+        )
+        swarms[0].peerstore.add_addrs(swarms[1].get_peer_id(), addrs, 10000)
+        await swarms[0].dial_peer(swarms[1].get_peer_id())
+        assert swarms[0].get_peer_id() in swarms[1].connections
+        assert swarms[1].get_peer_id() in swarms[0].connections
 
-    # Test: Reuse connections when we already have ones with a peer.
-    conn_to_1 = swarms[0].connections[swarms[1].get_peer_id()]
-    conn = await swarms[0].dial_peer(swarms[1].get_peer_id())
-    assert conn is conn_to_1
-
-    # Clean up
-    await asyncio.gather(*[swarm.close() for swarm in swarms])
+        # Test: Reuse connections when we already have ones with a peer.
+        conn_to_1 = swarms[0].connections[swarms[1].get_peer_id()]
+        conn = await swarms[0].dial_peer(swarms[1].get_peer_id())
+        assert conn is conn_to_1
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_swarm_close_peer(is_host_secure):
-    swarms = await SwarmFactory.create_batch_and_listen(is_host_secure, 3)
-    # 0 <> 1 <> 2
-    await connect_swarm(swarms[0], swarms[1])
-    await connect_swarm(swarms[1], swarms[2])
+    async with SwarmFactory.create_batch_and_listen(is_host_secure, 3) as swarms:
+        # 0 <> 1 <> 2
+        await connect_swarm(swarms[0], swarms[1])
+        await connect_swarm(swarms[1], swarms[2])
 
-    # peer 1 closes peer 0
-    await swarms[1].close_peer(swarms[0].get_peer_id())
-    await asyncio.sleep(0.01)
-    # 0 1 <> 2
-    assert len(swarms[0].connections) == 0
-    assert (
-        len(swarms[1].connections) == 1
-        and swarms[2].get_peer_id() in swarms[1].connections
-    )
+        # peer 1 closes peer 0
+        await swarms[1].close_peer(swarms[0].get_peer_id())
+        await trio.sleep(0.01)
+        await wait_all_tasks_blocked()
+        # 0 1 <> 2
+        assert len(swarms[0].connections) == 0
+        assert (
+            len(swarms[1].connections) == 1
+            and swarms[2].get_peer_id() in swarms[1].connections
+        )
 
-    # peer 1 is closed by peer 2
-    await swarms[2].close_peer(swarms[1].get_peer_id())
-    await asyncio.sleep(0.01)
-    # 0 1 2
-    assert len(swarms[1].connections) == 0 and len(swarms[2].connections) == 0
+        # peer 1 is closed by peer 2
+        await swarms[2].close_peer(swarms[1].get_peer_id())
+        await trio.sleep(0.01)
+        # 0 1 2
+        assert len(swarms[1].connections) == 0 and len(swarms[2].connections) == 0
 
-    await connect_swarm(swarms[0], swarms[1])
-    # 0 <> 1 2
-    assert (
-        len(swarms[0].connections) == 1
-        and swarms[1].get_peer_id() in swarms[0].connections
-    )
-    assert (
-        len(swarms[1].connections) == 1
-        and swarms[0].get_peer_id() in swarms[1].connections
-    )
-    # peer 0 closes peer 1
-    await swarms[0].close_peer(swarms[1].get_peer_id())
-    await asyncio.sleep(0.01)
-    # 0 1 2
-    assert len(swarms[1].connections) == 0 and len(swarms[2].connections) == 0
-
-    # Clean up
-    await asyncio.gather(*[swarm.close() for swarm in swarms])
+        await connect_swarm(swarms[0], swarms[1])
+        # 0 <> 1 2
+        assert (
+            len(swarms[0].connections) == 1
+            and swarms[1].get_peer_id() in swarms[0].connections
+        )
+        assert (
+            len(swarms[1].connections) == 1
+            and swarms[0].get_peer_id() in swarms[1].connections
+        )
+        # peer 0 closes peer 1
+        await swarms[0].close_peer(swarms[1].get_peer_id())
+        await trio.sleep(0.01)
+        # 0 1 2
+        assert len(swarms[1].connections) == 0 and len(swarms[2].connections) == 0
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_swarm_remove_conn(swarm_pair):
     swarm_0, swarm_1 = swarm_pair
     conn_0 = swarm_0.connections[swarm_1.get_peer_id()]
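Both swarm tests now take their batch of swarms from a single `async with SwarmFactory.create_batch_and_listen(...)`. A batch helper of that kind can be built by entering one per-swarm factory at a time on an `AsyncExitStack`; the sketch below is a hypothetical illustration of that technique only (names and bodies are assumptions, not the SwarmFactory code from this commit):

from contextlib import AsyncExitStack, asynccontextmanager


@asynccontextmanager
async def swarm_factory(is_host_secure):
    swarm = object()  # placeholder: build one swarm and start it listening
    try:
        yield swarm
    finally:
        pass  # placeholder: close the swarm


@asynccontextmanager
async def create_batch_and_listen(is_host_secure, number):
    async with AsyncExitStack() as stack:
        swarms = [
            await stack.enter_async_context(swarm_factory(is_host_secure))
            for _ in range(number)
        ]
        # Unwinding the stack on exit closes every swarm, which replaces the
        # old explicit "# Clean up" gather calls at the end of each test.
        yield tuple(swarms)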
@@ -7,23 +7,13 @@ from libp2p.tools.factories import mplex_conn_pair_factory, mplex_stream_pair_fa
 
 @pytest.fixture
 async def mplex_conn_pair(is_host_secure):
-    mplex_conn_0, swarm_0, mplex_conn_1, swarm_1 = await mplex_conn_pair_factory(
-        is_host_secure
-    )
-    assert mplex_conn_0.is_initiator
-    assert not mplex_conn_1.is_initiator
-    try:
-        yield mplex_conn_0, mplex_conn_1
-    finally:
-        await asyncio.gather(*[swarm_0.close(), swarm_1.close()])
+    async with mplex_conn_pair_factory(is_host_secure) as mplex_conn_pair:
+        assert mplex_conn_pair[0].is_initiator
+        assert not mplex_conn_pair[1].is_initiator
+        yield mplex_conn_pair[0], mplex_conn_pair[1]
 
 
 @pytest.fixture
 async def mplex_stream_pair(is_host_secure):
-    mplex_stream_0, swarm_0, mplex_stream_1, swarm_1 = await mplex_stream_pair_factory(
-        is_host_secure
-    )
-    try:
-        yield mplex_stream_0, mplex_stream_1
-    finally:
-        await asyncio.gather(*[swarm_0.close(), swarm_1.close()])
+    async with mplex_stream_pair_factory(is_host_secure) as mplex_stream_pair:
+        yield mplex_stream_pair
@@ -1,5 +1,3 @@
-import asyncio
-
 import pytest
 import trio
 
@@ -12,25 +10,26 @@ from libp2p.tools.constants import MAX_READ_LEN, LISTEN_MADDR
 from libp2p.tools.factories import SwarmFactory
 from libp2p.tools.utils import connect_swarm
 
 
 DATA = b"data_123"
 
 
 @pytest.mark.trio
-async def test_mplex_stream_read_write(nursery):
-    swarm0, swarm1 = SwarmFactory(), SwarmFactory()
-    await swarm0.listen(LISTEN_MADDR, nursery=nursery)
-    await swarm1.listen(LISTEN_MADDR, nursery=nursery)
-    await connect_swarm(swarm0, swarm1, nursery)
-    conn_0 = swarm0.connections[swarm1.get_peer_id()]
-    conn_1 = swarm1.connections[swarm0.get_peer_id()]
-    stream_0 = await conn_0.muxed_conn.open_stream()
-    await trio.sleep(1)
-    stream_1 = tuple(conn_1.muxed_conn.streams.values())[0]
-    await stream_0.write(DATA)
-    assert (await stream_1.read(MAX_READ_LEN)) == DATA
+async def test_mplex_stream_read_write():
+    async with SwarmFactory.create_batch_and_listen(False, 2) as swarms:
+        await swarms[0].listen(LISTEN_MADDR)
+        await swarms[1].listen(LISTEN_MADDR)
+        await connect_swarm(swarms[0], swarms[1])
+        conn_0 = swarms[0].connections[swarms[1].get_peer_id()]
+        conn_1 = swarms[1].connections[swarms[0].get_peer_id()]
+        stream_0 = await conn_0.muxed_conn.open_stream()
+        await trio.sleep(1)
+        stream_1 = tuple(conn_1.muxed_conn.streams.values())[0]
+        await stream_0.write(DATA)
+        assert (await stream_1.read(MAX_READ_LEN)) == DATA
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_pair_read_until_eof(mplex_stream_pair):
     read_bytes = bytearray()
     stream_0, stream_1 = mplex_stream_pair
@@ -38,43 +37,43 @@ async def test_mplex_stream_pair_read_until_eof(mplex_stream_pair):
     async def read_until_eof():
         read_bytes.extend(await stream_1.read())
 
-    task = asyncio.ensure_future(read_until_eof())
+    task = trio.ensure_future(read_until_eof())
 
     expected_data = bytearray()
 
     # Test: `read` doesn't return before `close` is called.
     await stream_0.write(DATA)
     expected_data.extend(DATA)
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
     assert len(read_bytes) == 0
     # Test: `read` doesn't return before `close` is called.
     await stream_0.write(DATA)
     expected_data.extend(DATA)
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
     assert len(read_bytes) == 0
 
     # Test: Close the stream, `read` returns, and receive previous sent data.
     await stream_0.close()
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
     assert read_bytes == expected_data
 
     task.cancel()
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_read_after_remote_closed(mplex_stream_pair):
     stream_0, stream_1 = mplex_stream_pair
     assert not stream_1.event_remote_closed.is_set()
     await stream_0.write(DATA)
     await stream_0.close()
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
     assert stream_1.event_remote_closed.is_set()
     assert (await stream_1.read(MAX_READ_LEN)) == DATA
     with pytest.raises(MplexStreamEOF):
         await stream_1.read(MAX_READ_LEN)
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_read_after_local_reset(mplex_stream_pair):
     stream_0, stream_1 = mplex_stream_pair
     await stream_0.reset()
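For background on the EOF test above: under trio, a background task such as `read_until_eof` is typically started inside a nursery (trio has no direct equivalent of asyncio's `ensure_future`), and it finishes when its input is closed. The following is a self-contained sketch of that shape using a memory channel, not code from this commit:

import trio


async def main():
    read_bytes = bytearray()

    async def read_until_eof(receive_channel):
        # Collect chunks until the sender closes the channel (the "EOF").
        async for chunk in receive_channel:
            read_bytes.extend(chunk)

    send_channel, receive_channel = trio.open_memory_channel(0)
    async with trio.open_nursery() as nursery:
        nursery.start_soon(read_until_eof, receive_channel)
        await send_channel.send(b"data_123")
        await trio.sleep(0.01)
        await send_channel.aclose()  # reader's loop ends, so the nursery can exit
    assert read_bytes == b"data_123"


trio.run(main)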
@@ -82,29 +81,29 @@ async def test_mplex_stream_read_after_local_reset(mplex_stream_pair):
         await stream_0.read(MAX_READ_LEN)
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_read_after_remote_reset(mplex_stream_pair):
     stream_0, stream_1 = mplex_stream_pair
     await stream_0.write(DATA)
     await stream_0.reset()
     # Sleep to let `stream_1` receive the message.
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
     with pytest.raises(MplexStreamReset):
         await stream_1.read(MAX_READ_LEN)
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_read_after_remote_closed_and_reset(mplex_stream_pair):
     stream_0, stream_1 = mplex_stream_pair
     await stream_0.write(DATA)
     await stream_0.close()
     await stream_0.reset()
     # Sleep to let `stream_1` receive the message.
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
     assert (await stream_1.read(MAX_READ_LEN)) == DATA
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_write_after_local_closed(mplex_stream_pair):
     stream_0, stream_1 = mplex_stream_pair
     await stream_0.write(DATA)
@@ -113,7 +112,7 @@ async def test_mplex_stream_write_after_local_closed(mplex_stream_pair):
         await stream_0.write(DATA)
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_write_after_local_reset(mplex_stream_pair):
     stream_0, stream_1 = mplex_stream_pair
     await stream_0.reset()
@@ -121,16 +120,16 @@ async def test_mplex_stream_write_after_local_reset(mplex_stream_pair):
         await stream_0.write(DATA)
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_write_after_remote_reset(mplex_stream_pair):
     stream_0, stream_1 = mplex_stream_pair
     await stream_1.reset()
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
     with pytest.raises(MplexStreamClosed):
         await stream_0.write(DATA)
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_both_close(mplex_stream_pair):
     stream_0, stream_1 = mplex_stream_pair
     # Flags are not set initially.
@@ -144,7 +143,7 @@ async def test_mplex_stream_both_close(mplex_stream_pair):
 
     # Test: Close one side.
     await stream_0.close()
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
 
     assert stream_0.event_local_closed.is_set()
     assert not stream_1.event_local_closed.is_set()
@@ -156,7 +155,7 @@ async def test_mplex_stream_both_close(mplex_stream_pair):
 
     # Test: Close the other side.
     await stream_1.close()
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
     # Both sides are closed.
     assert stream_0.event_local_closed.is_set()
     assert stream_1.event_local_closed.is_set()
@@ -170,11 +169,11 @@ async def test_mplex_stream_both_close(mplex_stream_pair):
         await stream_0.reset()
 
 
-@pytest.mark.asyncio
+@pytest.mark.trio
 async def test_mplex_stream_reset(mplex_stream_pair):
     stream_0, stream_1 = mplex_stream_pair
     await stream_0.reset()
-    await asyncio.sleep(0.01)
+    await trio.sleep(0.01)
 
     # Both sides are closed.
     assert stream_0.event_local_closed.is_set()