Fix tests_interop

- Remove pexpect
- Use new version of `p2pclient`, which makes use of anyio
- Clean up tests
mhchia
2020-01-07 14:14:34 +08:00
parent 000e777ac7
commit fe4354d377
11 changed files with 415 additions and 477 deletions
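
The diffs below replace the pexpect-driven, asyncio-based flow with trio primitives; the core pattern is to read the subprocess's stdout and fail hard if the expected output never appears within a timeout, instead of polling in a sleep loop. A minimal sketch of that pattern, assuming a trio subprocess started with stdout piped (the helper name is illustrative, not part of the diff):

import trio

async def wait_for_pattern(proc: trio.Process, pattern: bytes, timeout: float = 30) -> None:
    # Accumulate stdout chunks and return once the pattern shows up;
    # trio.fail_after raises if the daemon never prints it in time.
    buf = bytearray()
    with trio.fail_after(timeout):
        async for chunk in proc.stdout:
            buf.extend(chunk)
            if pattern in buf:
                return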

View File

@@ -7,7 +7,7 @@ from libp2p.pubsub import floodsub, gossipsub
# Just an arbitrarily large number.
# It is used when calling `MplexStream.read(MAX_READ_LEN)`,
# to avoid `MplexStream.read()`, which blocks, reading until EOF.
MAX_READ_LEN = 2 ** 32 - 1
MAX_READ_LEN = 65535
LISTEN_MADDR = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")

View File

@@ -1,2 +1 @@
LOCALHOST_IP = "127.0.0.1"
PEXPECT_NEW_LINE = "\r\n"

View File

@@ -1,52 +1,22 @@
import asyncio
import time
from typing import Any, Awaitable, Callable, List
from typing import AsyncIterator
from async_generator import asynccontextmanager
import multiaddr
from multiaddr import Multiaddr
from p2pclient import Client
import pytest
import trio
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo, info_from_p2p_addr
from .constants import LOCALHOST_IP
from .envs import GO_BIN_PATH
from .process import BaseInteractiveProcess
P2PD_PATH = GO_BIN_PATH / "p2pd"
TIMEOUT_DURATION = 30
async def try_until_success(
coro_func: Callable[[], Awaitable[Any]], timeout: int = TIMEOUT_DURATION
) -> None:
"""
Keep running ``coro_func`` until either it succeeds or time is up.
All arguments of ``coro_func`` should be filled, i.e. it should be
called without arguments.
"""
t_start = time.monotonic()
while True:
result = await coro_func()
if result:
break
if (time.monotonic() - t_start) >= timeout:
# timeout
pytest.fail(f"{coro_func} is still failing after `{timeout}` seconds")
await asyncio.sleep(0.01)
class P2PDProcess:
proc: asyncio.subprocess.Process
cmd: str = str(P2PD_PATH)
args: List[Any]
is_proc_running: bool
_tasks: List["asyncio.Future[Any]"]
class P2PDProcess(BaseInteractiveProcess):
def __init__(
self,
control_maddr: Multiaddr,
@@ -75,74 +45,21 @@ class P2PDProcess:
# - gossipsubHeartbeatInterval: GossipSubHeartbeatInitialDelay = 100 * time.Millisecond # noqa: E501
# - gossipsubHeartbeatInitialDelay: GossipSubHeartbeatInterval = 1 * time.Second
# Reference: https://github.com/libp2p/go-libp2p-daemon/blob/b95e77dbfcd186ccf817f51e95f73f9fd5982600/p2pd/main.go#L348-L353 # noqa: E501
self.proc = None
self.cmd = str(P2PD_PATH)
self.args = args
self.is_proc_running = False
self._tasks = []
async def wait_until_ready(self) -> None:
lines_head_pattern = (b"Control socket:", b"Peer ID:", b"Peer Addrs:")
lines_head_occurred = {line: False for line in lines_head_pattern}
async def read_from_daemon_and_check() -> bool:
line = await self.proc.stdout.readline()
for head_pattern in lines_head_occurred:
if line.startswith(head_pattern):
lines_head_occurred[head_pattern] = True
return all([value for value in lines_head_occurred.values()])
await try_until_success(read_from_daemon_and_check)
# Sleep a little bit to ensure the listener is up after logs are emitted.
await asyncio.sleep(0.01)
async def start_printing_logs(self) -> None:
async def _print_from_stream(
src_name: str, reader: asyncio.StreamReader
) -> None:
while True:
line = await reader.readline()
if line != b"":
print(f"{src_name}\t: {line.rstrip().decode()}")
await asyncio.sleep(0.01)
self._tasks.append(
asyncio.ensure_future(_print_from_stream("out", self.proc.stdout))
)
self._tasks.append(
asyncio.ensure_future(_print_from_stream("err", self.proc.stderr))
)
await asyncio.sleep(0)
async def start(self) -> None:
if self.is_proc_running:
return
self.proc = await asyncio.subprocess.create_subprocess_exec(
self.cmd,
*self.args,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
bufsize=0,
)
self.is_proc_running = True
await self.wait_until_ready()
await self.start_printing_logs()
async def close(self) -> None:
if self.is_proc_running:
self.proc.terminate()
await self.proc.wait()
self.is_proc_running = False
for task in self._tasks:
task.cancel()
self.patterns = (b"Control socket:", b"Peer ID:", b"Peer Addrs:")
self.bytes_read = bytearray()
self.event_ready = trio.Event()
class Daemon:
p2pd_proc: P2PDProcess
p2pd_proc: BaseInteractiveProcess
control: Client
peer_info: PeerInfo
def __init__(
self, p2pd_proc: P2PDProcess, control: Client, peer_info: PeerInfo
self, p2pd_proc: BaseInteractiveProcess, control: Client, peer_info: PeerInfo
) -> None:
self.p2pd_proc = p2pd_proc
self.control = control
@@ -164,6 +81,7 @@ class Daemon:
await self.control.close()
@asynccontextmanager
async def make_p2pd(
daemon_control_port: int,
client_callback_port: int,
@@ -172,7 +90,7 @@ async def make_p2pd(
is_gossipsub: bool = True,
is_pubsub_signing: bool = False,
is_pubsub_signing_strict: bool = False,
) -> Daemon:
) -> AsyncIterator[Daemon]:
control_maddr = Multiaddr(f"/ip4/{LOCALHOST_IP}/tcp/{daemon_control_port}")
p2pd_proc = P2PDProcess(
control_maddr,
@@ -185,21 +103,22 @@ async def make_p2pd(
await p2pd_proc.start()
client_callback_maddr = Multiaddr(f"/ip4/{LOCALHOST_IP}/tcp/{client_callback_port}")
p2pc = Client(control_maddr, client_callback_maddr)
await p2pc.listen()
peer_id, maddrs = await p2pc.identify()
listen_maddr: Multiaddr = None
for maddr in maddrs:
try:
ip = maddr.value_for_protocol(multiaddr.protocols.P_IP4)
# NOTE: Check if this `maddr` uses `tcp`.
maddr.value_for_protocol(multiaddr.protocols.P_TCP)
except multiaddr.exceptions.ProtocolLookupError:
continue
if ip == LOCALHOST_IP:
listen_maddr = maddr
break
assert listen_maddr is not None, "no loopback maddr is found"
peer_info = info_from_p2p_addr(
listen_maddr.encapsulate(Multiaddr(f"/p2p/{peer_id.to_string()}"))
)
return Daemon(p2pd_proc, p2pc, peer_info)
async with p2pc.listen():
peer_id, maddrs = await p2pc.identify()
listen_maddr: Multiaddr = None
for maddr in maddrs:
try:
ip = maddr.value_for_protocol(multiaddr.protocols.P_IP4)
# NOTE: Check if this `maddr` uses `tcp`.
maddr.value_for_protocol(multiaddr.protocols.P_TCP)
except multiaddr.exceptions.ProtocolLookupError:
continue
if ip == LOCALHOST_IP:
listen_maddr = maddr
break
assert listen_maddr is not None, "no loopback maddr is found"
peer_info = info_from_p2p_addr(
listen_maddr.encapsulate(Multiaddr(f"/p2p/{peer_id.to_string()}"))
)
yield Daemon(p2pd_proc, p2pc, peer_info)
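
Since `make_p2pd` is now decorated with `asynccontextmanager` and yields the `Daemon` while `p2pc.listen()` is open, callers are expected to use it with `async with` rather than awaiting a return value. A hedged usage sketch (ports are arbitrary, and parameters elided in the hunk above are omitted):

async def use_daemon() -> None:
    async with make_p2pd(
        daemon_control_port=29000,   # arbitrary illustrative port
        client_callback_port=29001,  # arbitrary illustrative port
        is_gossipsub=True,
    ) as daemon:
        # `daemon.control` is the p2pclient Client; `daemon.peer_info` is built
        # from the loopback listen maddr selected above.
        print(daemon.peer_info)
    # Leaving the block closes the client's listener; the daemon process and
    # control client are shut down separately (e.g. by the owning fixture).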

View File

@@ -0,0 +1,66 @@
from abc import ABC, abstractmethod
import subprocess
from typing import Iterable, List
import trio
TIMEOUT_DURATION = 30
class AbstractInterativeProcess(ABC):
@abstractmethod
async def start(self) -> None:
...
@abstractmethod
async def close(self) -> None:
...
class BaseInteractiveProcess(AbstractInterativeProcess):
proc: trio.Process = None
cmd: str
args: List[str]
bytes_read: bytearray
patterns: Iterable[bytes] = None
event_ready: trio.Event
async def wait_until_ready(self) -> None:
patterns_occurred = {pat: False for pat in self.patterns}
async def read_from_daemon_and_check() -> None:
async for data in self.proc.stdout:
# TODO: It takes O(n^2), which is quite bad.
# But it should succeed in a few seconds.
self.bytes_read.extend(data)
for pat, occurred in patterns_occurred.items():
if occurred:
continue
if pat in self.bytes_read:
patterns_occurred[pat] = True
if all([value for value in patterns_occurred.values()]):
return
with trio.fail_after(TIMEOUT_DURATION):
await read_from_daemon_and_check()
self.event_ready.set()
# Sleep a little bit to ensure the listener is up after logs are emitted.
await trio.sleep(0.01)
async def start(self) -> None:
if self.proc is not None:
return
# NOTE: Ignore type checks here since mypy complains about bufsize=0
self.proc = await trio.open_process( # type: ignore
[self.cmd] + self.args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, # Redirect stderr to stdout, which makes parsing easier
bufsize=0,
)
await self.wait_until_ready()
async def close(self) -> None:
if self.proc is None:
return
self.proc.terminate()
await self.proc.wait()
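
A concrete subclass only needs to set `cmd`, `args`, `patterns`, `bytes_read`, and `event_ready` before `start()` is awaited, as `P2PDProcess.__init__` does in the previous file. A hypothetical example (the binary name and its ready line are made up):

import trio

class EchoDaemonProcess(BaseInteractiveProcess):
    def __init__(self) -> None:
        self.cmd = "echo-daemon"              # hypothetical executable
        self.args = ["--listen", "0"]
        self.patterns = (b"listening on",)    # line the daemon prints once ready
        self.bytes_read = bytearray()
        self.event_ready = trio.Event()

async def main() -> None:
    proc = EchoDaemonProcess()
    await proc.start()   # returns once every pattern has appeared on stdout
    await proc.close()   # terminates the process and waits for it to exit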

View File

@@ -1,7 +1,7 @@
import asyncio
from typing import Union
from multiaddr import Multiaddr
import trio
from libp2p.host.host_interface import IHost
from libp2p.peer.id import ID
@@ -50,7 +50,7 @@ async def connect(a: TDaemonOrHost, b: TDaemonOrHost) -> None:
else: # isinstance(b, IHost)
await a.connect(b_peer_info)
# Allow additional sleep for both side to establish the connection.
await asyncio.sleep(0.1)
await trio.sleep(0.1)
a_peer_info = _get_peer_info(a)