Remove cleanup

`cleanup` cancels all tasks in the loop, including the main task run by
`run_until_complete`.
This commit is contained in:
mhchia
2019-09-09 23:09:33 +08:00
parent a45eb76421
commit bb0da41eda
10 changed files with 9 additions and 65 deletions

View File

@ -4,7 +4,7 @@ import pytest
from tests.configs import LISTEN_MADDR
from tests.factories import PubsubFactory
from tests.utils import cleanup, connect
from tests.utils import connect
from .configs import FLOODSUB_PROTOCOL_ID
@ -258,4 +258,3 @@ async def perform_test_from_obj(obj, router_factory):
assert node_map[origin_node_id].get_id().to_bytes() == msg.from_id
# Success, terminate pending tasks.
await cleanup()

View File

@ -3,7 +3,7 @@ from threading import Thread
import pytest
from tests.utils import cleanup, connect
from tests.utils import connect
from .dummy_account_node import DummyAccountNode
@ -64,7 +64,6 @@ async def perform_test(num_nodes, adjacency_map, action_func, assertion_func):
assertion_func(dummy_node)
# Success, terminate pending tasks.
await cleanup()
@pytest.mark.asyncio

View File

@ -4,7 +4,7 @@ import pytest
from libp2p.peer.id import ID
from tests.factories import FloodsubFactory
from tests.utils import cleanup, connect
from tests.utils import connect
from .floodsub_integration_test_settings import (
floodsub_protocol_pytest_params,
@ -36,7 +36,6 @@ async def test_simple_two_nodes(pubsubs_fsub):
assert res_b.topicIDs == [topic]
# Success, terminate pending tasks.
await cleanup()
# Initialize Pubsub with a cache_size of 4
@ -82,7 +81,6 @@ async def test_lru_cache_two_nodes(pubsubs_fsub, monkeypatch):
assert sub_b.empty()
# Success, terminate pending tasks.
await cleanup()
@pytest.mark.parametrize("test_case_obj", floodsub_protocol_pytest_params)

View File

@ -3,7 +3,7 @@ import random
import pytest
from tests.utils import cleanup, connect
from tests.utils import connect
from .configs import GossipsubParams
from .utils import dense_connect, one_to_all_connect
@ -61,8 +61,6 @@ async def test_join(num_hosts, hosts, pubsubs_gsub):
assert hosts[i].get_id() not in gossipsubs[central_node_index].mesh[topic]
assert topic not in gossipsubs[i].mesh
await cleanup()
@pytest.mark.parametrize("num_hosts", (1,))
@pytest.mark.asyncio
@ -81,8 +79,6 @@ async def test_leave(pubsubs_gsub):
# Test re-leave
await gossipsub.leave(topic)
await cleanup()
@pytest.mark.parametrize("num_hosts", (2,))
@pytest.mark.asyncio
@ -133,8 +129,6 @@ async def test_handle_graft(pubsubs_gsub, hosts, event_loop, monkeypatch):
# Check that bob is now alice's mesh peer
assert id_bob in gossipsubs[index_alice].mesh[topic]
await cleanup()
@pytest.mark.parametrize(
"num_hosts, gossipsub_params", ((2, GossipsubParams(heartbeat_interval=3)),)
@ -174,8 +168,6 @@ async def test_handle_prune(pubsubs_gsub, hosts):
assert id_alice not in gossipsubs[index_bob].mesh[topic]
assert id_bob in gossipsubs[index_alice].mesh[topic]
await cleanup()
@pytest.mark.parametrize("num_hosts", (10,))
@pytest.mark.asyncio
@ -210,7 +202,6 @@ async def test_dense(num_hosts, pubsubs_gsub, hosts):
for queue in queues:
msg = await queue.get()
assert msg.data == msg_content
await cleanup()
@pytest.mark.parametrize("num_hosts", (10,))
@ -268,8 +259,6 @@ async def test_fanout(hosts, pubsubs_gsub):
msg = await queue.get()
assert msg.data == msg_content
await cleanup()
@pytest.mark.parametrize("num_hosts", (10,))
@pytest.mark.asyncio
@ -340,8 +329,6 @@ async def test_fanout_maintenance(hosts, pubsubs_gsub):
msg = await queue.get()
assert msg.data == msg_content
await cleanup()
@pytest.mark.parametrize(
"num_hosts, gossipsub_params",
@ -380,5 +367,3 @@ async def test_gossip_propagation(hosts, pubsubs_gsub):
# should be able to read message
msg = await queue_1.get()
assert msg.data == msg_content
await cleanup()