From 8f9e5a28ff70036c91b2a3f01db4b111db723264 Mon Sep 17 00:00:00 2001 From: mhchia Date: Mon, 22 Jul 2019 18:12:54 +0800 Subject: [PATCH 01/17] Temp --- libp2p/network/swarm.py | 9 +++++++++ libp2p/pubsub/pubsub.py | 24 ++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/libp2p/network/swarm.py b/libp2p/network/swarm.py index 7d059915..e3d037e8 100644 --- a/libp2p/network/swarm.py +++ b/libp2p/network/swarm.py @@ -195,6 +195,15 @@ class Swarm(INetwork): def add_router(self, router): self.router = router + # TODO: `tear_down` + async def tear_down(self) -> None: + # pylint: disable=line-too-long + # Reference: https://github.com/libp2p/go-libp2p-swarm/blob/8be680aef8dea0a4497283f2f98470c2aeae6b65/swarm.go#L118 # noqa: E501 + pass + + # TODO: `disconnect`? + + def create_generic_protocol_handler(swarm): """ Create a generic protocol handler from the given swarm. We use swarm diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index 7afdc2a1..fa04d2b0 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -9,6 +9,14 @@ from typing import ( Tuple, ) +from typing import ( + Any, + Dict, + List, + Sequence, + Tuple, +) + from lru import LRU from libp2p.host.host_interface import ( @@ -319,3 +327,19 @@ class Pubsub: for _, stream in self.peers.items(): # Write message to stream await stream.write(rpc_msg) + + def list_peers(self, topic_id: str) -> Tuple[ID]: + return + + def publish(self, topic_id: str, data: bytes) -> None: + # TODO: Create pb message + # TODO: Sign with our signing key + # TODO: `p.pushMsg(p.host.ID(), msg)` + # TODO: - Check if the source is in the blacklist. If yes, reject. + # TODO: - Check if the `from` is in the blacklist. If yes, reject. + # TODO: - Check if the message is seen. If yes, reject it. + # TODO: - Validate the message. If failed, reject it. + # TODO: - Mark as seen and `publishMessage` + # TODO: - Notify the subscribers + # TODO: - Router.Publish + return From 218bdb42c412ed459f291f835d1aa8f7c62c106f Mon Sep 17 00:00:00 2001 From: mhchia Date: Tue, 23 Jul 2019 16:10:14 +0800 Subject: [PATCH 02/17] Add basic functionalities of `publish` --- libp2p/pubsub/pubsub.py | 41 +++++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index fa04d2b0..5f8390fb 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -1,6 +1,7 @@ # pylint: disable=no-name-in-module import asyncio import time +<<<<<<< HEAD from typing import ( Any, Dict, @@ -9,6 +10,8 @@ from typing import ( Tuple, ) +======= +>>>>>>> Add basic functionalities of `publish` from typing import ( Any, Dict, @@ -331,15 +334,37 @@ class Pubsub: def list_peers(self, topic_id: str) -> Tuple[ID]: return - def publish(self, topic_id: str, data: bytes) -> None: - # TODO: Create pb message + async def publish(self, topic_id: str, data: bytes) -> None: + msg = rpc_pb2.Message( + data=data, + topicIDs=[topic_id], + from_id=self.host.get_id().to_bytes(), + seqno=self._next_seqno(), + ) # TODO: Sign with our signing key - # TODO: `p.pushMsg(p.host.ID(), msg)` + self.push_msg(self.host.get_id(), msg) + + async def push_msg(self, src: ID, msg: rpc_pb2.Message): # TODO: - Check if the source is in the blacklist. If yes, reject. # TODO: - Check if the `from` is in the blacklist. If yes, reject. - # TODO: - Check if the message is seen. If yes, reject it. + # TODO: - Check if signing is required and if so signature should be attached. 
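+        # The seen-message check below treats `seen_messages` (an LRU cache)
+        # as a bounded set: `get_msg_id` derives an id from the message's
+        # `(seqno, from_id)`, so the same message arriving again via another
+        # peer maps to the same id and is dropped.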
+ if self._is_msg_seen(msg): + return # TODO: - Validate the message. If failed, reject it. - # TODO: - Mark as seen and `publishMessage` - # TODO: - Notify the subscribers - # TODO: - Router.Publish - return + self._mark_msg_seen(msg) + await self.handle_talk(msg) + await self.router.publish(src, msg) + + def _next_seqno(self) -> bytes: + self.counter += 1 + return self.counter.to_bytes(8, 'big') + + def _is_msg_seen(self, msg: rpc_pb2.Message) -> bool: + msg_id = get_msg_id(msg) + return msg_id in self.seen_messages + + def _mark_msg_seen(self, msg: rpc_pb2.Message) -> None: + msg_id = get_msg_id(msg) + # FIXME: Mapping `msg_id` to `1` is quite awkward. Should investigate if there is a + # more appropriate way. + self.seen_messages[msg_id] = 1 From b528c211b97f78db2ae55df1ac6e2fb04c6a9621 Mon Sep 17 00:00:00 2001 From: mhchia Date: Wed, 24 Jul 2019 14:35:14 +0800 Subject: [PATCH 03/17] Temp modified publish --- libp2p/pubsub/floodsub.py | 13 +++++++++---- libp2p/pubsub/pubsub.py | 7 +++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py index c0562141..659b6728 100644 --- a/libp2p/pubsub/floodsub.py +++ b/libp2p/pubsub/floodsub.py @@ -46,7 +46,7 @@ class FloodSub(IPubsubRouter): :param rpc: rpc message """ - async def publish(self, sender_peer_id: ID, rpc_message: rpc_pb2.Message) -> None: + async def publish(self, from_peer: ID, pubsub_message: rpc_pb2.Message) -> None: """ Invoked to forward a new message that has been validated. This is where the "flooding" part of floodsub happens @@ -60,9 +60,14 @@ class FloodSub(IPubsubRouter): :param sender_peer_id: peer_id of message sender :param rpc_message: pubsub message in RPC string format """ - packet = rpc_pb2.RPC() - packet.ParseFromString(rpc_message) - msg_sender = str(sender_peer_id) + # packet = rpc_pb2.RPC() + # packet.ParseFromString(rpc_message) + + from_peer_str = str(from_peer) + for topic in pubsub_message.topicIDs: + if topic not in self.pubsub.topics: + continue + peers = self.pubsub.peer_topics[topic] # Deliver to self if self was origin # Note: handle_talk checks if self is subscribed to topics in message for message in packet.publish: diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index 5f8390fb..cd2345f2 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -341,16 +341,23 @@ class Pubsub: from_id=self.host.get_id().to_bytes(), seqno=self._next_seqno(), ) + # TODO: Sign with our signing key + self.push_msg(self.host.get_id(), msg) async def push_msg(self, src: ID, msg: rpc_pb2.Message): # TODO: - Check if the source is in the blacklist. If yes, reject. + # TODO: - Check if the `from` is in the blacklist. If yes, reject. + # TODO: - Check if signing is required and if so signature should be attached. + if self._is_msg_seen(msg): return + # TODO: - Validate the message. If failed, reject it. 
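+        # (Marking as seen before forwarding means that if a peer echoes this
+        # message back to us, the `_is_msg_seen` check above rejects it.)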
+ self._mark_msg_seen(msg) await self.handle_talk(msg) await self.router.publish(src, msg) From 3f52b0dc0abcd6872f0ac9919e5a1442e89b6f44 Mon Sep 17 00:00:00 2001 From: mhchia Date: Wed, 24 Jul 2019 21:57:46 +0800 Subject: [PATCH 04/17] Remove leftover imports --- libp2p/pubsub/pubsub.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index cd2345f2..af37ee02 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -1,17 +1,6 @@ # pylint: disable=no-name-in-module import asyncio import time -<<<<<<< HEAD -from typing import ( - Any, - Dict, - List, - Sequence, - Tuple, -) - -======= ->>>>>>> Add basic functionalities of `publish` from typing import ( Any, Dict, From 93cf5a2c3229e2fe8bbadcecbea65181c9ea704d Mon Sep 17 00:00:00 2001 From: mhchia Date: Wed, 24 Jul 2019 22:33:32 +0800 Subject: [PATCH 05/17] A roughly skeleton of `floodsub.publish` Still need to ensure when to deliver to ourselves --- libp2p/peer/id.py | 3 ++ libp2p/pubsub/floodsub.py | 93 +++++++++++++++++++++++++++------------ 2 files changed, 67 insertions(+), 29 deletions(-) diff --git a/libp2p/peer/id.py b/libp2p/peer/id.py index 3097e1fb..77e2f87e 100644 --- a/libp2p/peer/id.py +++ b/libp2p/peer/id.py @@ -16,6 +16,9 @@ class ID: def __init__(self, id_str): self._id_str = id_str + def to_bytes(self) -> bytes: + return self._id_str + def get_raw_id(self): return self._id_str diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py index 659b6728..f1eaa9c3 100644 --- a/libp2p/pubsub/floodsub.py +++ b/libp2p/pubsub/floodsub.py @@ -1,3 +1,8 @@ +from typing import ( + Generator, + Sequence, +) + from libp2p.peer.id import ( ID, ) @@ -60,40 +65,70 @@ class FloodSub(IPubsubRouter): :param sender_peer_id: peer_id of message sender :param rpc_message: pubsub message in RPC string format """ + + peers_gen = self._get_peers_to_send( + pubsub_message.topicIDs, + from_peer_id=from_peer, + src_peer_id=ID(pubsub_message.from_id), + ) + rpc_msg = rpc_pb2.RPC( + publish=[pubsub_message], + ) + for peer_id in peers_gen: + stream = self.pubsub.peers[str(peer_id)] + await stream.write(rpc_msg.SerializeToString()) + # packet = rpc_pb2.RPC() # packet.ParseFromString(rpc_message) - from_peer_str = str(from_peer) - for topic in pubsub_message.topicIDs: + # from_peer_str = str(from_peer) + # for topic in pubsub_message.topicIDs: + # if topic not in self.pubsub.topics: + # continue + # peers = self.pubsub.peer_topics[topic] + # # Deliver to self if self was origin + # # Note: handle_talk checks if self is subscribed to topics in message + # for message in packet.publish: + # decoded_from_id = message.from_id.decode('utf-8') + # if msg_sender == decoded_from_id and msg_sender == str(self.pubsub.host.get_id()): + # id_in_seen_msgs = (message.seqno, message.from_id) + + # if id_in_seen_msgs not in self.pubsub.seen_messages: + # self.pubsub.seen_messages[id_in_seen_msgs] = 1 + + # await self.pubsub.handle_talk(message) + + # # Deliver to self and peers + # for topic in message.topicIDs: + # if topic in self.pubsub.peer_topics: + # for peer_id_in_topic in self.pubsub.peer_topics[topic]: + # # Forward to all known peers in the topic that are not the + # # message sender and are not the message origin + # if peer_id_in_topic not in (msg_sender, decoded_from_id): + # stream = self.pubsub.peers[peer_id_in_topic] + # # Create new packet with just publish message + # new_packet = rpc_pb2.RPC() + # new_packet.publish.extend([message]) + + # # Publish the packet + # await 
stream.write(new_packet.SerializeToString()) + + def _get_peers_to_send( + self, + topic_ids: Sequence[str], + from_peer_id: ID, + src_peer_id: ID) -> Generator[ID]: + # TODO: should send to self if `src_peer_id` is ourself? + for topic in topic_ids: if topic not in self.pubsub.topics: continue - peers = self.pubsub.peer_topics[topic] - # Deliver to self if self was origin - # Note: handle_talk checks if self is subscribed to topics in message - for message in packet.publish: - decoded_from_id = message.from_id.decode('utf-8') - if msg_sender == decoded_from_id and msg_sender == str(self.pubsub.host.get_id()): - id_in_seen_msgs = (message.seqno, message.from_id) - - if id_in_seen_msgs not in self.pubsub.seen_messages: - self.pubsub.seen_messages[id_in_seen_msgs] = 1 - - await self.pubsub.handle_talk(message) - - # Deliver to self and peers - for topic in message.topicIDs: - if topic in self.pubsub.peer_topics: - for peer_id_in_topic in self.pubsub.peer_topics[topic]: - # Forward to all known peers in the topic that are not the - # message sender and are not the message origin - if peer_id_in_topic not in (msg_sender, decoded_from_id): - stream = self.pubsub.peers[peer_id_in_topic] - # Create new packet with just publish message - new_packet = rpc_pb2.RPC() - new_packet.publish.extend([message]) - - # Publish the packet - await stream.write(new_packet.SerializeToString()) + for peer_id in self.pubsub.peer_topics[topic]: + if peer_id in (from_peer_id, src_peer_id): + continue + # FIXME: Should change `self.pubsub.peers` to Dict[PeerID, ...] + if str(peer_id) not in self.pubsub.peers: + continue + yield peer_id async def join(self, topic): """ From cae4f340349b1fd17c2300fcd8fe60f51be7c0e6 Mon Sep 17 00:00:00 2001 From: mhchia Date: Thu, 25 Jul 2019 14:08:16 +0800 Subject: [PATCH 06/17] Refactor `floodsub.publish` Passed the first test of floodsub --- libp2p/pubsub/floodsub.py | 70 +++++++----------------- libp2p/pubsub/pubsub.py | 25 +++++++-- libp2p/pubsub/pubsub_router_interface.py | 6 +- tests/pubsub/test_floodsub.py | 36 ++++++++---- tests/pubsub/utils.py | 22 +++++++- 5 files changed, 87 insertions(+), 72 deletions(-) diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py index f1eaa9c3..03b7330f 100644 --- a/libp2p/pubsub/floodsub.py +++ b/libp2p/pubsub/floodsub.py @@ -1,10 +1,10 @@ from typing import ( - Generator, - Sequence, + Iterable, ) from libp2p.peer.id import ( ID, + id_b58_decode, ) from .pb import rpc_pb2 @@ -51,7 +51,7 @@ class FloodSub(IPubsubRouter): :param rpc: rpc message """ - async def publish(self, from_peer: ID, pubsub_message: rpc_pb2.Message) -> None: + async def publish(self, src: ID, pubsub_msg: rpc_pb2.Message) -> None: """ Invoked to forward a new message that has been validated. This is where the "flooding" part of floodsub happens @@ -62,68 +62,36 @@ class FloodSub(IPubsubRouter): so that seen messages are not further forwarded. It also never forwards a message back to the source or the peer that forwarded the message. - :param sender_peer_id: peer_id of message sender - :param rpc_message: pubsub message in RPC string format + :param src: the peer id of the peer who forwards the message to me. + :param pubsub_msg: pubsub message in protobuf. 
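+
+        Note: by the time this is invoked, `Pubsub.push_msg` has already
+        deduplicated the message and delivered it locally via `handle_talk`,
+        so this method only forwards to remote peers.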
""" peers_gen = self._get_peers_to_send( - pubsub_message.topicIDs, - from_peer_id=from_peer, - src_peer_id=ID(pubsub_message.from_id), + pubsub_msg.topicIDs, + src=src, + origin=ID(pubsub_msg.from_id), ) rpc_msg = rpc_pb2.RPC( - publish=[pubsub_message], + publish=[pubsub_msg], ) for peer_id in peers_gen: stream = self.pubsub.peers[str(peer_id)] await stream.write(rpc_msg.SerializeToString()) - # packet = rpc_pb2.RPC() - # packet.ParseFromString(rpc_message) - - # from_peer_str = str(from_peer) - # for topic in pubsub_message.topicIDs: - # if topic not in self.pubsub.topics: - # continue - # peers = self.pubsub.peer_topics[topic] - # # Deliver to self if self was origin - # # Note: handle_talk checks if self is subscribed to topics in message - # for message in packet.publish: - # decoded_from_id = message.from_id.decode('utf-8') - # if msg_sender == decoded_from_id and msg_sender == str(self.pubsub.host.get_id()): - # id_in_seen_msgs = (message.seqno, message.from_id) - - # if id_in_seen_msgs not in self.pubsub.seen_messages: - # self.pubsub.seen_messages[id_in_seen_msgs] = 1 - - # await self.pubsub.handle_talk(message) - - # # Deliver to self and peers - # for topic in message.topicIDs: - # if topic in self.pubsub.peer_topics: - # for peer_id_in_topic in self.pubsub.peer_topics[topic]: - # # Forward to all known peers in the topic that are not the - # # message sender and are not the message origin - # if peer_id_in_topic not in (msg_sender, decoded_from_id): - # stream = self.pubsub.peers[peer_id_in_topic] - # # Create new packet with just publish message - # new_packet = rpc_pb2.RPC() - # new_packet.publish.extend([message]) - - # # Publish the packet - # await stream.write(new_packet.SerializeToString()) - def _get_peers_to_send( self, - topic_ids: Sequence[str], - from_peer_id: ID, - src_peer_id: ID) -> Generator[ID]: - # TODO: should send to self if `src_peer_id` is ourself? + topic_ids: Iterable[str], + src: ID, + origin: ID) -> Iterable[ID]: + """ + :return: the list of protocols supported by the router + """ for topic in topic_ids: - if topic not in self.pubsub.topics: + if topic not in self.pubsub.peer_topics: continue - for peer_id in self.pubsub.peer_topics[topic]: - if peer_id in (from_peer_id, src_peer_id): + for peer_id_str in self.pubsub.peer_topics[topic]: + peer_id = id_b58_decode(peer_id_str) + if peer_id in (src, origin): continue # FIXME: Should change `self.pubsub.peers` to Dict[PeerID, ...] if str(peer_id) not in self.pubsub.peers: diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index af37ee02..a7e542c0 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -45,7 +45,8 @@ class Pubsub: outgoing_messages: asyncio.Queue() seen_messages: LRU my_topics: Dict[str, asyncio.Queue] - peer_topics: Dict[str, List[ID]] + # FIXME: Should be changed to `Dict[str, List[ID]]` + peer_topics: Dict[str, List[str]] # FIXME: Should be changed to `Dict[ID, INetStream]` peers: Dict[str, INetStream] # NOTE: Be sure it is increased atomically everytime. @@ -320,23 +321,34 @@ class Pubsub: # Write message to stream await stream.write(rpc_msg) - def list_peers(self, topic_id: str) -> Tuple[ID]: + def list_peers(self, topic_id: str) -> Tuple[ID, ...]: return async def publish(self, topic_id: str, data: bytes) -> None: + """ + Publish data to a topic + :param topic_id: topic which we are going to publish the data to + :param data: data which we are publishing + """ msg = rpc_pb2.Message( data=data, topicIDs=[topic_id], + # Origin is myself. 
from_id=self.host.get_id().to_bytes(), seqno=self._next_seqno(), ) # TODO: Sign with our signing key - self.push_msg(self.host.get_id(), msg) + await self.push_msg(self.host.get_id(), msg) - async def push_msg(self, src: ID, msg: rpc_pb2.Message): - # TODO: - Check if the source is in the blacklist. If yes, reject. + async def push_msg(self, src: ID, msg: rpc_pb2.Message) -> None: + """ + Push a pubsub message to others. + :param src: the peer who forward us the message. + :param msg: the message we are going to push out. + """ + # TODO: - Check if the `source` is in the blacklist. If yes, reject. # TODO: - Check if the `from` is in the blacklist. If yes, reject. @@ -352,6 +364,9 @@ class Pubsub: await self.router.publish(src, msg) def _next_seqno(self) -> bytes: + """ + Make the next message sequence id. + """ self.counter += 1 return self.counter.to_bytes(8, 'big') diff --git a/libp2p/pubsub/pubsub_router_interface.py b/libp2p/pubsub/pubsub_router_interface.py index e581570c..6f69b9b0 100644 --- a/libp2p/pubsub/pubsub_router_interface.py +++ b/libp2p/pubsub/pubsub_router_interface.py @@ -42,11 +42,11 @@ class IPubsubRouter(ABC): """ @abstractmethod - def publish(self, sender_peer_id, rpc_message): + async def publish(self, src, pubsub_msg) -> None: """ Invoked to forward a new message that has been validated - :param sender_peer_id: peer_id of message sender - :param rpc_message: message to forward + :param src: peer_id of message sender + :param pubsub_msg: pubsub message to forward """ @abstractmethod diff --git a/tests/pubsub/test_floodsub.py b/tests/pubsub/test_floodsub.py index 2c67185c..389c217a 100644 --- a/tests/pubsub/test_floodsub.py +++ b/tests/pubsub/test_floodsub.py @@ -8,7 +8,12 @@ from libp2p.peer.peerinfo import info_from_p2p_addr from libp2p.pubsub.pb import rpc_pb2 from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.floodsub import FloodSub -from utils import message_id_generator, generate_RPC_packet + +from .utils import ( + make_pubsub_msg, + message_id_generator, + generate_RPC_packet, +) # pylint: disable=too-many-locals @@ -20,6 +25,7 @@ async def connect(node1, node2): info = info_from_p2p_addr(addr) await node1.connect(info) + @pytest.mark.asyncio async def test_simple_two_nodes(): node_a = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) @@ -29,6 +35,8 @@ async def test_simple_two_nodes(): await node_b.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) supported_protocols = ["/floodsub/1.0.0"] + topic = "my_topic" + data = b"some data" floodsub_a = FloodSub(supported_protocols) pubsub_a = Pubsub(node_a, floodsub_a, "a") @@ -38,26 +46,30 @@ async def test_simple_two_nodes(): await connect(node_a, node_b) await asyncio.sleep(0.25) - qb = await pubsub_b.subscribe("my_topic") + sub_b = await pubsub_b.subscribe(topic) await asyncio.sleep(0.25) - node_a_id = str(node_a.get_id()) - next_msg_id_func = message_id_generator(0) - msg = generate_RPC_packet(node_a_id, ["my_topic"], "some data", next_msg_id_func()) - await floodsub_a.publish(node_a_id, msg.SerializeToString()) + msg = make_pubsub_msg( + origin_id=node_a.get_id(), + topic_ids=[topic], + data=data, + seqno=next_msg_id_func(), + ) + await floodsub_a.publish(node_a.get_id(), msg) await asyncio.sleep(0.25) - res_b = await qb.get() + res_b = await sub_b.get() # Check that the msg received by node_b is the same # as the message sent by node_a - assert res_b.SerializeToString() == msg.publish[0].SerializeToString() + assert res_b.SerializeToString() == msg.SerializeToString() # Success, 
terminate pending tasks. await cleanup() + @pytest.mark.asyncio async def test_lru_cache_two_nodes(): # two nodes with cache_size of 4 @@ -100,7 +112,7 @@ async def test_lru_cache_two_nodes(): messages = [first_message] # for the next 5 messages for i in range(2, 6): - # write first message + # write first message await floodsub_a.publish(node_a_id, first_message.SerializeToString()) await asyncio.sleep(0.25) @@ -127,7 +139,7 @@ async def test_lru_cache_two_nodes(): res_b = await qb.get() assert res_b.SerializeToString() == first_message.publish[0].SerializeToString() assert qb.empty() - + # Success, terminate pending tasks. await cleanup() @@ -136,7 +148,7 @@ async def perform_test_from_obj(obj): """ Perform a floodsub test from a test obj. test obj are composed as follows: - + { "supported_protocols": ["supported/protocol/1.0.0",...], "adj_list": { @@ -190,7 +202,7 @@ async def perform_test_from_obj(obj): if neighbor_id not in node_map: neighbor_node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) await neighbor_node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - + node_map[neighbor_id] = neighbor_node floodsub = FloodSub(supported_protocols) diff --git a/tests/pubsub/utils.py b/tests/pubsub/utils.py index e406536e..24db5cf9 100644 --- a/tests/pubsub/utils.py +++ b/tests/pubsub/utils.py @@ -3,9 +3,14 @@ import multiaddr import uuid import random import struct +from typing import ( + Sequence, +) + from libp2p import new_node from libp2p.pubsub.pb import rpc_pb2 from libp2p.peer.peerinfo import info_from_p2p_addr +from libp2p.peer.id import ID from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.gossipsub import GossipSub @@ -29,6 +34,20 @@ def message_id_generator(start_val): return generator + +def make_pubsub_msg( + origin_id: ID, + topic_ids: Sequence[str], + data: bytes, + seqno: bytes) -> rpc_pb2.Message: + return rpc_pb2.Message( + from_id=origin_id.to_bytes(), + seqno=seqno, + data=data, + topicIDs=list(topic_ids), + ) + + def generate_RPC_packet(origin_id, topics, msg_content, msg_id): """ Generate RPC packet to send over wire @@ -42,7 +61,7 @@ def generate_RPC_packet(origin_id, topics, msg_content, msg_id): from_id=origin_id.encode('utf-8'), seqno=msg_id, data=msg_content.encode('utf-8'), - ) + ) for topic in topics: message.topicIDs.extend([topic.encode('utf-8')]) @@ -50,6 +69,7 @@ def generate_RPC_packet(origin_id, topics, msg_content, msg_id): packet.publish.extend([message]) return packet + async def connect(node1, node2): """ Connect node1 to node2 From dadcf8138eb4ff4ba49bd76d875f2a6d14902308 Mon Sep 17 00:00:00 2001 From: mhchia Date: Thu, 25 Jul 2019 16:58:00 +0800 Subject: [PATCH 07/17] Fix the tests according to `pubsub.Publish` And refactored a bit. 
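
The updated tests now go through `Pubsub` instead of hand-crafting an RPC
packet and passing a serialized string to `FloodSub.publish`. Roughly, the
pattern they exercise is (a sketch, with names from `test_floodsub.py`):

    sub_b = await pubsub_b.subscribe(topic)    # asyncio.Queue of rpc_pb2.Message
    await pubsub_a.publish(topic, b"some data")
    res_b = await sub_b.get()
    assert res_b.data == b"some data"
    assert ID(res_b.from_id) == node_a.get_id()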
--- libp2p/pubsub/gossipsub.py | 1 + libp2p/pubsub/pubsub.py | 29 ++++----- tests/pubsub/test_floodsub.py | 118 ++++++++++++++-------------------- 3 files changed, 62 insertions(+), 86 deletions(-) diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index 31ab606f..8f593a68 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -91,6 +91,7 @@ class GossipSub(IPubsubRouter): :param rpc: rpc message """ control_message = rpc.control + sender_peer_id = str(sender_peer_id) # Relay each rpc control to the appropriate handler if control_message.ihave: diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index a7e542c0..560b8c2c 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -128,26 +128,21 @@ class Pubsub: messages from other nodes :param stream: stream to continously read from """ - - # TODO check on types here - peer_id = str(stream.mplex_conn.peer_id) + peer_id = stream.mplex_conn.peer_id while True: incoming = (await stream.read()) rpc_incoming = rpc_pb2.RPC() rpc_incoming.ParseFromString(incoming) - should_publish = False - if rpc_incoming.publish: # deal with RPC.publish - for message in rpc_incoming.publish: - id_in_seen_msgs = (message.seqno, message.from_id) - if id_in_seen_msgs not in self.seen_messages: - should_publish = True - self.seen_messages[id_in_seen_msgs] = 1 - - await self.handle_talk(message) + for msg in rpc_incoming.publish: + if not self._is_subscribed_to_msg(msg): + continue + # TODO(mhchia): This will block this read_stream loop until all data are pushed. + # Should investigate further if this is an issue. + await self.push_msg(src=peer_id, msg=msg) if rpc_incoming.subscriptions: # deal with RPC.subscriptions @@ -158,10 +153,6 @@ class Pubsub: for message in rpc_incoming.subscriptions: self.handle_subscription(peer_id, message) - if should_publish: - # relay message to peers with router - await self.router.publish(peer_id, incoming) - if rpc_incoming.control: # Pass rpc to router so router could perform custom logic await self.router.handle_rpc(rpc_incoming, peer_id) @@ -228,6 +219,7 @@ class Pubsub: :param origin_id: id of the peer who subscribe to the message :param sub_message: RPC.SubOpts """ + origin_id = str(origin_id) if sub_message.subscribe: if sub_message.topicid not in self.peer_topics: self.peer_topics[sub_message.topicid] = [origin_id] @@ -379,3 +371,8 @@ class Pubsub: # FIXME: Mapping `msg_id` to `1` is quite awkward. Should investigate if there is a # more appropriate way. 
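         # (`seen_messages` is an `lru.LRU`, which only exposes a dict-like
         # interface, so the cache is effectively used as a bounded set and
         # the stored value is never read back.)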
self.seen_messages[msg_id] = 1 + + def _is_subscribed_to_msg(self, msg: rpc_pb2.Message) -> bool: + if len(self.my_topics) == 0: + return False + return all([topic in self.my_topics for topic in msg.topicIDs]) diff --git a/tests/pubsub/test_floodsub.py b/tests/pubsub/test_floodsub.py index 389c217a..82831b8f 100644 --- a/tests/pubsub/test_floodsub.py +++ b/tests/pubsub/test_floodsub.py @@ -4,17 +4,17 @@ import pytest from tests.utils import cleanup from libp2p import new_node +from libp2p.peer.id import ID from libp2p.peer.peerinfo import info_from_p2p_addr -from libp2p.pubsub.pb import rpc_pb2 from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.floodsub import FloodSub from .utils import ( - make_pubsub_msg, message_id_generator, generate_RPC_packet, ) + # pylint: disable=too-many-locals async def connect(node1, node2): @@ -39,106 +39,84 @@ async def test_simple_two_nodes(): data = b"some data" floodsub_a = FloodSub(supported_protocols) - pubsub_a = Pubsub(node_a, floodsub_a, "a") + pubsub_a = Pubsub(node_a, floodsub_a, ID(b"a" * 32)) floodsub_b = FloodSub(supported_protocols) - pubsub_b = Pubsub(node_b, floodsub_b, "b") + pubsub_b = Pubsub(node_b, floodsub_b, ID(b"b" * 32)) await connect(node_a, node_b) - await asyncio.sleep(0.25) + sub_b = await pubsub_b.subscribe(topic) - + # Sleep to let a know of b's subscription await asyncio.sleep(0.25) - next_msg_id_func = message_id_generator(0) - msg = make_pubsub_msg( - origin_id=node_a.get_id(), - topic_ids=[topic], - data=data, - seqno=next_msg_id_func(), - ) - await floodsub_a.publish(node_a.get_id(), msg) - await asyncio.sleep(0.25) + await pubsub_a.publish(topic, data) res_b = await sub_b.get() # Check that the msg received by node_b is the same # as the message sent by node_a - assert res_b.SerializeToString() == msg.SerializeToString() + assert ID(res_b.from_id) == node_a.get_id() + assert res_b.data == data + assert res_b.topicIDs == [topic] # Success, terminate pending tasks. await cleanup() @pytest.mark.asyncio -async def test_lru_cache_two_nodes(): +async def test_lru_cache_two_nodes(monkeypatch): # two nodes with cache_size of 4 - # node_a send the following messages to node_b - # [1, 1, 2, 1, 3, 1, 4, 1, 5, 1] - # node_b should only receive the following - # [1, 2, 3, 4, 5, 1] - node_a = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) - node_b = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) + # `node_a` send the following messages to node_b + message_indices = [1, 1, 2, 1, 3, 1, 4, 1, 5, 1] + # `node_b` should only receive the following + expected_received_indices = [1, 2, 3, 4, 5, 1] - await node_a.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - await node_b.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) + listen_maddr = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0") + node_a = await new_node() + node_b = await new_node() + + await node_a.get_network().listen(listen_maddr) + await node_b.get_network().listen(listen_maddr) supported_protocols = ["/floodsub/1.0.0"] + topic = "my_topic" - # initialize PubSub with a cache_size of 4 + # Mock `get_msg_id` to make us easier to manipulate `msg_id` by `data`. 
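+    # (patching the module attribute works because `_is_msg_seen` and
+    # `_mark_msg_seen` look `get_msg_id` up at call time; keying the id on
+    # `msg.data` lets the test resend "the same" message on demand)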
+ def get_msg_id(msg): + # Originally it is `(msg.seqno, msg.from_id)` + return (msg.data, msg.from_id) + import libp2p.pubsub.pubsub + monkeypatch.setattr(libp2p.pubsub.pubsub, "get_msg_id", get_msg_id) + + # Initialize Pubsub with a cache_size of 4 + cache_size = 4 floodsub_a = FloodSub(supported_protocols) - pubsub_a = Pubsub(node_a, floodsub_a, "a", 4) + pubsub_a = Pubsub(node_a, floodsub_a, ID(b"a" * 32), cache_size) + floodsub_b = FloodSub(supported_protocols) - pubsub_b = Pubsub(node_b, floodsub_b, "b", 4) + pubsub_b = Pubsub(node_b, floodsub_b, ID(b"b" * 32), cache_size) await connect(node_a, node_b) - - await asyncio.sleep(0.25) - qb = await pubsub_b.subscribe("my_topic") - await asyncio.sleep(0.25) - node_a_id = str(node_a.get_id()) - - # initialize message_id_generator - # store first message - next_msg_id_func = message_id_generator(0) - first_message = generate_RPC_packet(node_a_id, ["my_topic"], "some data 1", next_msg_id_func()) - - await floodsub_a.publish(node_a_id, first_message.SerializeToString()) - await asyncio.sleep(0.25) - print (first_message) - - messages = [first_message] - # for the next 5 messages - for i in range(2, 6): - # write first message - await floodsub_a.publish(node_a_id, first_message.SerializeToString()) - await asyncio.sleep(0.25) - - # generate and write next message - msg = generate_RPC_packet(node_a_id, ["my_topic"], "some data " + str(i), next_msg_id_func()) - messages.append(msg) - - await floodsub_a.publish(node_a_id, msg.SerializeToString()) - await asyncio.sleep(0.25) - - # write first message again - await floodsub_a.publish(node_a_id, first_message.SerializeToString()) + sub_b = await pubsub_b.subscribe(topic) await asyncio.sleep(0.25) - # check the first five messages in queue - # should only see 1 first_message - for i in range(5): - # Check that the msg received by node_b is the same - # as the message sent by node_a - res_b = await qb.get() - assert res_b.SerializeToString() == messages[i].publish[0].SerializeToString() + def _make_testing_data(i: int) -> bytes: + num_int_bytes = 4 + if i >= 2**(num_int_bytes * 8): + raise ValueError("") + return b"data" + i.to_bytes(num_int_bytes, "big") - # the 6th message should be first_message - res_b = await qb.get() - assert res_b.SerializeToString() == first_message.publish[0].SerializeToString() - assert qb.empty() + for index in message_indices: + await pubsub_a.publish(topic, _make_testing_data(index)) + await asyncio.sleep(0.25) + + for index in expected_received_indices: + res_b = await sub_b.get() + assert res_b.data == _make_testing_data(index) + assert sub_b.empty() # Success, terminate pending tasks. await cleanup() From 035d08b8bdc33a11b4f91e07ca6b3aab545749b4 Mon Sep 17 00:00:00 2001 From: mhchia Date: Thu, 25 Jul 2019 23:11:27 +0800 Subject: [PATCH 08/17] Fix `test_floodsub.py` --- libp2p/pubsub/floodsub.py | 7 +- tests/pubsub/__init__.py | 0 tests/pubsub/test_floodsub.py | 317 +++++++++++++++------------------- tests/pubsub/utils.py | 8 - tests/utils.py | 10 ++ 5 files changed, 159 insertions(+), 183 deletions(-) create mode 100644 tests/pubsub/__init__.py diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py index 03b7330f..0f9ebf46 100644 --- a/libp2p/pubsub/floodsub.py +++ b/libp2p/pubsub/floodsub.py @@ -76,6 +76,8 @@ class FloodSub(IPubsubRouter): ) for peer_id in peers_gen: stream = self.pubsub.peers[str(peer_id)] + # FIXME: We should add a `WriteMsg` similar to write delimited messages. 
+ # Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107 await stream.write(rpc_msg.SerializeToString()) def _get_peers_to_send( @@ -84,7 +86,10 @@ class FloodSub(IPubsubRouter): src: ID, origin: ID) -> Iterable[ID]: """ - :return: the list of protocols supported by the router + Get the eligible peers to send the data to. + :param src: the peer id of the peer who forwards the message to me. + :param origin: the peer id of the peer who originally broadcast the message. + :return: a generator of the peer ids who we send data to. """ for topic in topic_ids: if topic not in self.pubsub.peer_topics: diff --git a/tests/pubsub/__init__.py b/tests/pubsub/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/pubsub/test_floodsub.py b/tests/pubsub/test_floodsub.py index 82831b8f..3b4de2bf 100644 --- a/tests/pubsub/test_floodsub.py +++ b/tests/pubsub/test_floodsub.py @@ -2,13 +2,15 @@ import asyncio import multiaddr import pytest -from tests.utils import cleanup from libp2p import new_node from libp2p.peer.id import ID -from libp2p.peer.peerinfo import info_from_p2p_addr from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.floodsub import FloodSub +from tests.utils import ( + cleanup, + connect, +) from .utils import ( message_id_generator, generate_RPC_packet, @@ -16,25 +18,21 @@ from .utils import ( # pylint: disable=too-many-locals +FLOODSUB_PROTOCOL_ID = "/floodsub/1.0.0" +SUPPORTED_PROTOCOLS = [FLOODSUB_PROTOCOL_ID] -async def connect(node1, node2): - """ - Connect node1 to node2 - """ - addr = node2.get_addrs()[0] - info = info_from_p2p_addr(addr) - await node1.connect(info) +LISTEN_MADDR = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0") @pytest.mark.asyncio async def test_simple_two_nodes(): - node_a = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) - node_b = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) + node_a = await new_node(transport_opt=[str(LISTEN_MADDR)]) + node_b = await new_node(transport_opt=[str(LISTEN_MADDR)]) - await node_a.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - await node_b.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) + await node_a.get_network().listen(LISTEN_MADDR) + await node_b.get_network().listen(LISTEN_MADDR) - supported_protocols = ["/floodsub/1.0.0"] + supported_protocols = [FLOODSUB_PROTOCOL_ID] topic = "my_topic" data = b"some data" @@ -72,14 +70,13 @@ async def test_lru_cache_two_nodes(monkeypatch): # `node_b` should only receive the following expected_received_indices = [1, 2, 3, 4, 5, 1] - listen_maddr = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0") - node_a = await new_node() - node_b = await new_node() + node_a = await new_node(transport_opt=[str(LISTEN_MADDR)]) + node_b = await new_node(transport_opt=[str(LISTEN_MADDR)]) - await node_a.get_network().listen(listen_maddr) - await node_b.get_network().listen(listen_maddr) + await node_a.get_network().listen(LISTEN_MADDR) + await node_b.get_network().listen(LISTEN_MADDR) - supported_protocols = ["/floodsub/1.0.0"] + supported_protocols = SUPPORTED_PROTOCOLS topic = "my_topic" # Mock `get_msg_id` to make us easier to manipulate `msg_id` by `data`. 
@@ -106,7 +103,7 @@ async def test_lru_cache_two_nodes(monkeypatch): def _make_testing_data(i: int) -> bytes: num_int_bytes = 4 if i >= 2**(num_int_bytes * 8): - raise ValueError("") + raise ValueError("integer is too large to be serialized") return b"data" + i.to_bytes(num_int_bytes, "big") for index in message_indices: @@ -140,7 +137,7 @@ async def perform_test_from_obj(obj): "messages": [ { "topics": ["topic1_for_message", "topic2_for_message", ...], - "data": "some contents of the message (newlines are not supported)", + "data": b"some contents of the message (newlines are not supported)", "node_id": "message sender node id" }, ... @@ -157,45 +154,34 @@ async def perform_test_from_obj(obj): floodsub_map = {} pubsub_map = {} + async def add_node(node_id: str) -> None: + node = await new_node(transport_opt=[str(LISTEN_MADDR)]) + await node.get_network().listen(LISTEN_MADDR) + node_map[node_id] = node + floodsub = FloodSub(supported_protocols) + floodsub_map[node_id] = floodsub + pubsub = Pubsub(node, floodsub, ID(node_id.encode())) + pubsub_map[node_id] = pubsub + supported_protocols = obj["supported_protocols"] tasks_connect = [] for start_node_id in adj_list: # Create node if node does not yet exist if start_node_id not in node_map: - node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) - await node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - - node_map[start_node_id] = node - - floodsub = FloodSub(supported_protocols) - floodsub_map[start_node_id] = floodsub - pubsub = Pubsub(node, floodsub, start_node_id) - pubsub_map[start_node_id] = pubsub + await add_node(start_node_id) # For each neighbor of start_node, create if does not yet exist, # then connect start_node to neighbor for neighbor_id in adj_list[start_node_id]: # Create neighbor if neighbor does not yet exist if neighbor_id not in node_map: - neighbor_node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) - await neighbor_node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - - node_map[neighbor_id] = neighbor_node - - floodsub = FloodSub(supported_protocols) - floodsub_map[neighbor_id] = floodsub - pubsub = Pubsub(neighbor_node, floodsub, neighbor_id) - pubsub_map[neighbor_id] = pubsub - - # Connect node and neighbor - # await connect(node_map[start_node_id], node_map[neighbor_id]) - tasks_connect.append(asyncio.ensure_future(connect(node_map[start_node_id], node_map[neighbor_id]))) - tasks_connect.append(asyncio.sleep(2)) - await asyncio.gather(*tasks_connect) - - # Allow time for graph creation before continuing - # await asyncio.sleep(0.25) + await add_node(neighbor_id) + tasks_connect.append( + connect(node_map[start_node_id], node_map[neighbor_id]) + ) + # Connect nodes and wait at least for 2 seconds + await asyncio.gather(*tasks_connect, asyncio.sleep(2)) # Step 2) Subscribe to topics queues_map = {} @@ -203,89 +189,70 @@ async def perform_test_from_obj(obj): tasks_topic = [] tasks_topic_data = [] - for topic in topic_map: - for node_id in topic_map[topic]: - """ - # Subscribe node to topic - q = await pubsub_map[node_id].subscribe(topic) - - # Create topic-queue map for node_id if one does not yet exist - if node_id not in queues_map: - queues_map[node_id] = {} - - # Store queue in topic-queue map for node - queues_map[node_id][topic] = q - """ - tasks_topic.append(asyncio.ensure_future(pubsub_map[node_id].subscribe(topic))) + for topic, node_ids in topic_map.items(): + for node_id in node_ids: + tasks_topic.append(pubsub_map[node_id].subscribe(topic)) 
tasks_topic_data.append((node_id, topic)) tasks_topic.append(asyncio.sleep(2)) # Gather is like Promise.all responses = await asyncio.gather(*tasks_topic, return_exceptions=True) for i in range(len(responses) - 1): - q = responses[i] node_id, topic = tasks_topic_data[i] if node_id not in queues_map: queues_map[node_id] = {} - # Store queue in topic-queue map for node - queues_map[node_id][topic] = q + queues_map[node_id][topic] = responses[i] # Allow time for subscribing before continuing - # await asyncio.sleep(0.01) + await asyncio.sleep(0.01) # Step 3) Publish messages topics_in_msgs_ordered = [] messages = obj["messages"] tasks_publish = [] - next_msg_id_func = message_id_generator(0) for msg in messages: topics = msg["topics"] - data = msg["data"] node_id = msg["node_id"] - # Get actual id for sender node (not the id from the test obj) - actual_node_id = str(node_map[node_id].get_id()) - - # Create correctly formatted message - msg_talk = generate_RPC_packet(actual_node_id, topics, data, next_msg_id_func()) - # Publish message - # await floodsub_map[node_id].publish(actual_node_id, msg_talk.to_str()) - tasks_publish.append(asyncio.ensure_future(floodsub_map[node_id].publish(\ - actual_node_id, msg_talk.SerializeToString()))) + # FIXME: Should be single RPC package with several topics + for topic in topics: + tasks_publish.append( + pubsub_map[node_id].publish( + topic, + data, + ) + ) # For each topic in topics, add topic, msg_talk tuple to ordered test list # TODO: Update message sender to be correct message sender before # adding msg_talk to this list for topic in topics: - topics_in_msgs_ordered.append((topic, msg_talk)) + topics_in_msgs_ordered.append((topic, data)) # Allow time for publishing before continuing - # await asyncio.sleep(0.4) - tasks_publish.append(asyncio.sleep(2)) - await asyncio.gather(*tasks_publish) + await asyncio.gather(*tasks_publish, asyncio.sleep(2)) # Step 4) Check that all messages were received correctly. # TODO: Check message sender too - for i in range(len(topics_in_msgs_ordered)): - topic, actual_msg = topics_in_msgs_ordered[i] - + for topic, data in topics_in_msgs_ordered: # Look at each node in each topic for node_id in topic_map[topic]: # Get message from subscription queue - msg_on_node_str = await queues_map[node_id][topic].get() - assert actual_msg.publish[0].SerializeToString() == msg_on_node_str.SerializeToString() + msg = await queues_map[node_id][topic].get() + assert data == msg.data # Success, terminate pending tasks. 
await cleanup() + @pytest.mark.asyncio async def test_simple_two_nodes_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { "A": ["B"] }, @@ -295,7 +262,7 @@ async def test_simple_two_nodes_test_obj(): "messages": [ { "topics": ["topic1"], - "data": "foo", + "data": b"foo", "node_id": "A" } ] @@ -305,25 +272,25 @@ async def test_simple_two_nodes_test_obj(): @pytest.mark.asyncio async def test_three_nodes_two_topics_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { "A": ["B"], - "B": ["C"] + "B": ["C"], }, "topic_map": { "topic1": ["B", "C"], - "topic2": ["B", "C"] + "topic2": ["B", "C"], }, "messages": [ { "topics": ["topic1"], - "data": "foo", - "node_id": "A" + "data": b"foo", + "node_id": "A", }, { "topics": ["topic2"], - "data": "Alex is tall", - "node_id": "A" + "data": b"Alex is tall", + "node_id": "A", } ] } @@ -332,18 +299,18 @@ async def test_three_nodes_two_topics_test_obj(): @pytest.mark.asyncio async def test_two_nodes_one_topic_single_subscriber_is_sender_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { - "A": ["B"] + "A": ["B"], }, "topic_map": { - "topic1": ["B"] + "topic1": ["B"], }, "messages": [ { "topics": ["topic1"], - "data": "Alex is tall", - "node_id": "B" + "data": b"Alex is tall", + "node_id": "B", } ] } @@ -352,23 +319,23 @@ async def test_two_nodes_one_topic_single_subscriber_is_sender_test_obj(): @pytest.mark.asyncio async def test_two_nodes_one_topic_two_msgs_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { - "A": ["B"] + "A": ["B"], }, "topic_map": { - "topic1": ["B"] + "topic1": ["B"], }, "messages": [ { "topics": ["topic1"], - "data": "Alex is tall", - "node_id": "B" + "data": b"Alex is tall", + "node_id": "B", }, { "topics": ["topic1"], - "data": "foo", - "node_id": "A" + "data": b"foo", + "node_id": "A", } ] } @@ -377,20 +344,20 @@ async def test_two_nodes_one_topic_two_msgs_test_obj(): @pytest.mark.asyncio async def test_seven_nodes_tree_one_topics_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { "1": ["2", "3"], "2": ["4", "5"], - "3": ["6", "7"] + "3": ["6", "7"], }, "topic_map": { - "astrophysics": ["2", "3", "4", "5", "6", "7"] + "astrophysics": ["2", "3", "4", "5", "6", "7"], }, "messages": [ { "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" + "data": b"e=mc^2", + "node_id": "1", } ] } @@ -399,32 +366,32 @@ async def test_seven_nodes_tree_one_topics_test_obj(): @pytest.mark.asyncio async def test_seven_nodes_tree_three_topics_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { "1": ["2", "3"], "2": ["4", "5"], - "3": ["6", "7"] + "3": ["6", "7"], }, "topic_map": { "astrophysics": ["2", "3", "4", "5", "6", "7"], "space": ["2", "3", "4", "5", "6", "7"], - "onions": ["2", "3", "4", "5", "6", "7"] + "onions": ["2", "3", "4", "5", "6", "7"], }, "messages": [ { "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" + "data": b"e=mc^2", + "node_id": "1", }, { "topics": ["space"], - "data": "foobar", - "node_id": "1" + "data": b"foobar", + "node_id": "1", }, { "topics": ["onions"], - "data": "I am allergic", - "node_id": "1" + "data": b"I am allergic", + 
"node_id": "1", } ] } @@ -433,32 +400,32 @@ async def test_seven_nodes_tree_three_topics_test_obj(): @pytest.mark.asyncio async def test_seven_nodes_tree_three_topics_diff_origin_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { "1": ["2", "3"], "2": ["4", "5"], - "3": ["6", "7"] + "3": ["6", "7"], }, "topic_map": { "astrophysics": ["1", "2", "3", "4", "5", "6", "7"], "space": ["1", "2", "3", "4", "5", "6", "7"], - "onions": ["1", "2", "3", "4", "5", "6", "7"] + "onions": ["1", "2", "3", "4", "5", "6", "7"], }, "messages": [ { "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" + "data": b"e=mc^2", + "node_id": "1", }, { "topics": ["space"], - "data": "foobar", - "node_id": "4" + "data": b"foobar", + "node_id": "4", }, { "topics": ["onions"], - "data": "I am allergic", - "node_id": "7" + "data": b"I am allergic", + "node_id": "7", } ] } @@ -467,139 +434,141 @@ async def test_seven_nodes_tree_three_topics_diff_origin_test_obj(): @pytest.mark.asyncio async def test_three_nodes_clique_two_topic_diff_origin_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { "1": ["2", "3"], - "2": ["3"] + "2": ["3"], }, "topic_map": { "astrophysics": ["1", "2", "3"], - "school": ["1", "2", "3"] + "school": ["1", "2", "3"], }, "messages": [ { "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" + "data": b"e=mc^2", + "node_id": "1", }, { "topics": ["school"], - "data": "foobar", - "node_id": "2" + "data": b"foobar", + "node_id": "2", }, { "topics": ["astrophysics"], - "data": "I am allergic", - "node_id": "1" + "data": b"I am allergic", + "node_id": "1", } ] } await perform_test_from_obj(test_obj) + @pytest.mark.asyncio async def test_four_nodes_clique_two_topic_diff_origin_many_msgs_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { "1": ["2", "3", "4"], "2": ["1", "3", "4"], "3": ["1", "2", "4"], - "4": ["1", "2", "3"] + "4": ["1", "2", "3"], }, "topic_map": { "astrophysics": ["1", "2", "3", "4"], - "school": ["1", "2", "3", "4"] + "school": ["1", "2", "3", "4"], }, "messages": [ { "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" + "data": b"e=mc^2", + "node_id": "1", }, { "topics": ["school"], - "data": "foobar", - "node_id": "2" + "data": b"foobar", + "node_id": "2", }, { "topics": ["astrophysics"], - "data": "I am allergic", - "node_id": "1" + "data": b"I am allergic", + "node_id": "1", }, { "topics": ["school"], - "data": "foobar2", - "node_id": "2" + "data": b"foobar2", + "node_id": "2", }, { "topics": ["astrophysics"], - "data": "I am allergic2", - "node_id": "1" + "data": b"I am allergic2", + "node_id": "1", }, { "topics": ["school"], - "data": "foobar3", - "node_id": "2" + "data": b"foobar3", + "node_id": "2", }, { "topics": ["astrophysics"], - "data": "I am allergic3", - "node_id": "1" + "data": b"I am allergic3", + "node_id": "1", } ] } await perform_test_from_obj(test_obj) + @pytest.mark.asyncio async def test_five_nodes_ring_two_topic_diff_origin_many_msgs_test_obj(): test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], + "supported_protocols": SUPPORTED_PROTOCOLS, "adj_list": { "1": ["2"], "2": ["3"], "3": ["4"], "4": ["5"], - "5": ["1"] + "5": ["1"], }, "topic_map": { "astrophysics": ["1", "2", "3", "4", "5"], - "school": ["1", "2", "3", "4", "5"] + "school": ["1", "2", "3", "4", "5"], }, "messages": [ { 
"topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" + "data": b"e=mc^2", + "node_id": "1", }, { "topics": ["school"], - "data": "foobar", - "node_id": "2" + "data": b"foobar", + "node_id": "2", }, { "topics": ["astrophysics"], - "data": "I am allergic", - "node_id": "1" + "data": b"I am allergic", + "node_id": "1", }, { "topics": ["school"], - "data": "foobar2", - "node_id": "2" + "data": b"foobar2", + "node_id": "2", }, { "topics": ["astrophysics"], - "data": "I am allergic2", - "node_id": "1" + "data": b"I am allergic2", + "node_id": "1", }, { "topics": ["school"], - "data": "foobar3", - "node_id": "2" + "data": b"foobar3", + "node_id": "2", }, { "topics": ["astrophysics"], - "data": "I am allergic3", - "node_id": "1" + "data": b"I am allergic3", + "node_id": "1", } ] } diff --git a/tests/pubsub/utils.py b/tests/pubsub/utils.py index 24db5cf9..72d796f9 100644 --- a/tests/pubsub/utils.py +++ b/tests/pubsub/utils.py @@ -70,14 +70,6 @@ def generate_RPC_packet(origin_id, topics, msg_content, msg_id): return packet -async def connect(node1, node2): - """ - Connect node1 to node2 - """ - addr = node2.get_addrs()[0] - info = info_from_p2p_addr(addr) - await node1.connect(info) - async def create_libp2p_hosts(num_hosts): """ Create libp2p hosts diff --git a/tests/utils.py b/tests/utils.py index 4efde83c..686d0869 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -3,6 +3,16 @@ import asyncio import multiaddr from libp2p import new_node +from libp2p.peer.peerinfo import info_from_p2p_addr + + +async def connect(node1, node2): + """ + Connect node1 to node2 + """ + addr = node2.get_addrs()[0] + info = info_from_p2p_addr(addr) + await node1.connect(info) async def cleanup(): From 65aedcb25ad505b6b3c9fbb34bff7ef6f9946cd4 Mon Sep 17 00:00:00 2001 From: mhchia Date: Fri, 26 Jul 2019 18:35:25 +0800 Subject: [PATCH 09/17] Fix several tests --- libp2p/pubsub/floodsub.py | 30 ++-- libp2p/pubsub/gossipsub.py | 145 ++++++++++------- tests/pubsub/dummy_account_node.py | 50 +++--- tests/pubsub/test_dummyaccount_demo.py | 25 +-- tests/pubsub/test_gossipsub.py | 147 ++++++++---------- .../test_gossipsub_backward_compatibility.py | 52 +++---- tests/pubsub/utils.py | 9 ++ 7 files changed, 245 insertions(+), 213 deletions(-) diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py index 0f9ebf46..40aafc5b 100644 --- a/libp2p/pubsub/floodsub.py +++ b/libp2p/pubsub/floodsub.py @@ -80,6 +80,21 @@ class FloodSub(IPubsubRouter): # Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107 await stream.write(rpc_msg.SerializeToString()) + async def join(self, topic): + """ + Join notifies the router that we want to receive and + forward messages in a topic. It is invoked after the + subscription announcement + :param topic: topic to join + """ + + async def leave(self, topic): + """ + Leave notifies the router that we are no longer interested in a topic. + It is invoked after the unsubscription announcement. + :param topic: topic to leave + """ + def _get_peers_to_send( self, topic_ids: Iterable[str], @@ -102,18 +117,3 @@ class FloodSub(IPubsubRouter): if str(peer_id) not in self.pubsub.peers: continue yield peer_id - - async def join(self, topic): - """ - Join notifies the router that we want to receive and - forward messages in a topic. It is invoked after the - subscription announcement - :param topic: topic to join - """ - - async def leave(self, topic): - """ - Leave notifies the router that we are no longer interested in a topic. 
- It is invoked after the unsubscription announcement. - :param topic: topic to leave - """ diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index 8f593a68..8051a3b9 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -1,10 +1,22 @@ -import random import asyncio +import random +from typing import ( + Iterable, + List, + MutableSet, + Sequence, +) from ast import literal_eval + +from libp2p.peer.id import ( + ID, + id_b58_decode, +) + +from .mcache import MessageCache from .pb import rpc_pb2 from .pubsub_router_interface import IPubsubRouter -from .mcache import MessageCache class GossipSub(IPubsubRouter): @@ -107,70 +119,73 @@ class GossipSub(IPubsubRouter): for prune in control_message.prune: await self.handle_prune(prune, sender_peer_id) - async def publish(self, sender_peer_id, rpc_message): + async def publish(self, src: ID, pubsub_msg: rpc_pb2.Message) -> None: # pylint: disable=too-many-locals """ Invoked to forward a new message that has been validated. """ + self.mcache.put(pubsub_msg) - packet = rpc_pb2.RPC() - packet.ParseFromString(rpc_message) - msg_sender = str(sender_peer_id) + peers_gen = self._get_peers_to_send( + pubsub_msg.topicIDs, + src=src, + origin=ID(pubsub_msg.from_id), + ) + rpc_msg = rpc_pb2.RPC( + publish=[pubsub_msg], + ) + for peer_id in peers_gen: + stream = self.pubsub.peers[str(peer_id)] + # FIXME: We should add a `WriteMsg` similar to write delimited messages. + # Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107 + # TODO: Go use `sendRPC`, which possibly piggybacks gossip/control messages. + await stream.write(rpc_msg.SerializeToString()) - # Deliver to self if self was origin - # Note: handle_talk checks if self is subscribed to topics in message - for message in packet.publish: - # Add RPC message to cache - self.mcache.put(message) + def _get_peers_to_send( + self, + topic_ids: Iterable[str], + src: ID, + origin: ID) -> Iterable[ID]: + """ + Get the eligible peers to send the data to. + :param src: the peer id of the peer who forwards the message to me. + :param origin: the peer id of the peer who originally broadcast the message. + :return: a generator of the peer ids who we send data to. + """ + to_send: MutableSet[ID] = set() + for topic in topic_ids: + if topic not in self.pubsub.peer_topics: + continue - decoded_from_id = message.from_id.decode('utf-8') - new_packet = rpc_pb2.RPC() - new_packet.publish.extend([message]) - new_packet_serialized = new_packet.SerializeToString() + # floodsub peers + for peer_id_str in self.pubsub.peer_topics[topic]: + peer_id = id_b58_decode(peer_id_str) + # FIXME: `gossipsub.peers_floodsub` can be changed to `gossipsub.peers` in go. + # This will improve the efficiency when searching for a peer's protocol id. + if peer_id_str in self.peers_floodsub: + to_send.add(peer_id) - # Deliver to self if needed - if msg_sender == decoded_from_id and msg_sender == str(self.pubsub.host.get_id()): - id_in_seen_msgs = (message.seqno, message.from_id) + # gossipsub peers + # FIXME: Change `str` to `ID` + gossipsub_peers: List[str] = None + # TODO: Do we need to check `topic in self.pubsub.my_topics`? + if topic in self.mesh: + gossipsub_peers = self.mesh[topic] + else: + # TODO(robzajac): Is topic DEFINITELY supposed to be in fanout if we are not + # subscribed? 
+ # I assume there could be short periods between heartbeats where topic may not + # be but we should check that this path gets hit appropriately - if id_in_seen_msgs not in self.pubsub.seen_messages: - self.pubsub.seen_messages[id_in_seen_msgs] = 1 - - await self.pubsub.handle_talk(message) - - # Deliver to peers - for topic in message.topicIDs: - # If topic has floodsub peers, deliver to floodsub peers - # TODO: This can be done more efficiently. Do it more efficiently. - floodsub_peers_in_topic = [] - if topic in self.pubsub.peer_topics: - for peer in self.pubsub.peer_topics[topic]: - if str(peer) in self.peers_floodsub: - floodsub_peers_in_topic.append(peer) - - await self.deliver_messages_to_peers(floodsub_peers_in_topic, msg_sender, - decoded_from_id, new_packet_serialized) - - # If you are subscribed to topic, send to mesh, otherwise send to fanout - if topic in self.pubsub.my_topics and topic in self.mesh: - await self.deliver_messages_to_peers(self.mesh[topic], msg_sender, - decoded_from_id, new_packet_serialized) - else: - # Send to fanout peers - if topic not in self.fanout: - # If no peers in fanout, choose some peers from gossipsub peers in topic - gossipsub_peers_in_topic = [peer for peer in self.pubsub.peer_topics[topic] - if peer in self.peers_gossipsub] - - selected = \ - GossipSub.select_from_minus(self.degree, gossipsub_peers_in_topic, []) - self.fanout[topic] = selected - - # TODO: Is topic DEFINITELY supposed to be in fanout if we are not subscribed? - # I assume there could be short periods between heartbeats where topic may not - # be but we should check that this path gets hit appropriately - - await self.deliver_messages_to_peers(self.fanout[topic], msg_sender, - decoded_from_id, new_packet_serialized) + # pylint: disable=len-as-condition + if (topic not in self.fanout) or (len(self.fanout[topic]) == 0): + # If no peers in fanout, choose some peers from gossipsub peers in topic. + self.fanout[topic] = self._get_peers_from_minus(topic, self.degree, []) + gossipsub_peers = self.fanout[topic] + for peer_id_str in gossipsub_peers: + to_send.add(id_b58_decode(peer_id_str)) + # Excludes `src` and `origin` + yield from to_send.difference([src, origin]) async def join(self, topic): # Note: the comments here are the near-exact algorithm description from the spec @@ -401,6 +416,22 @@ class GossipSub(IPubsubRouter): return selection + def _get_peers_from_minus( + self, + topic: str, + num_to_select: int, + minus: Sequence[ID]) -> List[ID]: + gossipsub_peers_in_topic = [ + peer_str + for peer_str in self.pubsub.peer_topics[topic] + if peer_str in self.peers_gossipsub + ] + return self.select_from_minus( + num_to_select, + gossipsub_peers_in_topic, + list(minus), + ) + # RPC handlers async def handle_ihave(self, ihave_msg, sender_peer_id): diff --git a/tests/pubsub/dummy_account_node.py b/tests/pubsub/dummy_account_node.py index a6409971..84460283 100644 --- a/tests/pubsub/dummy_account_node.py +++ b/tests/pubsub/dummy_account_node.py @@ -2,11 +2,14 @@ import asyncio import multiaddr import uuid -from utils import message_id_generator, generate_RPC_packet from libp2p import new_node +from libp2p.host.host_interface import IHost from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.floodsub import FloodSub +from .utils import message_id_generator, generate_RPC_packet + + SUPPORTED_PUBSUB_PROTOCOLS = ["/floodsub/1.0.0"] CRYPTO_TOPIC = "ethereum" @@ -17,14 +20,25 @@ CRYPTO_TOPIC = "ethereum" # Ex. 
set,rob,5 # Determine message type by looking at first item before first comma -class DummyAccountNode(): + +class DummyAccountNode: """ - Node which has an internal balance mapping, meant to serve as + Node which has an internal balance mapping, meant to serve as a dummy crypto blockchain. There is no actual blockchain, just a simple map indicating how much crypto each user in the mappings holds """ + libp2p_node: IHost + pubsub: Pubsub + floodsub: FloodSub - def __init__(self): + def __init__( + self, + libp2p_node: IHost, + pubsub: Pubsub, + floodsub: FloodSub): + self.libp2p_node = libp2p_node + self.pubsub = pubsub + self.floodsub = floodsub self.balances = {} self.next_msg_id_func = message_id_generator(0) self.node_id = str(uuid.uuid1()) @@ -38,16 +52,21 @@ class DummyAccountNode(): We use create as this serves as a factory function and allows us to use async await, unlike the init function """ - self = DummyAccountNode() libp2p_node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) await libp2p_node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - self.libp2p_node = libp2p_node - - self.floodsub = FloodSub(SUPPORTED_PUBSUB_PROTOCOLS) - self.pubsub = Pubsub(self.libp2p_node, self.floodsub, "a") - return self + floodsub = FloodSub(SUPPORTED_PUBSUB_PROTOCOLS) + pubsub = Pubsub( + libp2p_node, + floodsub, + "a", + ) + return cls( + libp2p_node=libp2p_node, + pubsub=pubsub, + floodsub=floodsub, + ) async def handle_incoming_msgs(self): """ @@ -78,10 +97,8 @@ class DummyAccountNode(): :param dest_user: user to send crypto to :param amount: amount of crypto to send """ - my_id = str(self.libp2p_node.get_id()) msg_contents = "send," + source_user + "," + dest_user + "," + str(amount) - packet = generate_RPC_packet(my_id, [CRYPTO_TOPIC], msg_contents, self.next_msg_id_func()) - await self.floodsub.publish(my_id, packet.SerializeToString()) + await self.pubsub.publish(CRYPTO_TOPIC, msg_contents.encode()) async def publish_set_crypto(self, user, amount): """ @@ -89,18 +106,15 @@ class DummyAccountNode(): :param user: user to set crypto for :param amount: amount of crypto """ - my_id = str(self.libp2p_node.get_id()) msg_contents = "set," + user + "," + str(amount) - packet = generate_RPC_packet(my_id, [CRYPTO_TOPIC], msg_contents, self.next_msg_id_func()) - - await self.floodsub.publish(my_id, packet.SerializeToString()) + await self.pubsub.publish(CRYPTO_TOPIC, msg_contents.encode()) def handle_send_crypto(self, source_user, dest_user, amount): """ Handle incoming send_crypto message :param source_user: user to send crypto from :param dest_user: user to send crypto to - :param amount: amount of crypto to send + :param amount: amount of crypto to send """ if source_user in self.balances: self.balances[source_user] -= amount diff --git a/tests/pubsub/test_dummyaccount_demo.py b/tests/pubsub/test_dummyaccount_demo.py index 9fa2aa7b..b1c4a64a 100644 --- a/tests/pubsub/test_dummyaccount_demo.py +++ b/tests/pubsub/test_dummyaccount_demo.py @@ -1,35 +1,37 @@ import asyncio +from threading import Thread + import multiaddr + import pytest -from threading import Thread -from tests.utils import cleanup from libp2p import new_node from libp2p.peer.peerinfo import info_from_p2p_addr from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.floodsub import FloodSub -from dummy_account_node import DummyAccountNode + +from tests.utils import ( + cleanup, + connect, +) +from .dummy_account_node import DummyAccountNode # pylint: disable=too-many-locals -async def connect(node1, 
node2): - # node1 connects to node2 - addr = node2.get_addrs()[0] - info = info_from_p2p_addr(addr) - await node1.connect(info) def create_setup_in_new_thread_func(dummy_node): def setup_in_new_thread(): asyncio.ensure_future(dummy_node.setup_crypto_networking()) return setup_in_new_thread + async def perform_test(num_nodes, adjacency_map, action_func, assertion_func): """ Helper function to allow for easy construction of custom tests for dummy account nodes in various network topologies :param num_nodes: number of nodes in the test :param adjacency_map: adjacency map defining each node and its list of neighbors - :param action_func: function to execute that includes actions by the nodes, + :param action_func: function to execute that includes actions by the nodes, such as send crypto and set crypto :param assertion_func: assertions for testing the results of the actions are correct """ @@ -73,6 +75,7 @@ async def perform_test(num_nodes, adjacency_map, action_func, assertion_func): # Success, terminate pending tasks. await cleanup() + @pytest.mark.asyncio async def test_simple_two_nodes(): num_nodes = 2 @@ -86,6 +89,7 @@ async def test_simple_two_nodes(): await perform_test(num_nodes, adj_map, action_func, assertion_func) + @pytest.mark.asyncio async def test_simple_three_nodes_line_topography(): num_nodes = 3 @@ -99,6 +103,7 @@ async def test_simple_three_nodes_line_topography(): await perform_test(num_nodes, adj_map, action_func, assertion_func) + @pytest.mark.asyncio async def test_simple_three_nodes_triangle_topography(): num_nodes = 3 @@ -112,6 +117,7 @@ async def test_simple_three_nodes_triangle_topography(): await perform_test(num_nodes, adj_map, action_func, assertion_func) + @pytest.mark.asyncio async def test_simple_seven_nodes_tree_topography(): num_nodes = 7 @@ -125,6 +131,7 @@ async def test_simple_seven_nodes_tree_topography(): await perform_test(num_nodes, adj_map, action_func, assertion_func) + @pytest.mark.asyncio async def test_set_then_send_from_root_seven_nodes_tree_topography(): num_nodes = 7 diff --git a/tests/pubsub/test_gossipsub.py b/tests/pubsub/test_gossipsub.py index bb47135f..7e2c7575 100644 --- a/tests/pubsub/test_gossipsub.py +++ b/tests/pubsub/test_gossipsub.py @@ -1,11 +1,16 @@ import asyncio -import pytest import random -from utils import message_id_generator, generate_RPC_packet, \ +import pytest + +from tests.utils import ( + cleanup, + connect, +) + +from .utils import message_id_generator, generate_RPC_packet, \ create_libp2p_hosts, create_pubsub_and_gossipsub_instances, sparse_connect, dense_connect, \ - connect, one_to_all_connect -from tests.utils import cleanup + one_to_all_connect SUPPORTED_PROTOCOLS = ["/gossipsub/1.0.0"] @@ -41,13 +46,8 @@ async def test_join(): # Central node publish to the topic so that this topic # is added to central node's fanout - next_msg_id_func = message_id_generator(0) - msg_content = "" - host_id = str(libp2p_hosts[central_node_index].get_id()) - # Generate message packet - packet = generate_RPC_packet(host_id, [topic], msg_content, next_msg_id_func()) # publish from the randomly chosen host - await gossipsubs[central_node_index].publish(host_id, packet.SerializeToString()) + await pubsubs[central_node_index].publish(topic, b"") # Check that the gossipsub of central node has fanout for the topic assert topic in gossipsubs[central_node_index].fanout @@ -86,6 +86,8 @@ async def test_leave(): gossipsub = gossipsubs[0] topic = "test_leave" + assert topic not in gossipsub.mesh + await gossipsub.join(topic) assert topic in 
gossipsub.mesh @@ -205,14 +207,12 @@ async def test_handle_prune(): @pytest.mark.asyncio async def test_dense(): # Create libp2p hosts - next_msg_id_func = message_id_generator(0) - num_hosts = 10 num_msgs = 5 libp2p_hosts = await create_libp2p_hosts(num_hosts) # Create pubsub, gossipsub instances - pubsubs, gossipsubs = create_pubsub_and_gossipsub_instances(libp2p_hosts, \ + pubsubs, _ = create_pubsub_and_gossipsub_instances(libp2p_hosts, \ SUPPORTED_PROTOCOLS, \ 10, 9, 11, 30, 3, 5, 0.5) @@ -231,41 +231,35 @@ async def test_dense(): await asyncio.sleep(2) for i in range(num_msgs): - msg_content = "foo " + str(i) + msg_content = b"foo " + i.to_bytes(1, 'big') # randomly pick a message origin origin_idx = random.randint(0, num_hosts - 1) - origin_host = libp2p_hosts[origin_idx] - host_id = str(origin_host.get_id()) - - # Generate message packet - packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func()) # publish from the randomly chosen host - await gossipsubs[origin_idx].publish(host_id, packet.SerializeToString()) + await pubsubs[origin_idx].publish("foobar", msg_content) await asyncio.sleep(0.5) # Assert that all blocking queues receive the message for queue in queues: msg = await queue.get() - assert msg.data == packet.publish[0].data + assert msg.data == msg_content await cleanup() + @pytest.mark.asyncio async def test_fanout(): # Create libp2p hosts - next_msg_id_func = message_id_generator(0) - num_hosts = 10 num_msgs = 5 libp2p_hosts = await create_libp2p_hosts(num_hosts) # Create pubsub, gossipsub instances - pubsubs, gossipsubs = create_pubsub_and_gossipsub_instances(libp2p_hosts, \ + pubsubs, _ = create_pubsub_and_gossipsub_instances(libp2p_hosts, \ SUPPORTED_PROTOCOLS, \ 10, 9, 11, 30, 3, 5, 0.5) - # All pubsub subscribe to foobar + # All pubsub subscribe to foobar except for `pubsubs[0]` queues = [] for i in range(1, len(pubsubs)): q = await pubsubs[i].subscribe("foobar") @@ -279,71 +273,61 @@ async def test_fanout(): # Wait 2 seconds for heartbeat to allow mesh to connect await asyncio.sleep(2) + topic = "foobar" # Send messages with origin not subscribed for i in range(num_msgs): - msg_content = "foo " + str(i) + msg_content = b"foo " + i.to_bytes(1, "big") # Pick the message origin to the node that is not subscribed to 'foobar' origin_idx = 0 - origin_host = libp2p_hosts[origin_idx] - host_id = str(origin_host.get_id()) - - # Generate message packet - packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func()) # publish from the randomly chosen host - await gossipsubs[origin_idx].publish(host_id, packet.SerializeToString()) + await pubsubs[origin_idx].publish(topic, msg_content) await asyncio.sleep(0.5) # Assert that all blocking queues receive the message for queue in queues: msg = await queue.get() - assert msg.SerializeToString() == packet.publish[0].SerializeToString() + assert msg.data == msg_content # Subscribe message origin - queues.append(await pubsubs[0].subscribe("foobar")) + queues.insert(0, await pubsubs[0].subscribe(topic)) # Send messages again for i in range(num_msgs): - msg_content = "foo " + str(i) + msg_content = b"bar " + i.to_bytes(1, 'big') # Pick the message origin to the node that is not subscribed to 'foobar' origin_idx = 0 - origin_host = libp2p_hosts[origin_idx] - host_id = str(origin_host.get_id()) - - # Generate message packet - packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func()) # publish from the randomly chosen host - await gossipsubs[origin_idx].publish(host_id, 
packet.SerializeToString()) + await pubsubs[origin_idx].publish(topic, msg_content) await asyncio.sleep(0.5) # Assert that all blocking queues receive the message for queue in queues: msg = await queue.get() - assert msg.SerializeToString() == packet.publish[0].SerializeToString() + assert msg.data == msg_content await cleanup() @pytest.mark.asyncio async def test_fanout_maintenance(): # Create libp2p hosts - next_msg_id_func = message_id_generator(0) - num_hosts = 10 num_msgs = 5 libp2p_hosts = await create_libp2p_hosts(num_hosts) # Create pubsub, gossipsub instances - pubsubs, gossipsubs = create_pubsub_and_gossipsub_instances(libp2p_hosts, \ + pubsubs, _ = create_pubsub_and_gossipsub_instances(libp2p_hosts, \ SUPPORTED_PROTOCOLS, \ 10, 9, 11, 30, 3, 5, 0.5) # All pubsub subscribe to foobar queues = [] + topic = "foobar" for i in range(1, len(pubsubs)): - q = await pubsubs[i].subscribe("foobar") + q = await pubsubs[i].subscribe(topic) # Add each blocking queue to an array of blocking queues queues.append(q) @@ -356,27 +340,22 @@ async def test_fanout_maintenance(): # Send messages with origin not subscribed for i in range(num_msgs): - msg_content = "foo " + str(i) + msg_content = b"foo " + i.to_bytes(1, 'big') # Pick the message origin to the node that is not subscribed to 'foobar' origin_idx = 0 - origin_host = libp2p_hosts[origin_idx] - host_id = str(origin_host.get_id()) - - # Generate message packet - packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func()) # publish from the randomly chosen host - await gossipsubs[origin_idx].publish(host_id, packet.SerializeToString()) + await pubsubs[origin_idx].publish(topic, msg_content) await asyncio.sleep(0.5) # Assert that all blocking queues receive the message for queue in queues: msg = await queue.get() - assert msg.SerializeToString() == packet.publish[0].SerializeToString() + assert msg.data == msg_content for sub in pubsubs: - await sub.unsubscribe('foobar') + await sub.unsubscribe(topic) queues = [] @@ -384,7 +363,7 @@ async def test_fanout_maintenance(): # Resub and repeat for i in range(1, len(pubsubs)): - q = await pubsubs[i].subscribe("foobar") + q = await pubsubs[i].subscribe(topic) # Add each blocking queue to an array of blocking queues queues.append(q) @@ -393,65 +372,61 @@ async def test_fanout_maintenance(): # Check messages can still be sent for i in range(num_msgs): - msg_content = "foo " + str(i) + msg_content = b"bar " + i.to_bytes(1, 'big') # Pick the message origin to the node that is not subscribed to 'foobar' origin_idx = 0 - origin_host = libp2p_hosts[origin_idx] - host_id = str(origin_host.get_id()) - - # Generate message packet - packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func()) # publish from the randomly chosen host - await gossipsubs[origin_idx].publish(host_id, packet.SerializeToString()) + await pubsubs[origin_idx].publish(topic, msg_content) await asyncio.sleep(0.5) # Assert that all blocking queues receive the message for queue in queues: msg = await queue.get() - assert msg.SerializeToString() == packet.publish[0].SerializeToString() + assert msg.data == msg_content await cleanup() + @pytest.mark.asyncio async def test_gossip_propagation(): # Create libp2p hosts - next_msg_id_func = message_id_generator(0) - num_hosts = 2 - libp2p_hosts = await create_libp2p_hosts(num_hosts) + hosts = await create_libp2p_hosts(num_hosts) # Create pubsub, gossipsub instances - pubsubs, gossipsubs = create_pubsub_and_gossipsub_instances(libp2p_hosts, \ - 
SUPPORTED_PROTOCOLS, \ - 1, 0, 2, 30, 50, 100, 0.5) - node1, node2 = libp2p_hosts[0], libp2p_hosts[1] - sub1, sub2 = pubsubs[0], pubsubs[1] - gsub1, gsub2 = gossipsubs[0], gossipsubs[1] + pubsubs, _ = create_pubsub_and_gossipsub_instances( + hosts, + SUPPORTED_PROTOCOLS, + 1, + 0, + 2, + 30, + 50, + 100, + 0.5, + ) - node1_queue = await sub1.subscribe('foo') + topic = "foo" + await pubsubs[0].subscribe(topic) - # node 1 publish to topic - msg_content = 'foo_msg' - node1_id = str(node1.get_id()) - - # Generate message packet - packet = generate_RPC_packet(node1_id, ["foo"], msg_content, next_msg_id_func()) + # node 0 publish to topic + msg_content = b'foo_msg' # publish from the randomly chosen host - await gsub1.publish(node1_id, packet.SerializeToString()) + await pubsubs[0].publish(topic, msg_content) - # now node 2 subscribes - node2_queue = await sub2.subscribe('foo') + # now node 1 subscribes + queue_1 = await pubsubs[1].subscribe(topic) - await connect(node2, node1) + await connect(hosts[0], hosts[1]) # wait for gossip heartbeat await asyncio.sleep(2) # should be able to read message - msg = await node2_queue.get() - assert msg.SerializeToString() == packet.publish[0].SerializeToString() + msg = await queue_1.get() + assert msg.data == msg_content await cleanup() diff --git a/tests/pubsub/test_gossipsub_backward_compatibility.py b/tests/pubsub/test_gossipsub_backward_compatibility.py index 468e25fd..060973a5 100644 --- a/tests/pubsub/test_gossipsub_backward_compatibility.py +++ b/tests/pubsub/test_gossipsub_backward_compatibility.py @@ -8,18 +8,17 @@ from libp2p.pubsub.gossipsub import GossipSub from libp2p.pubsub.floodsub import FloodSub from libp2p.pubsub.pb import rpc_pb2 from libp2p.pubsub.pubsub import Pubsub -from utils import message_id_generator, generate_RPC_packet + from tests.utils import cleanup -# pylint: disable=too-many-locals +from .utils import ( + connect, + message_id_generator, + generate_RPC_packet, +) -async def connect(node1, node2): - """ - Connect node1 to node2 - """ - addr = node2.get_addrs()[0] - info = info_from_p2p_addr(addr) - await node1.connect(info) + +# pylint: disable=too-many-locals @pytest.mark.asyncio async def test_init(): @@ -37,11 +36,12 @@ async def test_init(): await cleanup() + async def perform_test_from_obj(obj): """ Perform a floodsub test from a test obj. 
test obj are composed as follows: - + { "supported_protocols": ["supported/protocol/1.0.0",...], "adj_list": { @@ -95,7 +95,7 @@ async def perform_test_from_obj(obj): if neighbor_id not in node_map: neighbor_node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) await neighbor_node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - + node_map[neighbor_id] = neighbor_node gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30) @@ -104,7 +104,7 @@ async def perform_test_from_obj(obj): pubsub_map[neighbor_id] = pubsub # Connect node and neighbor - tasks_connect.append(asyncio.ensure_future(connect(node_map[start_node_id], node_map[neighbor_id]))) + tasks_connect.append(connect(node_map[start_node_id], node_map[neighbor_id])) tasks_connect.append(asyncio.sleep(2)) await asyncio.gather(*tasks_connect) @@ -130,7 +130,7 @@ async def perform_test_from_obj(obj): # Store queue in topic-queue map for node queues_map[node_id][topic] = q """ - tasks_topic.append(asyncio.ensure_future(pubsub_map[node_id].subscribe(topic))) + tasks_topic.append(pubsub_map[node_id].subscribe(topic)) tasks_topic_data.append((node_id, topic)) tasks_topic.append(asyncio.sleep(2)) @@ -152,29 +152,27 @@ async def perform_test_from_obj(obj): topics_in_msgs_ordered = [] messages = obj["messages"] tasks_publish = [] - next_msg_id_func = message_id_generator(0) for msg in messages: topics = msg["topics"] - data = msg["data"] node_id = msg["node_id"] - # Get actual id for sender node (not the id from the test obj) - actual_node_id = str(node_map[node_id].get_id()) - - # Create correctly formatted message - msg_talk = generate_RPC_packet(actual_node_id, topics, data, next_msg_id_func()) - # Publish message - tasks_publish.append(asyncio.ensure_future(gossipsub_map[node_id].publish(\ - actual_node_id, msg_talk.SerializeToString()))) + # FIXME: This should be one RPC packet with several topics + for topic in topics: + tasks_publish.append( + pubsub_map[node_id].publish( + topic, + data, + ) + ) # For each topic in topics, add topic, msg_talk tuple to ordered test list # TODO: Update message sender to be correct message sender before # adding msg_talk to this list for topic in topics: - topics_in_msgs_ordered.append((topic, msg_talk)) + topics_in_msgs_ordered.append((topic, data)) # Allow time for publishing before continuing # await asyncio.sleep(0.4) @@ -183,14 +181,12 @@ async def perform_test_from_obj(obj): # Step 4) Check that all messages were received correctly. # TODO: Check message sender too - for i in range(len(topics_in_msgs_ordered)): - topic, actual_msg = topics_in_msgs_ordered[i] - + for topic, data in topics_in_msgs_ordered: # Look at each node in each topic for node_id in topic_map[topic]: # Get message from subscription queue msg_on_node = await queues_map[node_id][topic].get() - assert actual_msg.publish[0].SerializeToString() == msg_on_node.SerializeToString() + assert msg_on_node.data == data # Success, terminate pending tasks. 
    await cleanup()
diff --git a/tests/pubsub/utils.py b/tests/pubsub/utils.py
index 72d796f9..bb49a2f8 100644
--- a/tests/pubsub/utils.py
+++ b/tests/pubsub/utils.py
@@ -14,6 +14,8 @@ from libp2p.peer.id import ID
 from libp2p.pubsub.pubsub import Pubsub
 from libp2p.pubsub.gossipsub import GossipSub
 
+from tests.utils import connect
+
 
 def message_id_generator(start_val):
     """
@@ -22,6 +24,7 @@ def message_id_generator(start_val):
     :return: message id
     """
     val = start_val
+
     def generator():
         # Allow manipulation of val within closure
         nonlocal val
@@ -105,6 +108,10 @@ def create_pubsub_and_gossipsub_instances(libp2p_hosts, supported_protocols, deg
 
     return pubsubs, gossipsubs
 
+
+# FIXME: There is no difference between `sparse_connect` and `dense_connect`
+# until `connect_some` is fixed.
+
 async def sparse_connect(hosts):
     await connect_some(hosts, 3)
 
@@ -113,6 +120,7 @@ async def dense_connect(hosts):
     await connect_some(hosts, 10)
 
 
+# FIXME: `degree` is not used at all
 async def connect_some(hosts, degree):
     for i, host in enumerate(hosts):
         for j, host2 in enumerate(hosts):
@@ -135,6 +143,7 @@
 #             j += 1
+
 async def one_to_all_connect(hosts, central_host_index):
     for i, host in enumerate(hosts):
         if i != central_host_index:

From c028aef2de5c3e6d899df65766d5973670f80599 Mon Sep 17 00:00:00 2001
From: mhchia
Date: Sat, 27 Jul 2019 11:49:03 +0800
Subject: [PATCH 10/17] Fix all tests

- Dedup `perform_test_from_obj` and the test cases used in both
  `test_floodsub` and `test_gossipsub_backward_compatibility.py`.
  Therefore, they are put in the standalone file
  `tests/pubsub/floodsub_integration_test_settings.py`. The functions and
  test cases are then imported from there.
- IMO the tests still need a refactor; there is still some duplicated code.
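The dedup works by having the shared `perform_test_from_obj` accept a
`router_factory` callable instead of a hard-coded router class; each test
module then binds its own factory. A minimal sketch of that pattern (the
parameter values mirror the diffs below; the `*_factory` names are
illustrative only, not part of the patch):

    import functools

    from libp2p.pubsub.floodsub import FloodSub
    from libp2p.pubsub.gossipsub import GossipSub

    # FloodSub's constructor already has the shape the shared helper
    # expects, `router_factory(protocols=...)`, so the class itself can
    # be passed as the factory.
    floodsub_factory = FloodSub

    # GossipSub needs its tuning knobs pre-bound; `functools.partial`
    # turns it into the same one-argument factory.
    gossipsub_factory = functools.partial(
        GossipSub,
        degree=3,
        degree_low=2,
        degree_high=4,
        time_to_live=30,
    )

    # Inside the shared helper, a router is then built per node with:
    #     pubsub_router = router_factory(protocols=obj["supported_protocols"])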
--- libp2p/pubsub/gossipsub.py | 2 +- tests/pubsub/configs.py | 7 + tests/pubsub/dummy_account_node.py | 4 +- .../floodsub_integration_test_settings.py | 448 +++++++++++++++ tests/pubsub/test_floodsub.py | 480 +--------------- tests/pubsub/test_gossipsub.py | 10 +- .../test_gossipsub_backward_compatibility.py | 519 +----------------- tests/pubsub/utils.py | 18 +- 8 files changed, 526 insertions(+), 962 deletions(-) create mode 100644 tests/pubsub/configs.py create mode 100644 tests/pubsub/floodsub_integration_test_settings.py diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index 8051a3b9..b65462dd 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -32,8 +32,8 @@ class GossipSub(IPubsubRouter): # Store target degree, upper degree bound, and lower degree bound self.degree = degree - self.degree_high = degree_high self.degree_low = degree_low + self.degree_high = degree_high # Store time to live (for topics in fanout) self.time_to_live = time_to_live diff --git a/tests/pubsub/configs.py b/tests/pubsub/configs.py new file mode 100644 index 00000000..d6849f7c --- /dev/null +++ b/tests/pubsub/configs.py @@ -0,0 +1,7 @@ +import multiaddr + + +FLOODSUB_PROTOCOL_ID = "/floodsub/1.0.0" +SUPPORTED_PROTOCOLS = [FLOODSUB_PROTOCOL_ID] + +LISTEN_MADDR = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0") diff --git a/tests/pubsub/dummy_account_node.py b/tests/pubsub/dummy_account_node.py index 84460283..f0a95f87 100644 --- a/tests/pubsub/dummy_account_node.py +++ b/tests/pubsub/dummy_account_node.py @@ -1,7 +1,8 @@ import asyncio -import multiaddr import uuid +import multiaddr + from libp2p import new_node from libp2p.host.host_interface import IHost from libp2p.pubsub.pubsub import Pubsub @@ -144,4 +145,3 @@ class DummyAccountNode: return self.balances[user] else: return -1 - diff --git a/tests/pubsub/floodsub_integration_test_settings.py b/tests/pubsub/floodsub_integration_test_settings.py new file mode 100644 index 00000000..eac62baf --- /dev/null +++ b/tests/pubsub/floodsub_integration_test_settings.py @@ -0,0 +1,448 @@ +import asyncio + +import pytest + +import multiaddr + +from libp2p import new_node +from libp2p.peer.id import ID +from libp2p.pubsub.pubsub import Pubsub + +from tests.utils import ( + cleanup, + connect, +) + + +FLOODSUB_PROTOCOL_ID = "/floodsub/1.0.0" +SUPPORTED_PROTOCOLS = [FLOODSUB_PROTOCOL_ID] + +FLOODSUB_PROTOCOL_TEST_CASES = [ + { + "name": "simple_two_nodes", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "A": ["B"] + }, + "topic_map": { + "topic1": ["B"] + }, + "messages": [ + { + "topics": ["topic1"], + "data": b"foo", + "node_id": "A" + } + ] + }, + { + "name": "three_nodes_two_topics", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "A": ["B"], + "B": ["C"], + }, + "topic_map": { + "topic1": ["B", "C"], + "topic2": ["B", "C"], + }, + "messages": [ + { + "topics": ["topic1"], + "data": b"foo", + "node_id": "A", + }, + { + "topics": ["topic2"], + "data": b"Alex is tall", + "node_id": "A", + } + ] + }, + { + "name": "two_nodes_one_topic_single_subscriber_is_sender", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "A": ["B"], + }, + "topic_map": { + "topic1": ["B"], + }, + "messages": [ + { + "topics": ["topic1"], + "data": b"Alex is tall", + "node_id": "B", + } + ] + }, + { + "name": "two_nodes_one_topic_two_msgs", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "A": ["B"], + }, + "topic_map": { + "topic1": ["B"], + }, + "messages": [ + { + "topics": ["topic1"], + "data": 
b"Alex is tall", + "node_id": "B", + }, + { + "topics": ["topic1"], + "data": b"foo", + "node_id": "A", + } + ] + }, + { + "name": "seven_nodes_tree_one_topics", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "1": ["2", "3"], + "2": ["4", "5"], + "3": ["6", "7"], + }, + "topic_map": { + "astrophysics": ["2", "3", "4", "5", "6", "7"], + }, + "messages": [ + { + "topics": ["astrophysics"], + "data": b"e=mc^2", + "node_id": "1", + } + ] + }, + { + "name": "seven_nodes_tree_three_topics", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "1": ["2", "3"], + "2": ["4", "5"], + "3": ["6", "7"], + }, + "topic_map": { + "astrophysics": ["2", "3", "4", "5", "6", "7"], + "space": ["2", "3", "4", "5", "6", "7"], + "onions": ["2", "3", "4", "5", "6", "7"], + }, + "messages": [ + { + "topics": ["astrophysics"], + "data": b"e=mc^2", + "node_id": "1", + }, + { + "topics": ["space"], + "data": b"foobar", + "node_id": "1", + }, + { + "topics": ["onions"], + "data": b"I am allergic", + "node_id": "1", + } + ] + }, + { + "name": "seven_nodes_tree_three_topics_diff_origin", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "1": ["2", "3"], + "2": ["4", "5"], + "3": ["6", "7"], + }, + "topic_map": { + "astrophysics": ["1", "2", "3", "4", "5", "6", "7"], + "space": ["1", "2", "3", "4", "5", "6", "7"], + "onions": ["1", "2", "3", "4", "5", "6", "7"], + }, + "messages": [ + { + "topics": ["astrophysics"], + "data": b"e=mc^2", + "node_id": "1", + }, + { + "topics": ["space"], + "data": b"foobar", + "node_id": "4", + }, + { + "topics": ["onions"], + "data": b"I am allergic", + "node_id": "7", + } + ] + }, + { + "name": "three_nodes_clique_two_topic_diff_origin", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "1": ["2", "3"], + "2": ["3"], + }, + "topic_map": { + "astrophysics": ["1", "2", "3"], + "school": ["1", "2", "3"], + }, + "messages": [ + { + "topics": ["astrophysics"], + "data": b"e=mc^2", + "node_id": "1", + }, + { + "topics": ["school"], + "data": b"foobar", + "node_id": "2", + }, + { + "topics": ["astrophysics"], + "data": b"I am allergic", + "node_id": "1", + } + ] + }, + { + "name": "four_nodes_clique_two_topic_diff_origin_many_msgs", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "1": ["2", "3", "4"], + "2": ["1", "3", "4"], + "3": ["1", "2", "4"], + "4": ["1", "2", "3"], + }, + "topic_map": { + "astrophysics": ["1", "2", "3", "4"], + "school": ["1", "2", "3", "4"], + }, + "messages": [ + { + "topics": ["astrophysics"], + "data": b"e=mc^2", + "node_id": "1", + }, + { + "topics": ["school"], + "data": b"foobar", + "node_id": "2", + }, + { + "topics": ["astrophysics"], + "data": b"I am allergic", + "node_id": "1", + }, + { + "topics": ["school"], + "data": b"foobar2", + "node_id": "2", + }, + { + "topics": ["astrophysics"], + "data": b"I am allergic2", + "node_id": "1", + }, + { + "topics": ["school"], + "data": b"foobar3", + "node_id": "2", + }, + { + "topics": ["astrophysics"], + "data": b"I am allergic3", + "node_id": "1", + } + ] + }, + { + "name": "five_nodes_ring_two_topic_diff_origin_many_msgs", + "supported_protocols": SUPPORTED_PROTOCOLS, + "adj_list": { + "1": ["2"], + "2": ["3"], + "3": ["4"], + "4": ["5"], + "5": ["1"], + }, + "topic_map": { + "astrophysics": ["1", "2", "3", "4", "5"], + "school": ["1", "2", "3", "4", "5"], + }, + "messages": [ + { + "topics": ["astrophysics"], + "data": b"e=mc^2", + "node_id": "1", + }, + { + "topics": ["school"], + "data": b"foobar", + "node_id": "2", + }, + { + "topics": 
["astrophysics"], + "data": b"I am allergic", + "node_id": "1", + }, + { + "topics": ["school"], + "data": b"foobar2", + "node_id": "2", + }, + { + "topics": ["astrophysics"], + "data": b"I am allergic2", + "node_id": "1", + }, + { + "topics": ["school"], + "data": b"foobar3", + "node_id": "2", + }, + { + "topics": ["astrophysics"], + "data": b"I am allergic3", + "node_id": "1", + } + ] + } +] + +# pylint: disable=invalid-name +floodsub_protocol_pytest_params = [ + pytest.param(test_case, id=test_case["name"]) + for test_case in FLOODSUB_PROTOCOL_TEST_CASES +] + + +# pylint: disable=too-many-locals +async def perform_test_from_obj(obj, router_factory): + """ + Perform pubsub tests from a test obj. + test obj are composed as follows: + + { + "supported_protocols": ["supported/protocol/1.0.0",...], + "adj_list": { + "node1": ["neighbor1_of_node1", "neighbor2_of_node1", ...], + "node2": ["neighbor1_of_node2", "neighbor2_of_node2", ...], + ... + }, + "topic_map": { + "topic1": ["node1_subscribed_to_topic1", "node2_subscribed_to_topic1", ...] + }, + "messages": [ + { + "topics": ["topic1_for_message", "topic2_for_message", ...], + "data": b"some contents of the message (newlines are not supported)", + "node_id": "message sender node id" + }, + ... + ] + } + NOTE: In adj_list, for any neighbors A and B, only list B as a neighbor of A + or B as a neighbor of A once. Do NOT list both A: ["B"] and B:["A"] as the behavior + is undefined (even if it may work) + """ + listen_maddr = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0") + + # Step 1) Create graph + adj_list = obj["adj_list"] + node_map = {} + pubsub_map = {} + + async def add_node(node_id: str) -> None: + node = await new_node(transport_opt=[str(listen_maddr)]) + await node.get_network().listen(listen_maddr) + node_map[node_id] = node + pubsub_router = router_factory(protocols=obj["supported_protocols"]) + pubsub = Pubsub(node, pubsub_router, ID(node_id.encode())) + pubsub_map[node_id] = pubsub + + tasks_connect = [] + for start_node_id in adj_list: + # Create node if node does not yet exist + if start_node_id not in node_map: + await add_node(start_node_id) + + # For each neighbor of start_node, create if does not yet exist, + # then connect start_node to neighbor + for neighbor_id in adj_list[start_node_id]: + # Create neighbor if neighbor does not yet exist + if neighbor_id not in node_map: + await add_node(neighbor_id) + tasks_connect.append( + connect(node_map[start_node_id], node_map[neighbor_id]) + ) + # Connect nodes and wait at least for 2 seconds + await asyncio.gather(*tasks_connect, asyncio.sleep(2)) + + # Step 2) Subscribe to topics + queues_map = {} + topic_map = obj["topic_map"] + + tasks_topic = [] + tasks_topic_data = [] + for topic, node_ids in topic_map.items(): + for node_id in node_ids: + tasks_topic.append(pubsub_map[node_id].subscribe(topic)) + tasks_topic_data.append((node_id, topic)) + tasks_topic.append(asyncio.sleep(2)) + + # Gather is like Promise.all + responses = await asyncio.gather(*tasks_topic, return_exceptions=True) + for i in range(len(responses) - 1): + node_id, topic = tasks_topic_data[i] + if node_id not in queues_map: + queues_map[node_id] = {} + # Store queue in topic-queue map for node + queues_map[node_id][topic] = responses[i] + + # Allow time for subscribing before continuing + await asyncio.sleep(0.01) + + # Step 3) Publish messages + topics_in_msgs_ordered = [] + messages = obj["messages"] + tasks_publish = [] + + for msg in messages: + topics = msg["topics"] + data = msg["data"] + node_id = 
msg["node_id"] + + # Publish message + # FIXME: Should be single RPC package with several topics + for topic in topics: + tasks_publish.append( + pubsub_map[node_id].publish( + topic, + data, + ) + ) + + # For each topic in topics, add topic, msg_talk tuple to ordered test list + # TODO: Update message sender to be correct message sender before + # adding msg_talk to this list + for topic in topics: + topics_in_msgs_ordered.append((topic, data)) + + # Allow time for publishing before continuing + await asyncio.gather(*tasks_publish, asyncio.sleep(2)) + + # Step 4) Check that all messages were received correctly. + # TODO: Check message sender too + for topic, data in topics_in_msgs_ordered: + # Look at each node in each topic + for node_id in topic_map[topic]: + # Get message from subscription queue + msg = await queues_map[node_id][topic].get() + assert data == msg.data + + # Success, terminate pending tasks. + await cleanup() diff --git a/tests/pubsub/test_floodsub.py b/tests/pubsub/test_floodsub.py index 3b4de2bf..c3d3e24e 100644 --- a/tests/pubsub/test_floodsub.py +++ b/tests/pubsub/test_floodsub.py @@ -4,24 +4,26 @@ import pytest from libp2p import new_node from libp2p.peer.id import ID -from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.floodsub import FloodSub +from libp2p.pubsub.pubsub import Pubsub from tests.utils import ( cleanup, connect, ) -from .utils import ( - message_id_generator, - generate_RPC_packet, + +from .configs import ( + FLOODSUB_PROTOCOL_ID, + LISTEN_MADDR, +) +from .floodsub_integration_test_settings import ( + perform_test_from_obj, + floodsub_protocol_pytest_params, ) -# pylint: disable=too-many-locals -FLOODSUB_PROTOCOL_ID = "/floodsub/1.0.0" SUPPORTED_PROTOCOLS = [FLOODSUB_PROTOCOL_ID] - -LISTEN_MADDR = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0") +# pylint: disable=too-many-locals @pytest.mark.asyncio @@ -119,457 +121,13 @@ async def test_lru_cache_two_nodes(monkeypatch): await cleanup() -async def perform_test_from_obj(obj): - """ - Perform a floodsub test from a test obj. - test obj are composed as follows: - - { - "supported_protocols": ["supported/protocol/1.0.0",...], - "adj_list": { - "node1": ["neighbor1_of_node1", "neighbor2_of_node1", ...], - "node2": ["neighbor1_of_node2", "neighbor2_of_node2", ...], - ... - }, - "topic_map": { - "topic1": ["node1_subscribed_to_topic1", "node2_subscribed_to_topic1", ...] - }, - "messages": [ - { - "topics": ["topic1_for_message", "topic2_for_message", ...], - "data": b"some contents of the message (newlines are not supported)", - "node_id": "message sender node id" - }, - ... - ] - } - NOTE: In adj_list, for any neighbors A and B, only list B as a neighbor of A - or B as a neighbor of A once. 
Do NOT list both A: ["B"] and B:["A"] as the behavior - is undefined (even if it may work) - """ - - # Step 1) Create graph - adj_list = obj["adj_list"] - node_map = {} - floodsub_map = {} - pubsub_map = {} - - async def add_node(node_id: str) -> None: - node = await new_node(transport_opt=[str(LISTEN_MADDR)]) - await node.get_network().listen(LISTEN_MADDR) - node_map[node_id] = node - floodsub = FloodSub(supported_protocols) - floodsub_map[node_id] = floodsub - pubsub = Pubsub(node, floodsub, ID(node_id.encode())) - pubsub_map[node_id] = pubsub - - supported_protocols = obj["supported_protocols"] - - tasks_connect = [] - for start_node_id in adj_list: - # Create node if node does not yet exist - if start_node_id not in node_map: - await add_node(start_node_id) - - # For each neighbor of start_node, create if does not yet exist, - # then connect start_node to neighbor - for neighbor_id in adj_list[start_node_id]: - # Create neighbor if neighbor does not yet exist - if neighbor_id not in node_map: - await add_node(neighbor_id) - tasks_connect.append( - connect(node_map[start_node_id], node_map[neighbor_id]) - ) - # Connect nodes and wait at least for 2 seconds - await asyncio.gather(*tasks_connect, asyncio.sleep(2)) - - # Step 2) Subscribe to topics - queues_map = {} - topic_map = obj["topic_map"] - - tasks_topic = [] - tasks_topic_data = [] - for topic, node_ids in topic_map.items(): - for node_id in node_ids: - tasks_topic.append(pubsub_map[node_id].subscribe(topic)) - tasks_topic_data.append((node_id, topic)) - tasks_topic.append(asyncio.sleep(2)) - - # Gather is like Promise.all - responses = await asyncio.gather(*tasks_topic, return_exceptions=True) - for i in range(len(responses) - 1): - node_id, topic = tasks_topic_data[i] - if node_id not in queues_map: - queues_map[node_id] = {} - # Store queue in topic-queue map for node - queues_map[node_id][topic] = responses[i] - - # Allow time for subscribing before continuing - await asyncio.sleep(0.01) - - # Step 3) Publish messages - topics_in_msgs_ordered = [] - messages = obj["messages"] - tasks_publish = [] - - for msg in messages: - topics = msg["topics"] - data = msg["data"] - node_id = msg["node_id"] - - # Publish message - # FIXME: Should be single RPC package with several topics - for topic in topics: - tasks_publish.append( - pubsub_map[node_id].publish( - topic, - data, - ) - ) - - # For each topic in topics, add topic, msg_talk tuple to ordered test list - # TODO: Update message sender to be correct message sender before - # adding msg_talk to this list - for topic in topics: - topics_in_msgs_ordered.append((topic, data)) - - # Allow time for publishing before continuing - await asyncio.gather(*tasks_publish, asyncio.sleep(2)) - - # Step 4) Check that all messages were received correctly. - # TODO: Check message sender too - for topic, data in topics_in_msgs_ordered: - # Look at each node in each topic - for node_id in topic_map[topic]: - # Get message from subscription queue - msg = await queues_map[node_id][topic].get() - assert data == msg.data - - # Success, terminate pending tasks. 
- await cleanup() - - +@pytest.mark.parametrize( + "test_case_obj", + floodsub_protocol_pytest_params, +) @pytest.mark.asyncio -async def test_simple_two_nodes_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "A": ["B"] - }, - "topic_map": { - "topic1": ["B"] - }, - "messages": [ - { - "topics": ["topic1"], - "data": b"foo", - "node_id": "A" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_three_nodes_two_topics_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "A": ["B"], - "B": ["C"], - }, - "topic_map": { - "topic1": ["B", "C"], - "topic2": ["B", "C"], - }, - "messages": [ - { - "topics": ["topic1"], - "data": b"foo", - "node_id": "A", - }, - { - "topics": ["topic2"], - "data": b"Alex is tall", - "node_id": "A", - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_two_nodes_one_topic_single_subscriber_is_sender_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "A": ["B"], - }, - "topic_map": { - "topic1": ["B"], - }, - "messages": [ - { - "topics": ["topic1"], - "data": b"Alex is tall", - "node_id": "B", - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_two_nodes_one_topic_two_msgs_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "A": ["B"], - }, - "topic_map": { - "topic1": ["B"], - }, - "messages": [ - { - "topics": ["topic1"], - "data": b"Alex is tall", - "node_id": "B", - }, - { - "topics": ["topic1"], - "data": b"foo", - "node_id": "A", - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_seven_nodes_tree_one_topics_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "1": ["2", "3"], - "2": ["4", "5"], - "3": ["6", "7"], - }, - "topic_map": { - "astrophysics": ["2", "3", "4", "5", "6", "7"], - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": b"e=mc^2", - "node_id": "1", - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_seven_nodes_tree_three_topics_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "1": ["2", "3"], - "2": ["4", "5"], - "3": ["6", "7"], - }, - "topic_map": { - "astrophysics": ["2", "3", "4", "5", "6", "7"], - "space": ["2", "3", "4", "5", "6", "7"], - "onions": ["2", "3", "4", "5", "6", "7"], - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": b"e=mc^2", - "node_id": "1", - }, - { - "topics": ["space"], - "data": b"foobar", - "node_id": "1", - }, - { - "topics": ["onions"], - "data": b"I am allergic", - "node_id": "1", - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_seven_nodes_tree_three_topics_diff_origin_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "1": ["2", "3"], - "2": ["4", "5"], - "3": ["6", "7"], - }, - "topic_map": { - "astrophysics": ["1", "2", "3", "4", "5", "6", "7"], - "space": ["1", "2", "3", "4", "5", "6", "7"], - "onions": ["1", "2", "3", "4", "5", "6", "7"], - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": b"e=mc^2", - "node_id": "1", - }, - { - "topics": ["space"], - "data": b"foobar", - "node_id": "4", - }, - { - "topics": ["onions"], - "data": b"I am allergic", - "node_id": "7", - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def 
test_three_nodes_clique_two_topic_diff_origin_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "1": ["2", "3"], - "2": ["3"], - }, - "topic_map": { - "astrophysics": ["1", "2", "3"], - "school": ["1", "2", "3"], - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": b"e=mc^2", - "node_id": "1", - }, - { - "topics": ["school"], - "data": b"foobar", - "node_id": "2", - }, - { - "topics": ["astrophysics"], - "data": b"I am allergic", - "node_id": "1", - } - ] - } - await perform_test_from_obj(test_obj) - - -@pytest.mark.asyncio -async def test_four_nodes_clique_two_topic_diff_origin_many_msgs_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "1": ["2", "3", "4"], - "2": ["1", "3", "4"], - "3": ["1", "2", "4"], - "4": ["1", "2", "3"], - }, - "topic_map": { - "astrophysics": ["1", "2", "3", "4"], - "school": ["1", "2", "3", "4"], - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": b"e=mc^2", - "node_id": "1", - }, - { - "topics": ["school"], - "data": b"foobar", - "node_id": "2", - }, - { - "topics": ["astrophysics"], - "data": b"I am allergic", - "node_id": "1", - }, - { - "topics": ["school"], - "data": b"foobar2", - "node_id": "2", - }, - { - "topics": ["astrophysics"], - "data": b"I am allergic2", - "node_id": "1", - }, - { - "topics": ["school"], - "data": b"foobar3", - "node_id": "2", - }, - { - "topics": ["astrophysics"], - "data": b"I am allergic3", - "node_id": "1", - } - ] - } - await perform_test_from_obj(test_obj) - - -@pytest.mark.asyncio -async def test_five_nodes_ring_two_topic_diff_origin_many_msgs_test_obj(): - test_obj = { - "supported_protocols": SUPPORTED_PROTOCOLS, - "adj_list": { - "1": ["2"], - "2": ["3"], - "3": ["4"], - "4": ["5"], - "5": ["1"], - }, - "topic_map": { - "astrophysics": ["1", "2", "3", "4", "5"], - "school": ["1", "2", "3", "4", "5"], - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": b"e=mc^2", - "node_id": "1", - }, - { - "topics": ["school"], - "data": b"foobar", - "node_id": "2", - }, - { - "topics": ["astrophysics"], - "data": b"I am allergic", - "node_id": "1", - }, - { - "topics": ["school"], - "data": b"foobar2", - "node_id": "2", - }, - { - "topics": ["astrophysics"], - "data": b"I am allergic2", - "node_id": "1", - }, - { - "topics": ["school"], - "data": b"foobar3", - "node_id": "2", - }, - { - "topics": ["astrophysics"], - "data": b"I am allergic3", - "node_id": "1", - } - ] - } - await perform_test_from_obj(test_obj) +async def test_gossipsub_run_with_floodsub_tests(test_case_obj): + await perform_test_from_obj( + test_case_obj, + FloodSub, + ) diff --git a/tests/pubsub/test_gossipsub.py b/tests/pubsub/test_gossipsub.py index 7e2c7575..7f4c81ca 100644 --- a/tests/pubsub/test_gossipsub.py +++ b/tests/pubsub/test_gossipsub.py @@ -8,9 +8,13 @@ from tests.utils import ( connect, ) -from .utils import message_id_generator, generate_RPC_packet, \ - create_libp2p_hosts, create_pubsub_and_gossipsub_instances, sparse_connect, dense_connect, \ - one_to_all_connect +from .utils import ( + create_libp2p_hosts, + create_pubsub_and_gossipsub_instances, + dense_connect, + one_to_all_connect, +) + SUPPORTED_PROTOCOLS = ["/gossipsub/1.0.0"] diff --git a/tests/pubsub/test_gossipsub_backward_compatibility.py b/tests/pubsub/test_gossipsub_backward_compatibility.py index 060973a5..2a3167bf 100644 --- a/tests/pubsub/test_gossipsub_backward_compatibility.py +++ b/tests/pubsub/test_gossipsub_backward_compatibility.py @@ -1,34 +1,31 @@ -import 
asyncio -import multiaddr +import functools + import pytest from libp2p import new_node -from libp2p.peer.peerinfo import info_from_p2p_addr from libp2p.pubsub.gossipsub import GossipSub -from libp2p.pubsub.floodsub import FloodSub -from libp2p.pubsub.pb import rpc_pb2 from libp2p.pubsub.pubsub import Pubsub from tests.utils import cleanup -from .utils import ( - connect, - message_id_generator, - generate_RPC_packet, +from .configs import ( + FLOODSUB_PROTOCOL_ID, + LISTEN_MADDR, +) +from .floodsub_integration_test_settings import ( + perform_test_from_obj, + floodsub_protocol_pytest_params, ) # pylint: disable=too-many-locals - @pytest.mark.asyncio -async def test_init(): - node = await new_node(transport_opt=["/ip4/127.1/tcp/0"]) +async def test_gossipsub_initialize_with_floodsub_protocol(): + node = await new_node(transport_opt=[str(LISTEN_MADDR)]) - await node.get_network().listen(multiaddr.Multiaddr("/ip4/127.1/tcp/0")) + await node.get_network().listen(LISTEN_MADDR) - supported_protocols = ["/gossipsub/1.0.0"] - - gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30) + gossipsub = GossipSub([FLOODSUB_PROTOCOL_ID], 3, 2, 4, 30) pubsub = Pubsub(node, gossipsub, "a") # Did it work? @@ -37,479 +34,19 @@ async def test_init(): await cleanup() -async def perform_test_from_obj(obj): - """ - Perform a floodsub test from a test obj. - test obj are composed as follows: - - { - "supported_protocols": ["supported/protocol/1.0.0",...], - "adj_list": { - "node1": ["neighbor1_of_node1", "neighbor2_of_node1", ...], - "node2": ["neighbor1_of_node2", "neighbor2_of_node2", ...], - ... - }, - "topic_map": { - "topic1": ["node1_subscribed_to_topic1", "node2_subscribed_to_topic1", ...] - }, - "messages": [ - { - "topics": ["topic1_for_message", "topic2_for_message", ...], - "data": "some contents of the message (newlines are not supported)", - "node_id": "message sender node id" - }, - ... - ] - } - NOTE: In adj_list, for any neighbors A and B, only list B as a neighbor of A - or B as a neighbor of A once. 
Do NOT list both A: ["B"] and B:["A"] as the behavior - is undefined (even if it may work) - """ - - # Step 1) Create graph - adj_list = obj["adj_list"] - node_map = {} - gossipsub_map = {} - pubsub_map = {} - - supported_protocols = obj["supported_protocols"] - - tasks_connect = [] - for start_node_id in adj_list: - # Create node if node does not yet exist - if start_node_id not in node_map: - node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) - await node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - - node_map[start_node_id] = node - - gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30) - gossipsub_map[start_node_id] = gossipsub - pubsub = Pubsub(node, gossipsub, start_node_id) - pubsub_map[start_node_id] = pubsub - - # For each neighbor of start_node, create if does not yet exist, - # then connect start_node to neighbor - for neighbor_id in adj_list[start_node_id]: - # Create neighbor if neighbor does not yet exist - if neighbor_id not in node_map: - neighbor_node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"]) - await neighbor_node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - - node_map[neighbor_id] = neighbor_node - - gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30) - gossipsub_map[neighbor_id] = gossipsub - pubsub = Pubsub(neighbor_node, gossipsub, neighbor_id) - pubsub_map[neighbor_id] = pubsub - - # Connect node and neighbor - tasks_connect.append(connect(node_map[start_node_id], node_map[neighbor_id])) - tasks_connect.append(asyncio.sleep(2)) - await asyncio.gather(*tasks_connect) - - # Allow time for graph creation before continuing - # await asyncio.sleep(0.25) - - # Step 2) Subscribe to topics - queues_map = {} - topic_map = obj["topic_map"] - - tasks_topic = [] - tasks_topic_data = [] - for topic in topic_map: - for node_id in topic_map[topic]: - """ - # Subscribe node to topic - q = await pubsub_map[node_id].subscribe(topic) - - # Create topic-queue map for node_id if one does not yet exist - if node_id not in queues_map: - queues_map[node_id] = {} - - # Store queue in topic-queue map for node - queues_map[node_id][topic] = q - """ - tasks_topic.append(pubsub_map[node_id].subscribe(topic)) - tasks_topic_data.append((node_id, topic)) - tasks_topic.append(asyncio.sleep(2)) - - # Gather is like Promise.all - responses = await asyncio.gather(*tasks_topic, return_exceptions=True) - for i in range(len(responses) - 1): - q = responses[i] - node_id, topic = tasks_topic_data[i] - if node_id not in queues_map: - queues_map[node_id] = {} - - # Store queue in topic-queue map for node - queues_map[node_id][topic] = q - - # Allow time for subscribing before continuing - # await asyncio.sleep(0.01) - - # Step 3) Publish messages - topics_in_msgs_ordered = [] - messages = obj["messages"] - tasks_publish = [] - - for msg in messages: - topics = msg["topics"] - data = msg["data"] - node_id = msg["node_id"] - - # Publish message - # FIXME: This should be one RPC packet with several topics - for topic in topics: - tasks_publish.append( - pubsub_map[node_id].publish( - topic, - data, - ) - ) - - # For each topic in topics, add topic, msg_talk tuple to ordered test list - # TODO: Update message sender to be correct message sender before - # adding msg_talk to this list - for topic in topics: - topics_in_msgs_ordered.append((topic, data)) - - # Allow time for publishing before continuing - # await asyncio.sleep(0.4) - tasks_publish.append(asyncio.sleep(2)) - await asyncio.gather(*tasks_publish) - - # Step 4) Check that all 
messages were received correctly. - # TODO: Check message sender too - for topic, data in topics_in_msgs_ordered: - # Look at each node in each topic - for node_id in topic_map[topic]: - # Get message from subscription queue - msg_on_node = await queues_map[node_id][topic].get() - assert msg_on_node.data == data - - # Success, terminate pending tasks. - await cleanup() - +@pytest.mark.parametrize( + "test_case_obj", + floodsub_protocol_pytest_params, +) @pytest.mark.asyncio -async def test_simple_two_nodes_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "A": ["B"] - }, - "topic_map": { - "topic1": ["B"] - }, - "messages": [ - { - "topics": ["topic1"], - "data": "foo", - "node_id": "A" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_three_nodes_two_topics_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "A": ["B"], - "B": ["C"] - }, - "topic_map": { - "topic1": ["B", "C"], - "topic2": ["B", "C"] - }, - "messages": [ - { - "topics": ["topic1"], - "data": "foo", - "node_id": "A" - }, - { - "topics": ["topic2"], - "data": "Alex is tall", - "node_id": "A" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_two_nodes_one_topic_single_subscriber_is_sender_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "A": ["B"] - }, - "topic_map": { - "topic1": ["B"] - }, - "messages": [ - { - "topics": ["topic1"], - "data": "Alex is tall", - "node_id": "B" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_two_nodes_one_topic_two_msgs_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "A": ["B"] - }, - "topic_map": { - "topic1": ["B"] - }, - "messages": [ - { - "topics": ["topic1"], - "data": "Alex is tall", - "node_id": "B" - }, - { - "topics": ["topic1"], - "data": "foo", - "node_id": "A" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_seven_nodes_tree_one_topics_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "1": ["2", "3"], - "2": ["4", "5"], - "3": ["6", "7"] - }, - "topic_map": { - "astrophysics": ["2", "3", "4", "5", "6", "7"] - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_seven_nodes_tree_three_topics_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "1": ["2", "3"], - "2": ["4", "5"], - "3": ["6", "7"] - }, - "topic_map": { - "astrophysics": ["2", "3", "4", "5", "6", "7"], - "space": ["2", "3", "4", "5", "6", "7"], - "onions": ["2", "3", "4", "5", "6", "7"] - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" - }, - { - "topics": ["space"], - "data": "foobar", - "node_id": "1" - }, - { - "topics": ["onions"], - "data": "I am allergic", - "node_id": "1" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_seven_nodes_tree_three_topics_diff_origin_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "1": ["2", "3"], - "2": ["4", "5"], - "3": ["6", "7"] - }, - "topic_map": { - "astrophysics": ["1", "2", "3", "4", "5", "6", "7"], - "space": ["1", "2", "3", "4", "5", "6", "7"], - "onions": ["1", "2", "3", "4", "5", "6", "7"] - }, - "messages": 
[ - { - "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" - }, - { - "topics": ["space"], - "data": "foobar", - "node_id": "4" - }, - { - "topics": ["onions"], - "data": "I am allergic", - "node_id": "7" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_three_nodes_clique_two_topic_diff_origin_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "1": ["2", "3"], - "2": ["3"] - }, - "topic_map": { - "astrophysics": ["1", "2", "3"], - "school": ["1", "2", "3"] - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" - }, - { - "topics": ["school"], - "data": "foobar", - "node_id": "2" - }, - { - "topics": ["astrophysics"], - "data": "I am allergic", - "node_id": "1" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_four_nodes_clique_two_topic_diff_origin_many_msgs_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "1": ["2", "3", "4"], - "2": ["1", "3", "4"], - "3": ["1", "2", "4"], - "4": ["1", "2", "3"] - }, - "topic_map": { - "astrophysics": ["1", "2", "3", "4"], - "school": ["1", "2", "3", "4"] - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" - }, - { - "topics": ["school"], - "data": "foobar", - "node_id": "2" - }, - { - "topics": ["astrophysics"], - "data": "I am allergic", - "node_id": "1" - }, - { - "topics": ["school"], - "data": "foobar2", - "node_id": "2" - }, - { - "topics": ["astrophysics"], - "data": "I am allergic2", - "node_id": "1" - }, - { - "topics": ["school"], - "data": "foobar3", - "node_id": "2" - }, - { - "topics": ["astrophysics"], - "data": "I am allergic3", - "node_id": "1" - } - ] - } - await perform_test_from_obj(test_obj) - -@pytest.mark.asyncio -async def test_five_nodes_ring_two_topic_diff_origin_many_msgs_test_obj(): - test_obj = { - "supported_protocols": ["/floodsub/1.0.0"], - "adj_list": { - "1": ["2"], - "2": ["3"], - "3": ["4"], - "4": ["5"], - "5": ["1"] - }, - "topic_map": { - "astrophysics": ["1", "2", "3", "4", "5"], - "school": ["1", "2", "3", "4", "5"] - }, - "messages": [ - { - "topics": ["astrophysics"], - "data": "e=mc^2", - "node_id": "1" - }, - { - "topics": ["school"], - "data": "foobar", - "node_id": "2" - }, - { - "topics": ["astrophysics"], - "data": "I am allergic", - "node_id": "1" - }, - { - "topics": ["school"], - "data": "foobar2", - "node_id": "2" - }, - { - "topics": ["astrophysics"], - "data": "I am allergic2", - "node_id": "1" - }, - { - "topics": ["school"], - "data": "foobar3", - "node_id": "2" - }, - { - "topics": ["astrophysics"], - "data": "I am allergic3", - "node_id": "1" - } - ] - } - await perform_test_from_obj(test_obj) +async def test_gossipsub_run_with_floodsub_tests(test_case_obj): + await perform_test_from_obj( + test_case_obj, + functools.partial( + GossipSub, + degree=3, + degree_low=2, + degree_high=4, + time_to_live=30, + ) + ) diff --git a/tests/pubsub/utils.py b/tests/pubsub/utils.py index bb49a2f8..9bfc57e2 100644 --- a/tests/pubsub/utils.py +++ b/tests/pubsub/utils.py @@ -1,11 +1,12 @@ import asyncio -import multiaddr -import uuid import random import struct from typing import ( Sequence, ) +import uuid + +import multiaddr from libp2p import new_node from libp2p.pubsub.pb import rpc_pb2 @@ -93,8 +94,17 @@ async def create_libp2p_hosts(num_hosts): return hosts -def create_pubsub_and_gossipsub_instances(libp2p_hosts, supported_protocols, degree, degree_low, \ - 
degree_high, time_to_live, gossip_window, gossip_history, heartbeat_interval): + +def create_pubsub_and_gossipsub_instances( + libp2p_hosts, + supported_protocols, + degree, + degree_low, + degree_high, + time_to_live, + gossip_window, + gossip_history, + heartbeat_interval): pubsubs = [] gossipsubs = [] for node in libp2p_hosts: From 766d8ba1e10c1379fadaae7bf9f9c48aaa5b4e4d Mon Sep 17 00:00:00 2001 From: mhchia Date: Sat, 27 Jul 2019 12:06:36 +0800 Subject: [PATCH 11/17] A little bit clean up --- libp2p/pubsub/gossipsub.py | 14 ++++++-------- libp2p/pubsub/pubsub.py | 3 --- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index b65462dd..d394e95e 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -208,13 +208,11 @@ class GossipSub(IPubsubRouter): # in the fanout for a topic (or the topic is not in the fanout). # Selects the remaining number of peers (D-x) from peers.gossipsub[topic]. if topic in self.pubsub.peer_topics: - gossipsub_peers_in_topic = [peer for peer in self.pubsub.peer_topics[topic] - if peer in self.peers_gossipsub] - selected_peers = \ - GossipSub.select_from_minus(self.degree - fanout_size, - gossipsub_peers_in_topic, - fanout_peers) - + selected_peers = self._get_peers_from_minus( + topic, + self.degree - fanout_size, + fanout_peers, + ) # Combine fanout peers with selected peers fanout_peers += selected_peers @@ -295,7 +293,7 @@ class GossipSub(IPubsubRouter): selected_peers = GossipSub.select_from_minus( self.degree - num_mesh_peers_in_topic, gossipsub_peers_in_topic, - self.mesh[topic] + self.mesh[topic], ) fanout_peers_not_in_mesh = [ diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index 560b8c2c..7c33d0b9 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -313,9 +313,6 @@ class Pubsub: # Write message to stream await stream.write(rpc_msg) - def list_peers(self, topic_id: str) -> Tuple[ID, ...]: - return - async def publish(self, topic_id: str, data: bytes) -> None: """ Publish data to a topic From c252c620099e7edbac12d5994ac42731b85a9011 Mon Sep 17 00:00:00 2001 From: Kevin Mai-Husan Chia Date: Sun, 28 Jul 2019 16:05:29 +0800 Subject: [PATCH 12/17] Update libp2p/pubsub/pubsub.py Co-Authored-By: NIC Lin --- libp2p/pubsub/pubsub.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index 7c33d0b9..7711b080 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -322,7 +322,7 @@ class Pubsub: msg = rpc_pb2.Message( data=data, topicIDs=[topic_id], - # Origin is myself. + # Origin is ourself. from_id=self.host.get_id().to_bytes(), seqno=self._next_seqno(), ) From ffb39204685445d43aecb38dfce42cdfac6d0a18 Mon Sep 17 00:00:00 2001 From: Kevin Mai-Husan Chia Date: Sun, 28 Jul 2019 16:06:03 +0800 Subject: [PATCH 13/17] Update libp2p/pubsub/floodsub.py Co-Authored-By: NIC Lin --- libp2p/pubsub/floodsub.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py index 40aafc5b..7d2b14de 100644 --- a/libp2p/pubsub/floodsub.py +++ b/libp2p/pubsub/floodsub.py @@ -102,7 +102,7 @@ class FloodSub(IPubsubRouter): origin: ID) -> Iterable[ID]: """ Get the eligible peers to send the data to. - :param src: the peer id of the peer who forwards the message to me. + :param src: peer ID of the peer who forwards the message to us :param origin: the peer id of the peer who originally broadcast the message. 
         :return: a generator of the peer ids who we send data to.
         """
 

From a1e20caebefc4bc31b76273b4019f487b2d1fed4 Mon Sep 17 00:00:00 2001
From: Kevin Mai-Husan Chia
Date: Sun, 28 Jul 2019 16:07:11 +0800
Subject: [PATCH 14/17] Update libp2p/pubsub/floodsub.py

Co-Authored-By: NIC Lin
---
 libp2p/pubsub/floodsub.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py
index 7d2b14de..0b26f389 100644
--- a/libp2p/pubsub/floodsub.py
+++ b/libp2p/pubsub/floodsub.py
@@ -62,7 +62,7 @@ class FloodSub(IPubsubRouter):
         so that seen messages are not further forwarded.
         It also never forwards a message back to the source
         or the peer that forwarded the message.
-        :param src: the peer id of the peer who forwards the message to me.
+        :param src: peer ID of the peer who forwards the message to us
         :param pubsub_msg: pubsub message in protobuf.
         """

From 70c5c84f32184a248599e021074b5ae0a8a217ad Mon Sep 17 00:00:00 2001
From: Kevin Mai-Husan Chia
Date: Sun, 28 Jul 2019 16:09:01 +0800
Subject: [PATCH 15/17] Update libp2p/pubsub/floodsub.py

Co-Authored-By: NIC Lin
---
 libp2p/pubsub/floodsub.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py
index 0b26f389..60e77f0d 100644
--- a/libp2p/pubsub/floodsub.py
+++ b/libp2p/pubsub/floodsub.py
@@ -103,7 +103,7 @@
         """
         Get the eligible peers to send the data to.
         :param src: peer ID of the peer who forwards the message to us
-        :param origin: the peer id of the peer who originally broadcast the message.
+        :param origin: peer id of the peer the message originates from
         :return: a generator of the peer ids who we send data to.
         """

From 74d831d4e23e3df6223a7ce9c9d1406c31602e8f Mon Sep 17 00:00:00 2001
From: mhchia
Date: Sun, 28 Jul 2019 18:06:38 +0800
Subject: [PATCH 16/17] Reflect PR feedback

---
 libp2p/pubsub/floodsub.py  |  4 +--
 libp2p/pubsub/gossipsub.py | 61 +++++++++++++++++++-------------------
 2 files changed, 33 insertions(+), 32 deletions(-)

diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py
index 60e77f0d..162ce30a 100644
--- a/libp2p/pubsub/floodsub.py
+++ b/libp2p/pubsub/floodsub.py
@@ -102,8 +102,8 @@
         """
         Get the eligible peers to send the data to.
-        :param src: peer ID of the peer who forwards the message to us
-        :param origin: peer id of the peer the message originates from
+        :param src: peer ID of the peer who forwards the message to us.
+        :param origin: peer id of the peer the message originates from.
         :return: a generator of the peer ids who we send data to.
         """
         for topic in topic_ids:

diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py
index d394e95e..20e21a9e 100644
--- a/libp2p/pubsub/gossipsub.py
+++ b/libp2p/pubsub/gossipsub.py
@@ -149,21 +149,21 @@ class GossipSub(IPubsubRouter):
         """
         Get the eligible peers to send the data to.
         :param src: the peer id of the peer who forwards the message to me.
-        :param origin: the peer id of the peer who originally broadcast the message.
+        :param origin: peer id of the peer the message originates from.
         :return: a generator of the peer ids who we send data to.
 
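Read together, the docstring edits in patches 13 through 16 all describe one selection rule. A minimal sketch of that rule, using plain dict/set stand-ins for the real Pubsub/GossipSub attributes (every name below is illustrative, not the actual API):

def eligible_peers(topic_ids, peer_topics, floodsub_peers, mesh, fanout,
                   forwarder, origin):
    send_to = set()
    for topic in topic_ids:
        if topic not in peer_topics:
            continue
        # floodsub peers subscribed to the topic always receive the message
        send_to.update(p for p in peer_topics[topic] if p in floodsub_peers)
        # gossipsub peers come from the mesh when we are subscribed to the
        # topic, otherwise from the (possibly just-initialized) fanout
        send_to.update(mesh.get(topic) or fanout.get(topic, []))
    # never echo a message back to the peer it came from, nor to its origin
    return send_to - {forwarder, origin}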
""" - to_send: MutableSet[ID] = set() + send_to: MutableSet[ID] = set() for topic in topic_ids: if topic not in self.pubsub.peer_topics: continue # floodsub peers for peer_id_str in self.pubsub.peer_topics[topic]: - peer_id = id_b58_decode(peer_id_str) # FIXME: `gossipsub.peers_floodsub` can be changed to `gossipsub.peers` in go. # This will improve the efficiency when searching for a peer's protocol id. if peer_id_str in self.peers_floodsub: - to_send.add(peer_id) + peer_id = id_b58_decode(peer_id_str) + send_to.add(peer_id) # gossipsub peers # FIXME: Change `str` to `ID` @@ -180,12 +180,16 @@ class GossipSub(IPubsubRouter): # pylint: disable=len-as-condition if (topic not in self.fanout) or (len(self.fanout[topic]) == 0): # If no peers in fanout, choose some peers from gossipsub peers in topic. - self.fanout[topic] = self._get_peers_from_minus(topic, self.degree, []) + self.fanout[topic] = self._get_in_topic_gossipsub_peers_from_minus( + topic, + self.degree, + [], + ) gossipsub_peers = self.fanout[topic] for peer_id_str in gossipsub_peers: - to_send.add(id_b58_decode(peer_id_str)) + send_to.add(id_b58_decode(peer_id_str)) # Excludes `src` and `origin` - yield from to_send.difference([src, origin]) + yield from send_to.difference([src, origin]) async def join(self, topic): # Note: the comments here are the near-exact algorithm description from the spec @@ -208,7 +212,7 @@ class GossipSub(IPubsubRouter): # in the fanout for a topic (or the topic is not in the fanout). # Selects the remaining number of peers (D-x) from peers.gossipsub[topic]. if topic in self.pubsub.peer_topics: - selected_peers = self._get_peers_from_minus( + selected_peers = self._get_in_topic_gossipsub_peers_from_minus( topic, self.degree - fanout_size, fanout_peers, @@ -286,13 +290,10 @@ class GossipSub(IPubsubRouter): num_mesh_peers_in_topic = len(self.mesh[topic]) if num_mesh_peers_in_topic < self.degree_low: - gossipsub_peers_in_topic = [peer for peer in self.pubsub.peer_topics[topic] - if peer in self.peers_gossipsub] - # Select D - |mesh[topic]| peers from peers.gossipsub[topic] - mesh[topic] - selected_peers = GossipSub.select_from_minus( + selected_peers = self._get_in_topic_gossipsub_peers_from_minus( + topic, self.degree - num_mesh_peers_in_topic, - gossipsub_peers_in_topic, self.mesh[topic], ) @@ -334,12 +335,11 @@ class GossipSub(IPubsubRouter): # If |fanout[topic]| < D if num_fanout_peers_in_topic < self.degree: # Select D - |fanout[topic]| peers from peers.gossipsub[topic] - fanout[topic] - gossipsub_peers_in_topic = [peer for peer in self.pubsub.peer_topics[topic] - if peer in self.peers_gossipsub] - selected_peers = \ - GossipSub.select_from_minus(self.degree - num_fanout_peers_in_topic, - gossipsub_peers_in_topic, self.fanout[topic]) - + selected_peers = self._get_in_topic_gossipsub_peers_from_minus( + topic, + self.degree - num_fanout_peers_in_topic, + self.fanout[topic], + ) # Add the peers to fanout[topic] self.fanout[topic].extend(selected_peers) @@ -351,12 +351,12 @@ class GossipSub(IPubsubRouter): # TODO: Make more efficient, possibly using a generator? 
                # Get all pubsub peers in a topic and only add them if they are gossipsub peers too
                if topic in self.pubsub.peer_topics:
-                    gossipsub_peers_in_topic = [peer for peer in self.pubsub.peer_topics[topic]
-                                                if peer in self.peers_gossipsub]
                     # Select D peers from peers.gossipsub[topic]
-                    peers_to_emit_ihave_to = \
-                        GossipSub.select_from_minus(self.degree, gossipsub_peers_in_topic, [])
+                    peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
+                        topic,
+                        self.degree,
+                        [],
+                    )
 
                     for peer in peers_to_emit_ihave_to:
                         # TODO: this line is a monster, can hopefully be simplified
@@ -365,6 +365,7 @@
                         msg_ids = [str(msg) for msg in msg_ids]
                         await self.emit_ihave(topic, msg_ids, peer)
 
+        # TODO: Refactor and Dedup. This section is roughly the same as the above.
         # Do the same for fanout, for all topics not already hit in mesh
         for topic in self.fanout:
             if topic not in self.mesh:
@@ -373,12 +374,12 @@
                 # TODO: Make more efficient, possibly using a generator?
                 # Get all pubsub peers in topic and only add if they are gossipsub peers also
                 if topic in self.pubsub.peer_topics:
-                    gossipsub_peers_in_topic = [peer for peer in self.pubsub.peer_topics[topic]
-                                                if peer in self.peers_gossipsub]
                     # Select D peers from peers.gossipsub[topic]
-                    peers_to_emit_ihave_to = \
-                        GossipSub.select_from_minus(self.degree, gossipsub_peers_in_topic, [])
+                    peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
+                        topic,
+                        self.degree,
+                        [],
+                    )
 
                     for peer in peers_to_emit_ihave_to:
                         if peer not in self.mesh[topic] and peer not in self.fanout[topic]:
@@ -414,7 +415,7 @@
 
         return selection
 
-    def _get_peers_from_minus(
+    def _get_in_topic_gossipsub_peers_from_minus(
             self,
             topic: str,
             num_to_select: int,

From f02d38c0ee5e532261dcfdcabe770e04928cf517 Mon Sep 17 00:00:00 2001
From: mhchia
Date: Mon, 29 Jul 2019 12:09:35 +0800
Subject: [PATCH 17/17] Reflect PR feedback

* Rename `src` to `msg_forwarder` in pubsub/floodsub/gossipsub
* Rename Variables
* Sort imports
* Clean up
---
 libp2p/pubsub/floodsub.py                     | 12 ++++-----
 libp2p/pubsub/gossipsub.py                    | 20 +++++++-------
 libp2p/pubsub/pubsub.py                       |  8 +++---
 libp2p/pubsub/pubsub_router_interface.py      |  4 +--
 tests/pubsub/configs.py                       |  2 +-
 tests/pubsub/dummy_account_node.py            |  7 ++---
 .../floodsub_integration_test_settings.py     | 26 +++++++++----------
 tests/pubsub/test_dummyaccount_demo.py        |  8 +-----
 tests/pubsub/test_gossipsub.py                |  5 ++--
 tests/pubsub/test_mcache.py                   |  5 +++-
 tests/pubsub/test_subscription.py             |  5 +++-
 tests/pubsub/utils.py                         |  7 ++---
 12 files changed, 54 insertions(+), 55 deletions(-)

diff --git a/libp2p/pubsub/floodsub.py b/libp2p/pubsub/floodsub.py
index 162ce30a..04040213 100644
--- a/libp2p/pubsub/floodsub.py
+++ b/libp2p/pubsub/floodsub.py
@@ -51,7 +51,7 @@ class FloodSub(IPubsubRouter):
         :param rpc: rpc message
         """
 
-    async def publish(self, src: ID, pubsub_msg: rpc_pb2.Message) -> None:
+    async def publish(self, msg_forwarder: ID, pubsub_msg: rpc_pb2.Message) -> None:
         """
         Invoked to forward a new message that has been validated.
         This is where the "flooding" part of floodsub happens
@@ -62,13 +62,13 @@
         so that seen messages are not further forwarded.
         It also never forwards a message back to the source
         or the peer that forwarded the message.
-        :param src: peer ID of the peer who forwards the message to us
+        :param msg_forwarder: peer ID of the peer who forwards the message to us
         :param pubsub_msg: pubsub message in protobuf.
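For orientation, the caller that reaches this publish method is Pubsub.publish. A condensed sketch of that entry point as it stands at this point in the series (signing and blacklist checks are still TODOs, so they are omitted here):

from libp2p.pubsub.pb import rpc_pb2

async def publish(pubsub, topic_id: str, data: bytes) -> None:
    msg = rpc_pb2.Message(
        data=data,
        topicIDs=[topic_id],
        from_id=pubsub.host.get_id().to_bytes(),
        seqno=pubsub._next_seqno(),
    )
    # the publisher is the first forwarder of its own message; push_msg
    # dedups via the seen cache, delivers locally, then calls router.publish
    await pubsub.push_msg(msg_forwarder=pubsub.host.get_id(), msg=msg)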
""" peers_gen = self._get_peers_to_send( pubsub_msg.topicIDs, - src=src, + msg_forwarder=msg_forwarder, origin=ID(pubsub_msg.from_id), ) rpc_msg = rpc_pb2.RPC( @@ -98,11 +98,11 @@ class FloodSub(IPubsubRouter): def _get_peers_to_send( self, topic_ids: Iterable[str], - src: ID, + msg_forwarder: ID, origin: ID) -> Iterable[ID]: """ Get the eligible peers to send the data to. - :param src: peer ID of the peer who forwards the message to us. + :param msg_forwarder: peer ID of the peer who forwards the message to us. :param origin: peer id of the peer the message originate from. :return: a generator of the peer ids who we send data to. """ @@ -111,7 +111,7 @@ class FloodSub(IPubsubRouter): continue for peer_id_str in self.pubsub.peer_topics[topic]: peer_id = id_b58_decode(peer_id_str) - if peer_id in (src, origin): + if peer_id in (msg_forwarder, origin): continue # FIXME: Should change `self.pubsub.peers` to Dict[PeerID, ...] if str(peer_id) not in self.pubsub.peers: diff --git a/libp2p/pubsub/gossipsub.py b/libp2p/pubsub/gossipsub.py index 20e21a9e..0af5e5ff 100644 --- a/libp2p/pubsub/gossipsub.py +++ b/libp2p/pubsub/gossipsub.py @@ -119,7 +119,7 @@ class GossipSub(IPubsubRouter): for prune in control_message.prune: await self.handle_prune(prune, sender_peer_id) - async def publish(self, src: ID, pubsub_msg: rpc_pb2.Message) -> None: + async def publish(self, msg_forwarder: ID, pubsub_msg: rpc_pb2.Message) -> None: # pylint: disable=too-many-locals """ Invoked to forward a new message that has been validated. @@ -128,7 +128,7 @@ class GossipSub(IPubsubRouter): peers_gen = self._get_peers_to_send( pubsub_msg.topicIDs, - src=src, + msg_forwarder=msg_forwarder, origin=ID(pubsub_msg.from_id), ) rpc_msg = rpc_pb2.RPC( @@ -144,11 +144,11 @@ class GossipSub(IPubsubRouter): def _get_peers_to_send( self, topic_ids: Iterable[str], - src: ID, + msg_forwarder: ID, origin: ID) -> Iterable[ID]: """ Get the eligible peers to send the data to. - :param src: the peer id of the peer who forwards the message to me. + :param msg_forwarder: the peer id of the peer who forwards the message to me. :param origin: peer id of the peer the message originate from. :return: a generator of the peer ids who we send data to. """ @@ -167,10 +167,10 @@ class GossipSub(IPubsubRouter): # gossipsub peers # FIXME: Change `str` to `ID` - gossipsub_peers: List[str] = None + in_topic_gossipsub_peers: List[str] = None # TODO: Do we need to check `topic in self.pubsub.my_topics`? if topic in self.mesh: - gossipsub_peers = self.mesh[topic] + in_topic_gossipsub_peers = self.mesh[topic] else: # TODO(robzajac): Is topic DEFINITELY supposed to be in fanout if we are not # subscribed? @@ -185,11 +185,11 @@ class GossipSub(IPubsubRouter): self.degree, [], ) - gossipsub_peers = self.fanout[topic] - for peer_id_str in gossipsub_peers: + in_topic_gossipsub_peers = self.fanout[topic] + for peer_id_str in in_topic_gossipsub_peers: send_to.add(id_b58_decode(peer_id_str)) - # Excludes `src` and `origin` - yield from send_to.difference([src, origin]) + # Excludes `msg_forwarder` and `origin` + yield from send_to.difference([msg_forwarder, origin]) async def join(self, topic): # Note: the comments here are the near-exact algorithm description from the spec diff --git a/libp2p/pubsub/pubsub.py b/libp2p/pubsub/pubsub.py index 7711b080..2af4e245 100644 --- a/libp2p/pubsub/pubsub.py +++ b/libp2p/pubsub/pubsub.py @@ -142,7 +142,7 @@ class Pubsub: continue # TODO(mhchia): This will block this read_stream loop until all data are pushed. 
# Should investigate further if this is an issue. - await self.push_msg(src=peer_id, msg=msg) + await self.push_msg(msg_forwarder=peer_id, msg=msg) if rpc_incoming.subscriptions: # deal with RPC.subscriptions @@ -331,10 +331,10 @@ class Pubsub: await self.push_msg(self.host.get_id(), msg) - async def push_msg(self, src: ID, msg: rpc_pb2.Message) -> None: + async def push_msg(self, msg_forwarder: ID, msg: rpc_pb2.Message) -> None: """ Push a pubsub message to others. - :param src: the peer who forward us the message. + :param msg_forwarder: the peer who forward us the message. :param msg: the message we are going to push out. """ # TODO: - Check if the `source` is in the blacklist. If yes, reject. @@ -350,7 +350,7 @@ class Pubsub: self._mark_msg_seen(msg) await self.handle_talk(msg) - await self.router.publish(src, msg) + await self.router.publish(msg_forwarder, msg) def _next_seqno(self) -> bytes: """ diff --git a/libp2p/pubsub/pubsub_router_interface.py b/libp2p/pubsub/pubsub_router_interface.py index 6f69b9b0..8819e5f0 100644 --- a/libp2p/pubsub/pubsub_router_interface.py +++ b/libp2p/pubsub/pubsub_router_interface.py @@ -42,10 +42,10 @@ class IPubsubRouter(ABC): """ @abstractmethod - async def publish(self, src, pubsub_msg) -> None: + async def publish(self, msg_forwarder, pubsub_msg): """ Invoked to forward a new message that has been validated - :param src: peer_id of message sender + :param msg_forwarder: peer_id of message sender :param pubsub_msg: pubsub message to forward """ diff --git a/tests/pubsub/configs.py b/tests/pubsub/configs.py index d6849f7c..99295e47 100644 --- a/tests/pubsub/configs.py +++ b/tests/pubsub/configs.py @@ -2,6 +2,6 @@ import multiaddr FLOODSUB_PROTOCOL_ID = "/floodsub/1.0.0" -SUPPORTED_PROTOCOLS = [FLOODSUB_PROTOCOL_ID] +GOSSIPSUB_PROTOCOL_ID = "/gossipsub/1.0.0" LISTEN_MADDR = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0") diff --git a/tests/pubsub/dummy_account_node.py b/tests/pubsub/dummy_account_node.py index f0a95f87..42d17ca6 100644 --- a/tests/pubsub/dummy_account_node.py +++ b/tests/pubsub/dummy_account_node.py @@ -5,13 +5,14 @@ import multiaddr from libp2p import new_node from libp2p.host.host_interface import IHost -from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.floodsub import FloodSub +from libp2p.pubsub.pubsub import Pubsub -from .utils import message_id_generator, generate_RPC_packet +from .configs import FLOODSUB_PROTOCOL_ID +from .utils import message_id_generator -SUPPORTED_PUBSUB_PROTOCOLS = ["/floodsub/1.0.0"] +SUPPORTED_PUBSUB_PROTOCOLS = [FLOODSUB_PROTOCOL_ID] CRYPTO_TOPIC = "ethereum" # Message format: diff --git a/tests/pubsub/floodsub_integration_test_settings.py b/tests/pubsub/floodsub_integration_test_settings.py index eac62baf..77d96a71 100644 --- a/tests/pubsub/floodsub_integration_test_settings.py +++ b/tests/pubsub/floodsub_integration_test_settings.py @@ -2,8 +2,6 @@ import asyncio import pytest -import multiaddr - from libp2p import new_node from libp2p.peer.id import ID from libp2p.pubsub.pubsub import Pubsub @@ -13,8 +11,12 @@ from tests.utils import ( connect, ) +from .configs import ( + FLOODSUB_PROTOCOL_ID, + LISTEN_MADDR, +) + -FLOODSUB_PROTOCOL_ID = "/floodsub/1.0.0" SUPPORTED_PROTOCOLS = [FLOODSUB_PROTOCOL_ID] FLOODSUB_PROTOCOL_TEST_CASES = [ @@ -349,7 +351,6 @@ async def perform_test_from_obj(obj, router_factory): or B as a neighbor of A once. 
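    A smallest-possible obj that follows these conventions (the single edge
    between "1" and "2" is listed exactly once, under "1"):

        {
            "supported_protocols": ["/floodsub/1.0.0"],
            "adj_list": {"1": ["2"]},
            "topic_map": {"astrophysics": ["1", "2"]},
            "messages": [
                {"topics": ["astrophysics"], "data": "e=mc^2", "node_id": "1"},
            ],
        }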
Do NOT list both A: ["B"] and B:["A"] as the behavior is undefined (even if it may work) """ - listen_maddr = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0") # Step 1) Create graph adj_list = obj["adj_list"] @@ -357,8 +358,8 @@ async def perform_test_from_obj(obj, router_factory): pubsub_map = {} async def add_node(node_id: str) -> None: - node = await new_node(transport_opt=[str(listen_maddr)]) - await node.get_network().listen(listen_maddr) + node = await new_node(transport_opt=[str(LISTEN_MADDR)]) + await node.get_network().listen(LISTEN_MADDR) node_map[node_id] = node pubsub_router = router_factory(protocols=obj["supported_protocols"]) pubsub = Pubsub(node, pubsub_router, ID(node_id.encode())) @@ -417,7 +418,7 @@ async def perform_test_from_obj(obj, router_factory): node_id = msg["node_id"] # Publish message - # FIXME: Should be single RPC package with several topics + # TODO: Should be single RPC package with several topics for topic in topics: tasks_publish.append( pubsub_map[node_id].publish( @@ -426,23 +427,22 @@ async def perform_test_from_obj(obj, router_factory): ) ) - # For each topic in topics, add topic, msg_talk tuple to ordered test list - # TODO: Update message sender to be correct message sender before - # adding msg_talk to this list + # For each topic in topics, add (topic, node_id, data) tuple to ordered test list for topic in topics: - topics_in_msgs_ordered.append((topic, data)) + topics_in_msgs_ordered.append((topic, node_id, data)) # Allow time for publishing before continuing await asyncio.gather(*tasks_publish, asyncio.sleep(2)) # Step 4) Check that all messages were received correctly. - # TODO: Check message sender too - for topic, data in topics_in_msgs_ordered: + for topic, origin_node_id, data in topics_in_msgs_ordered: # Look at each node in each topic for node_id in topic_map[topic]: # Get message from subscription queue msg = await queues_map[node_id][topic].get() assert data == msg.data + # Check the message origin + assert node_map[origin_node_id].get_id().to_bytes() == msg.from_id # Success, terminate pending tasks. 
await cleanup() diff --git a/tests/pubsub/test_dummyaccount_demo.py b/tests/pubsub/test_dummyaccount_demo.py index b1c4a64a..1efa579f 100644 --- a/tests/pubsub/test_dummyaccount_demo.py +++ b/tests/pubsub/test_dummyaccount_demo.py @@ -1,19 +1,13 @@ import asyncio from threading import Thread -import multiaddr - import pytest -from libp2p import new_node -from libp2p.peer.peerinfo import info_from_p2p_addr -from libp2p.pubsub.pubsub import Pubsub -from libp2p.pubsub.floodsub import FloodSub - from tests.utils import ( cleanup, connect, ) + from .dummy_account_node import DummyAccountNode # pylint: disable=too-many-locals diff --git a/tests/pubsub/test_gossipsub.py b/tests/pubsub/test_gossipsub.py index 7f4c81ca..e0fed92d 100644 --- a/tests/pubsub/test_gossipsub.py +++ b/tests/pubsub/test_gossipsub.py @@ -8,6 +8,7 @@ from tests.utils import ( connect, ) +from .configs import GOSSIPSUB_PROTOCOL_ID from .utils import ( create_libp2p_hosts, create_pubsub_and_gossipsub_instances, @@ -16,7 +17,7 @@ from .utils import ( ) -SUPPORTED_PROTOCOLS = ["/gossipsub/1.0.0"] +SUPPORTED_PROTOCOLS = [GOSSIPSUB_PROTOCOL_ID] @pytest.mark.asyncio @@ -51,7 +52,7 @@ async def test_join(): # Central node publish to the topic so that this topic # is added to central node's fanout # publish from the randomly chosen host - await pubsubs[central_node_index].publish(topic, b"") + await pubsubs[central_node_index].publish(topic, b"data") # Check that the gossipsub of central node has fanout for the topic assert topic in gossipsubs[central_node_index].fanout diff --git a/tests/pubsub/test_mcache.py b/tests/pubsub/test_mcache.py index 5446f74c..0e73222c 100644 --- a/tests/pubsub/test_mcache.py +++ b/tests/pubsub/test_mcache.py @@ -1,14 +1,17 @@ import pytest + from libp2p.pubsub.mcache import MessageCache class Msg: def __init__(self, topicIDs, seqno, from_id): + # pylint: disable=invalid-name self.topicIDs = topicIDs - self.seqno = seqno, + self.seqno = seqno self.from_id = from_id + @pytest.mark.asyncio async def test_mcache(): # Ported from: diff --git a/tests/pubsub/test_subscription.py b/tests/pubsub/test_subscription.py index c8f46f56..0dfdc4dd 100644 --- a/tests/pubsub/test_subscription.py +++ b/tests/pubsub/test_subscription.py @@ -5,7 +5,10 @@ from libp2p import new_node from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.floodsub import FloodSub -SUPPORTED_PUBSUB_PROTOCOLS = ["/floodsub/1.0.0"] +from .configs import FLOODSUB_PROTOCOL_ID + + +SUPPORTED_PUBSUB_PROTOCOLS = [FLOODSUB_PROTOCOL_ID] TESTING_TOPIC = "TEST_SUBSCRIBE" diff --git a/tests/pubsub/utils.py b/tests/pubsub/utils.py index 9bfc57e2..93fc2356 100644 --- a/tests/pubsub/utils.py +++ b/tests/pubsub/utils.py @@ -1,19 +1,16 @@ import asyncio -import random import struct from typing import ( Sequence, ) -import uuid import multiaddr from libp2p import new_node -from libp2p.pubsub.pb import rpc_pb2 -from libp2p.peer.peerinfo import info_from_p2p_addr from libp2p.peer.id import ID -from libp2p.pubsub.pubsub import Pubsub from libp2p.pubsub.gossipsub import GossipSub +from libp2p.pubsub.pb import rpc_pb2 +from libp2p.pubsub.pubsub import Pubsub from tests.utils import connect
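
A closing note on the trimmed import block above: utils.py keeps `import struct` because its message helpers still need it. For reference, a sketch of a seqno helper in that style, mirroring the 8-byte big-endian convention Pubsub._next_seqno uses (the actual helper in tests/pubsub/utils.py may differ in detail):

import struct

def message_id_generator(start_val):
    # Return a closure that yields 8-byte big-endian sequence numbers,
    # the same wire format as Pubsub._next_seqno's to_bytes(8, 'big').
    val = start_val

    def generator():
        nonlocal val
        val += 1
        return struct.pack('>Q', val)

    return generator

For example, message_id_generator(0)() returns b'\x00\x00\x00\x00\x00\x00\x00\x01'.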