The Gossipsub PR (#162)

* Add handle_rpc call to pubsub

* Scaffold gossipsub functions

* Add timer

* Implement most of mesh construction

* Implement emit and handle

* Implement fanout heartbeat

* Refactor emit

* some gossipsub cleanup and test

* minor lint stuff, more to come

* Implement publish

* Fix comment

* Modify pubsub/gossipsub so that floodsub tests pass using gossipsub router

* Add floodsub tests to gossipsub

* Handle case where select_from_minus, num_to_select > size(pool-minus)

* Add topic membership

* Implement handle ihave

* Implement most of iwant

* Add mcache.add and comments

* Refactor handle_ihave

* Implement stream write in handle_iwant

* Implement gossip heartbeat

* unresolved vars

* initial mcache code

* documenting mcache

* writing test/debugging mcache

* finished mcache test and debugged

* Make gossipsub backward compatibility its own file

* remove mcache prints

* DEBUGGING

* Add sender_peer_id to handle_rpc to get gossip test passing

* Modify gossipsub to make fanout work

* fanout maintenance test

* debugging gsub GOSSIP

* DEBUGGING

* debugged sender seen cache

* adding lru, removing prints

* pylint cleanup

* Fix github comments in PR

* minor floodsub possible bugfix
This commit is contained in:
Robert Zajac
2019-05-06 23:44:13 -04:00
committed by GitHub
parent eea6a9fda7
commit 9052e8f8bd
11 changed files with 1663 additions and 13 deletions

View File

@ -1,5 +1,6 @@
# pylint: disable=no-name-in-module
import asyncio
from lru import LRU
from .pb import rpc_pb2
@ -34,8 +35,7 @@ class Pubsub():
for protocol in self.protocols:
self.host.set_stream_handler(protocol, self.stream_handler)
# TODO: determine if these need to be asyncio queues, or if could possibly
# be ordinary blocking queues
# Use asyncio queues for proper context switching
self.incoming_msgs_from_peers = asyncio.Queue()
self.outgoing_messages = asyncio.Queue()
@ -44,9 +44,10 @@ class Pubsub():
self.cache_size = 128
else:
self.cache_size = cache_size
self.seen_messages = LRU(self.cache_size)
# Map of topics we are subscribed to to handler functions
# Map of topics we are subscribed to blocking queues
# for when the given topic receives a message
self.my_topics = {}
@ -96,6 +97,7 @@ class Pubsub():
if id_in_seen_msgs not in self.seen_messages:
should_publish = True
self.seen_messages[id_in_seen_msgs] = 1
await self.handle_talk(message)
if rpc_incoming.subscriptions:
@ -112,6 +114,10 @@ class Pubsub():
# relay message to peers with router
await self.router.publish(peer_id, incoming)
if rpc_incoming.control:
# Pass rpc to router so router could perform custom logic
await self.router.handle_rpc(rpc_incoming, peer_id)
# Force context switch
await asyncio.sleep(0)
@ -180,8 +186,9 @@ class Pubsub():
# Add peer to topic
self.peer_topics[sub_message.topicid].append(origin_id)
else:
# TODO: Remove peer from topic
pass
if sub_message.topicid in self.peer_topics:
if origin_id in self.peer_topics[sub_message.topicid]:
self.peer_topics[sub_message.topicid].remove(origin_id)
async def handle_talk(self, publish_message):
"""
@ -217,7 +224,7 @@ class Pubsub():
await self.message_all_peers(packet.SerializeToString())
# Tell router we are joining this topic
self.router.join(topic_id)
await self.router.join(topic_id)
# Return the asyncio queue for messages on this topic
return self.my_topics[topic_id]
@ -243,7 +250,7 @@ class Pubsub():
await self.message_all_peers(packet.SerializeToString())
# Tell router we are leaving this topic
self.router.leave(topic_id)
await self.router.leave(topic_id)
async def message_all_peers(self, rpc_msg):
"""