Stream rearchitecture (#126)

* Add generic protocol handler

* Add generic protocol handler to stream muxing pipeline

* Modify conn_handler to only deal with connections

* mplex accept stream architecture changes

* Add create generic protocol handler

* Fix minor bugs

* who would win 4 devs or one not

* Debugging

* rearch with handle_incoming infinite loop, seems to work, needs cleanup

* passing linting, still needs cleanup

* fixing linting again; code still needs cleanup

* fixing tests; code still needs cleanup

* adding test cleanup and task cleanup, removing prints

* linting, and cleanup complete

* storing connections based on peer id

* remove dead code

* remove unnecessary peer_id
This commit is contained in:
Robert Zajac
2019-02-24 20:58:23 -05:00
committed by GitHub
parent 17c778de15
commit 82840b5e6c
14 changed files with 367 additions and 120 deletions

View File

@@ -1,6 +1,6 @@
import asyncio
from .utils import encode_uvarint, decode_uvarint_from_stream
from .utils import encode_uvarint, decode_uvarint_from_stream, get_flag
from .mplex_stream import MplexStream
from ..muxed_connection_interface import IMuxedConn
@@ -11,25 +11,28 @@ class Mplex(IMuxedConn):
reference: https://github.com/libp2p/go-mplex/blob/master/multiplex.go
"""
def __init__(self, conn):
def __init__(self, conn, generic_protocol_handler):
"""
create a new muxed connection
:param conn: an instance of raw connection
:param initiator: boolean to prevent multiplex with self
:param generic_protocol_handler: generic protocol handler
for new muxed streams
"""
super(Mplex, self).__init__(conn, generic_protocol_handler)
self.raw_conn = conn
self.initiator = conn.initiator
# Store generic protocol handler
self.generic_protocol_handler = generic_protocol_handler
# Mapping from stream ID -> buffer of messages for that stream
self.buffers = {}
self.stream_queue = asyncio.Queue()
self.data_buffer = bytearray()
# The initiator of the raw connection need not read upon construction time.
# It should read when the user decides that it wants to read from the constructed stream.
if not self.initiator:
asyncio.ensure_future(self.handle_incoming(None))
# Kick off reading
asyncio.ensure_future(self.handle_incoming())
def close(self):
"""
@@ -49,39 +52,29 @@ class Mplex(IMuxedConn):
:param stream_id: stream id of stream to read from
:return: message read
"""
# Empty buffer or nonexistent stream
# TODO: propagate up timeout exception and catch
if stream_id not in self.buffers or self.buffers[stream_id].empty():
await self.handle_incoming(stream_id)
# TODO: pass down timeout from user and use that
if stream_id in self.buffers:
return await self._read_buffer_exists(stream_id)
try:
data = await asyncio.wait_for(self.buffers[stream_id].get(), timeout=3)
return data
except asyncio.TimeoutError:
return None
# Stream not created yet
return None
async def _read_buffer_exists(self, stream_id):
"""
Reads from raw connection with the assumption that the message buffer for stream_id exists
:param stream_id: stream id of stream to read from
:return: message read
"""
try:
data = await asyncio.wait_for(self.buffers[stream_id].get(), timeout=5)
return data
except asyncio.TimeoutError:
return None
async def open_stream(self, protocol_id, peer_id, multi_addr):
async def open_stream(self, protocol_id, multi_addr):
"""
creates a new muxed_stream
:param protocol_id: protocol_id of stream
:param stream_id: stream_id of stream
:param peer_id: peer_id that stream connects to
:param multi_addr: multi_addr that stream connects to
:return: a new stream
"""
stream_id = self.raw_conn.next_stream_id()
stream = MplexStream(stream_id, multi_addr, self)
self.buffers[stream_id] = asyncio.Queue()
await self.send_message(get_flag(self.initiator, "NEW_STREAM"), None, stream_id)
return stream
async def accept_stream(self):
@@ -89,11 +82,9 @@ class Mplex(IMuxedConn):
accepts a muxed stream opened by the other end
:return: the accepted stream
"""
# TODO update to pull out protocol_id from message
protocol_id = "/echo/1.0.0"
stream_id = await self.stream_queue.get()
stream = MplexStream(stream_id, False, self)
return stream, stream_id, protocol_id
asyncio.ensure_future(self.generic_protocol_handler(stream))
async def send_message(self, flag, data, stream_id):
"""
@@ -126,38 +117,29 @@ class Mplex(IMuxedConn):
await self.raw_conn.writer.drain()
return len(_bytes)
async def handle_incoming(self, my_stream_id):
async def handle_incoming(self):
"""
Read a message off of the raw connection and add it to the corresponding message buffer
"""
# TODO Deal with other types of messages using flag (currently _)
continue_reading = True
i = 0
while continue_reading:
i += 1
stream_id, _, message = await self.read_message()
continue_reading = (stream_id is not None and
stream_id != my_stream_id and
my_stream_id is not None)
while True:
stream_id, flag, message = await self.read_message()
if stream_id not in self.buffers:
self.buffers[stream_id] = asyncio.Queue()
await self.stream_queue.put(stream_id)
if stream_id is not None and flag is not None and message is not None:
if stream_id not in self.buffers:
self.buffers[stream_id] = asyncio.Queue()
await self.stream_queue.put(stream_id)
await self.buffers[stream_id].put(message)
if flag is get_flag(True, "NEW_STREAM"):
# new stream detected on connection
await self.accept_stream()
async def read_chunk(self):
"""
Read a chunk of bytes off of the raw connection into data_buffer
"""
# unused now but possibly useful in the future
try:
chunk = await asyncio.wait_for(self.raw_conn.reader.read(-1), timeout=5)
self.data_buffer += chunk
except asyncio.TimeoutError:
print('timeout!')
return
if message:
await self.buffers[stream_id].put(message)
# Force context switch
await asyncio.sleep(0)
async def read_message(self):
"""
@@ -167,7 +149,7 @@ class Mplex(IMuxedConn):
# Timeout is set to a relatively small value to alleviate wait time to exit
# loop in handle_incoming
timeout = .1
timeout = 0.1
try:
header = await decode_uvarint_from_stream(self.raw_conn.reader, timeout)
length = await decode_uvarint_from_stream(self.raw_conn.reader, timeout)