The Gossipsub PR (#162)

* Add handle_rpc call to pubsub

* Scaffold gossipsub functions

* Add timer

* Implement most of mesh construction

* Implement emit and handle

* Implement fanout heartbeat

* Refactor emit

* some gossipsub cleanup and test

* minor lint stuff, more to come

* Implement publish

* Fix comment

* Modify pubsub/gossipsub so that floodsub tests pass using gossipsub router

* Add floodsub tests to gossipsub

* Handle case where select_from_minus, num_to_select > size(pool-minus)

* Add topic membership

* Implement handle ihave

* Implement most of iwant

* Add mcache.add and comments

* Refactor handle_ihave

* Implement stream write in handle_iwant

* Implement gossip heartbeat

* unresolved vars

* initial mcache code

* documenting mcache

* writing test/debugging mcache

* finished mcache test and debugged

* Make gossipsub backward compatibility its own file

* remove mcache prints

* DEBUGGING

* Add sender_peer_id to handle_rpc to get gossip test passing

* Modify gossipsub to make fanout work

* fanout maintenance test

* debugging gsub GOSSIP

* DEBUGGING

* debugged sender seen cache

* adding lru, removing prints

* pylint cleanup

* Fix github comments in PR

* minor floodsub possible bugfix
This commit is contained in:
Robert Zajac (committed by GitHub), 2019-05-06 23:44:13 -04:00
parent eea6a9fda7 · commit 9052e8f8bd
11 changed files with 1663 additions and 13 deletions
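
The commits above cover the main gossipsub mechanics: mesh construction and maintenance, fanout, the IHAVE/IWANT gossip exchange, and the message cache (mcache). For orientation, below is a minimal sketch of the heartbeat-driven mesh maintenance those commits describe. `select_from_minus` is the helper named in the commit log (including its num_to_select > size(pool - minus) edge case); `mesh_heartbeat` and its parameters are illustrative stand-ins, not the PR's exact code.

    import random

    def select_from_minus(num_to_select, pool, minus):
        # Select up to num_to_select peers from pool, excluding those in minus.
        # Covers the edge case fixed in the commits above: if num_to_select
        # exceeds the number of candidates, return all candidates.
        candidates = [peer for peer in pool if peer not in minus]
        if num_to_select >= len(candidates):
            return candidates
        return random.sample(candidates, num_to_select)

    def mesh_heartbeat(mesh, peers_in_topic, degree, degree_low, degree_high):
        # On each heartbeat, graft peers when a topic mesh falls below
        # degree_low and prune back toward degree when it exceeds degree_high.
        for topic, mesh_peers in mesh.items():
            if len(mesh_peers) < degree_low:
                for peer in select_from_minus(degree - len(mesh_peers),
                                              peers_in_topic[topic], mesh_peers):
                    mesh_peers.append(peer)   # and emit a GRAFT control message
            elif len(mesh_peers) > degree_high:
                for peer in select_from_minus(len(mesh_peers) - degree,
                                              mesh_peers, []):
                    mesh_peers.remove(peer)   # and emit a PRUNE control message

    # Example: a topic mesh that fell below degree_low gets regrafted toward degree.
    mesh = {"foobar": ["p1"]}
    peers_in_topic = {"foobar": ["p1", "p2", "p3", "p4", "p5"]}
    mesh_heartbeat(mesh, peers_in_topic, degree=3, degree_low=2, degree_high=4)
    assert len(mesh["foobar"]) == 3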


@@ -613,4 +613,4 @@ async def test_five_nodes_ring_two_topic_diff_origin_many_msgs_test_obj():
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@@ -0,0 +1,271 @@
import asyncio
import pytest
import random

from libp2p.pubsub.gossipsub import GossipSub
from libp2p.pubsub.floodsub import FloodSub
from libp2p.pubsub.pb import rpc_pb2
from libp2p.pubsub.pubsub import Pubsub
from utils import message_id_generator, generate_RPC_packet, \
    create_libp2p_hosts, create_pubsub_and_gossipsub_instances, sparse_connect, dense_connect, \
    connect
from tests.utils import cleanup

SUPPORTED_PROTOCOLS = ["/gossipsub/1.0.0"]


@pytest.mark.asyncio
async def test_dense():
    # Create libp2p hosts
    next_msg_id_func = message_id_generator(0)
    num_hosts = 10
    num_msgs = 5
    libp2p_hosts = await create_libp2p_hosts(num_hosts)

    # Create pubsub, gossipsub instances
    pubsubs, gossipsubs = create_pubsub_and_gossipsub_instances(libp2p_hosts, \
                                                                SUPPORTED_PROTOCOLS, \
                                                                10, 9, 11, 30, 3, 5, 0.5)

    # All pubsub instances subscribe to "foobar"
    queues = []
    for pubsub in pubsubs:
        q = await pubsub.subscribe("foobar")

        # Add each blocking queue to an array of blocking queues
        queues.append(q)

    # Densely connect the libp2p hosts
    await dense_connect(libp2p_hosts)

    # Wait 2 seconds for the heartbeat to allow the mesh to form
    await asyncio.sleep(2)

    for i in range(num_msgs):
        msg_content = "foo " + str(i)

        # Randomly pick a message origin
        origin_idx = random.randint(0, num_hosts - 1)
        origin_host = libp2p_hosts[origin_idx]
        host_id = str(origin_host.get_id())

        # Generate message packet
        packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func())

        # Publish from the randomly chosen host
        await gossipsubs[origin_idx].publish(host_id, packet.SerializeToString())

        await asyncio.sleep(0.5)
        # Assert that all blocking queues receive the message
        items = []
        for queue in queues:
            msg = await queue.get()
            assert msg.data == packet.publish[0].data
            items.append(msg.data)
    await cleanup()


@pytest.mark.asyncio
async def test_fanout():
    # Create libp2p hosts
    next_msg_id_func = message_id_generator(0)
    num_hosts = 10
    num_msgs = 5
    libp2p_hosts = await create_libp2p_hosts(num_hosts)

    # Create pubsub, gossipsub instances
    pubsubs, gossipsubs = create_pubsub_and_gossipsub_instances(libp2p_hosts, \
                                                                SUPPORTED_PROTOCOLS, \
                                                                10, 9, 11, 30, 3, 5, 0.5)

    # All pubsub instances except the origin's subscribe to "foobar"
    queues = []
    for i in range(1, len(pubsubs)):
        q = await pubsubs[i].subscribe("foobar")

        # Add each blocking queue to an array of blocking queues
        queues.append(q)

    # Densely connect the libp2p hosts
    await dense_connect(libp2p_hosts)

    # Wait 2 seconds for the heartbeat to allow the mesh to form
    await asyncio.sleep(2)

    # Send messages whose origin is not subscribed
    for i in range(num_msgs):
        msg_content = "foo " + str(i)

        # Make the message originate from the one node not subscribed to "foobar"
        origin_idx = 0
        origin_host = libp2p_hosts[origin_idx]
        host_id = str(origin_host.get_id())

        # Generate message packet
        packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func())

        # Publish from the origin host
        await gossipsubs[origin_idx].publish(host_id, packet.SerializeToString())

        await asyncio.sleep(0.5)
        # Assert that all blocking queues receive the message
        for queue in queues:
            msg = await queue.get()
            assert msg.SerializeToString() == packet.publish[0].SerializeToString()

    # Subscribe the message origin as well
    queues.append(await pubsubs[0].subscribe("foobar"))

    # Send messages again, now with every node subscribed
    for i in range(num_msgs):
        msg_content = "foo " + str(i)

        origin_idx = 0
        origin_host = libp2p_hosts[origin_idx]
        host_id = str(origin_host.get_id())

        # Generate message packet
        packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func())

        # Publish from the origin host
        await gossipsubs[origin_idx].publish(host_id, packet.SerializeToString())

        await asyncio.sleep(0.5)
        # Assert that all blocking queues receive the message
        for queue in queues:
            msg = await queue.get()
            assert msg.SerializeToString() == packet.publish[0].SerializeToString()
    await cleanup()


@pytest.mark.asyncio
async def test_fanout_maintenance():
    # Create libp2p hosts
    next_msg_id_func = message_id_generator(0)
    num_hosts = 10
    num_msgs = 5
    libp2p_hosts = await create_libp2p_hosts(num_hosts)

    # Create pubsub, gossipsub instances
    pubsubs, gossipsubs = create_pubsub_and_gossipsub_instances(libp2p_hosts, \
                                                                SUPPORTED_PROTOCOLS, \
                                                                10, 9, 11, 30, 3, 5, 0.5)

    # All pubsub instances except the origin's subscribe to "foobar"
    queues = []
    for i in range(1, len(pubsubs)):
        q = await pubsubs[i].subscribe("foobar")

        # Add each blocking queue to an array of blocking queues
        queues.append(q)

    # Densely connect the libp2p hosts
    await dense_connect(libp2p_hosts)

    # Wait 2 seconds for the heartbeat to allow the mesh to form
    await asyncio.sleep(2)

    # Send messages whose origin is not subscribed
    for i in range(num_msgs):
        msg_content = "foo " + str(i)

        # Make the message originate from the one node not subscribed to "foobar"
        origin_idx = 0
        origin_host = libp2p_hosts[origin_idx]
        host_id = str(origin_host.get_id())

        # Generate message packet
        packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func())

        # Publish from the origin host
        await gossipsubs[origin_idx].publish(host_id, packet.SerializeToString())

        await asyncio.sleep(0.5)
        # Assert that all blocking queues receive the message
        for queue in queues:
            msg = await queue.get()
            assert msg.SerializeToString() == packet.publish[0].SerializeToString()

    # Unsubscribe everyone, then resubscribe and repeat
    for sub in pubsubs:
        await sub.unsubscribe('foobar')

    queues = []

    await asyncio.sleep(2)

    # Resubscribe all nodes except the origin
    for i in range(1, len(pubsubs)):
        q = await pubsubs[i].subscribe("foobar")

        # Add each blocking queue to an array of blocking queues
        queues.append(q)

    await asyncio.sleep(2)

    # Check that messages can still be sent
    for i in range(num_msgs):
        msg_content = "foo " + str(i)

        origin_idx = 0
        origin_host = libp2p_hosts[origin_idx]
        host_id = str(origin_host.get_id())

        # Generate message packet
        packet = generate_RPC_packet(host_id, ["foobar"], msg_content, next_msg_id_func())

        # Publish from the origin host
        await gossipsubs[origin_idx].publish(host_id, packet.SerializeToString())

        await asyncio.sleep(0.5)
        # Assert that all blocking queues receive the message
        for queue in queues:
            msg = await queue.get()
            assert msg.SerializeToString() == packet.publish[0].SerializeToString()
    await cleanup()


@pytest.mark.asyncio
async def test_gossip_propagation():
    # Create libp2p hosts
    next_msg_id_func = message_id_generator(0)
    num_hosts = 2
    libp2p_hosts = await create_libp2p_hosts(num_hosts)

    # Create pubsub, gossipsub instances
    pubsubs, gossipsubs = create_pubsub_and_gossipsub_instances(libp2p_hosts, \
                                                                SUPPORTED_PROTOCOLS, \
                                                                1, 0, 2, 30, 50, 100, 0.5)
    node1, node2 = libp2p_hosts[0], libp2p_hosts[1]
    sub1, sub2 = pubsubs[0], pubsubs[1]
    gsub1, gsub2 = gossipsubs[0], gossipsubs[1]

    node1_queue = await sub1.subscribe('foo')

    # Node 1 publishes to the topic before node 2 is connected or subscribed
    msg_content = 'foo_msg'
    node1_id = str(node1.get_id())

    # Generate message packet
    packet = generate_RPC_packet(node1_id, ["foo"], msg_content, next_msg_id_func())

    # Publish from node 1
    await gsub1.publish(node1_id, packet.SerializeToString())

    # Now node 2 subscribes and connects
    node2_queue = await sub2.subscribe('foo')
    await connect(node2, node1)

    # Wait for a gossip heartbeat
    await asyncio.sleep(2)

    # Node 2 should be able to read the message via gossip
    msg = await node2_queue.get()
    assert msg.SerializeToString() == packet.publish[0].SerializeToString()
    await cleanup()
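
test_gossip_propagation above depends on the IHAVE/IWANT exchange rather than mesh delivery: node 1 publishes before node 2 is connected or subscribed, so node 2 can only learn of the message from gossip. The PR implements this in the router's handle_ihave/handle_iwant (per the commit log); the toy model below, with hypothetical free functions, shows the shape of the exchange.

    def emit_ihave(window_ids, peers):
        # Map each non-mesh peer in the topic to the message ids we advertise (IHAVE).
        return {peer: list(window_ids) for peer in peers}

    def handle_ihave(advertised_ids, seen_ids):
        # Return the ids we have not seen yet, i.e. the IWANT request.
        return [mid for mid in advertised_ids if mid not in seen_ids]

    def handle_iwant(wanted_ids, cached_msgs):
        # Return full messages for requested ids that are still in the cache.
        return [cached_msgs[mid] for mid in wanted_ids if mid in cached_msgs]

    # Node 1 publishes "foo_msg" before node 2 has connected or subscribed;
    # node 2 only learns of it via this heartbeat-driven exchange:
    advertised = emit_ihave(["msg-id-0"], ["node2"])["node2"]  # node 1's window("foo")
    iwant = handle_ihave(advertised, seen_ids=set())           # node 2 has seen nothing
    received = handle_iwant(iwant, {"msg-id-0": b"foo_msg"})   # node 1 serves from mcache
    assert received == [b"foo_msg"]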


@@ -0,0 +1,519 @@
import asyncio
import multiaddr
import pytest

from libp2p import new_node
from libp2p.peer.peerinfo import info_from_p2p_addr
from libp2p.pubsub.gossipsub import GossipSub
from libp2p.pubsub.floodsub import FloodSub
from libp2p.pubsub.pb import rpc_pb2
from libp2p.pubsub.pubsub import Pubsub
from utils import message_id_generator, generate_RPC_packet
from tests.utils import cleanup

# pylint: disable=too-many-locals


async def connect(node1, node2):
    """
    Connect node1 to node2
    """
    addr = node2.get_addrs()[0]
    info = info_from_p2p_addr(addr)
    await node1.connect(info)


@pytest.mark.asyncio
async def test_init():
    node = await new_node(transport_opt=["/ip4/127.1/tcp/0"])

    await node.get_network().listen(multiaddr.Multiaddr("/ip4/127.1/tcp/0"))

    supported_protocols = ["/gossipsub/1.0.0"]

    gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30)
    pubsub = Pubsub(node, gossipsub, "a")

    # Did it work?
    assert gossipsub and pubsub

    await cleanup()


async def perform_test_from_obj(obj):
    """
    Perform a floodsub test from a test obj.
    A test obj is composed as follows:

    {
        "supported_protocols": ["supported/protocol/1.0.0",...],
        "adj_list": {
            "node1": ["neighbor1_of_node1", "neighbor2_of_node1", ...],
            "node2": ["neighbor1_of_node2", "neighbor2_of_node2", ...],
            ...
        },
        "topic_map": {
            "topic1": ["node1_subscribed_to_topic1", "node2_subscribed_to_topic1", ...]
        },
        "messages": [
            {
                "topics": ["topic1_for_message", "topic2_for_message", ...],
                "data": "some contents of the message (newlines are not supported)",
                "node_id": "message sender node id"
            },
            ...
        ]
    }

    NOTE: In adj_list, for any two neighbors A and B, list either B as a
    neighbor of A or A as a neighbor of B, but not both, as the behavior
    is undefined otherwise (even if it may work).
    """

    # Step 1) Create graph
    adj_list = obj["adj_list"]
    node_map = {}
    gossipsub_map = {}
    pubsub_map = {}

    supported_protocols = obj["supported_protocols"]

    tasks_connect = []
    for start_node_id in adj_list:
        # Create node if node does not yet exist
        if start_node_id not in node_map:
            node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
            await node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))

            node_map[start_node_id] = node

            gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30)
            gossipsub_map[start_node_id] = gossipsub
            pubsub = Pubsub(node, gossipsub, start_node_id)
            pubsub_map[start_node_id] = pubsub

        # For each neighbor of start_node, create it if it does not yet exist,
        # then connect start_node to the neighbor
        for neighbor_id in adj_list[start_node_id]:
            # Create neighbor if neighbor does not yet exist
            if neighbor_id not in node_map:
                neighbor_node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
                await neighbor_node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))

                node_map[neighbor_id] = neighbor_node

                gossipsub = GossipSub(supported_protocols, 3, 2, 4, 30)
                gossipsub_map[neighbor_id] = gossipsub
                pubsub = Pubsub(neighbor_node, gossipsub, neighbor_id)
                pubsub_map[neighbor_id] = pubsub

            # Connect node and neighbor
            tasks_connect.append(asyncio.ensure_future(connect(node_map[start_node_id], node_map[neighbor_id])))
    tasks_connect.append(asyncio.sleep(2))
    await asyncio.gather(*tasks_connect)

    # Allow time for graph creation before continuing
    # await asyncio.sleep(0.25)

    # Step 2) Subscribe to topics
    queues_map = {}
    topic_map = obj["topic_map"]

    tasks_topic = []
    tasks_topic_data = []
    for topic in topic_map:
        for node_id in topic_map[topic]:
            # Subscribe node to topic
            tasks_topic.append(asyncio.ensure_future(pubsub_map[node_id].subscribe(topic)))
            tasks_topic_data.append((node_id, topic))
    tasks_topic.append(asyncio.sleep(2))

    # Gather is like Promise.all
    responses = await asyncio.gather(*tasks_topic, return_exceptions=True)
    for i in range(len(responses) - 1):
        q = responses[i]
        node_id, topic = tasks_topic_data[i]

        # Create topic-queue map for node_id if one does not yet exist
        if node_id not in queues_map:
            queues_map[node_id] = {}

        # Store queue in topic-queue map for node
        queues_map[node_id][topic] = q

    # Allow time for subscribing before continuing
    # await asyncio.sleep(0.01)

    # Step 3) Publish messages
    topics_in_msgs_ordered = []
    messages = obj["messages"]
    tasks_publish = []
    next_msg_id_func = message_id_generator(0)
    for msg in messages:
        topics = msg["topics"]
        data = msg["data"]
        node_id = msg["node_id"]

        # Get actual id for sender node (not the id from the test obj)
        actual_node_id = str(node_map[node_id].get_id())

        # Create correctly formatted message
        msg_talk = generate_RPC_packet(actual_node_id, topics, data, next_msg_id_func())

        # Publish message
        tasks_publish.append(asyncio.ensure_future(gossipsub_map[node_id].publish(\
            actual_node_id, msg_talk.SerializeToString())))

        # For each topic in topics, add a (topic, msg_talk) tuple to the ordered test list
        # TODO: Update message sender to be the correct message sender before
        # adding msg_talk to this list
        for topic in topics:
            topics_in_msgs_ordered.append((topic, msg_talk))

    # Allow time for publishing before continuing
    # await asyncio.sleep(0.4)
    tasks_publish.append(asyncio.sleep(2))
    await asyncio.gather(*tasks_publish)

    # Step 4) Check that all messages were received correctly.
    # TODO: Check message sender too
    for i in range(len(topics_in_msgs_ordered)):
        topic, actual_msg = topics_in_msgs_ordered[i]

        # Look at each node in each topic
        for node_id in topic_map[topic]:
            # Get message from subscription queue
            msg_on_node = await queues_map[node_id][topic].get()
            assert actual_msg.publish[0].SerializeToString() == msg_on_node.SerializeToString()

    # Success, terminate pending tasks.
    await cleanup()


@pytest.mark.asyncio
async def test_simple_two_nodes_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "A": ["B"]
        },
        "topic_map": {
            "topic1": ["B"]
        },
        "messages": [
            {
                "topics": ["topic1"],
                "data": "foo",
                "node_id": "A"
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@pytest.mark.asyncio
async def test_three_nodes_two_topics_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "A": ["B"],
            "B": ["C"]
        },
        "topic_map": {
            "topic1": ["B", "C"],
            "topic2": ["B", "C"]
        },
        "messages": [
            {
                "topics": ["topic1"],
                "data": "foo",
                "node_id": "A"
            },
            {
                "topics": ["topic2"],
                "data": "Alex is tall",
                "node_id": "A"
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@pytest.mark.asyncio
async def test_two_nodes_one_topic_single_subscriber_is_sender_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "A": ["B"]
        },
        "topic_map": {
            "topic1": ["B"]
        },
        "messages": [
            {
                "topics": ["topic1"],
                "data": "Alex is tall",
                "node_id": "B"
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@pytest.mark.asyncio
async def test_two_nodes_one_topic_two_msgs_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "A": ["B"]
        },
        "topic_map": {
            "topic1": ["B"]
        },
        "messages": [
            {
                "topics": ["topic1"],
                "data": "Alex is tall",
                "node_id": "B"
            },
            {
                "topics": ["topic1"],
                "data": "foo",
                "node_id": "A"
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@pytest.mark.asyncio
async def test_seven_nodes_tree_one_topics_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2", "3"],
            "2": ["4", "5"],
            "3": ["6", "7"]
        },
        "topic_map": {
            "astrophysics": ["2", "3", "4", "5", "6", "7"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@pytest.mark.asyncio
async def test_seven_nodes_tree_three_topics_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2", "3"],
            "2": ["4", "5"],
            "3": ["6", "7"]
        },
        "topic_map": {
            "astrophysics": ["2", "3", "4", "5", "6", "7"],
            "space": ["2", "3", "4", "5", "6", "7"],
            "onions": ["2", "3", "4", "5", "6", "7"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            },
            {
                "topics": ["space"],
                "data": "foobar",
                "node_id": "1"
            },
            {
                "topics": ["onions"],
                "data": "I am allergic",
                "node_id": "1"
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@pytest.mark.asyncio
async def test_seven_nodes_tree_three_topics_diff_origin_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2", "3"],
            "2": ["4", "5"],
            "3": ["6", "7"]
        },
        "topic_map": {
            "astrophysics": ["1", "2", "3", "4", "5", "6", "7"],
            "space": ["1", "2", "3", "4", "5", "6", "7"],
            "onions": ["1", "2", "3", "4", "5", "6", "7"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            },
            {
                "topics": ["space"],
                "data": "foobar",
                "node_id": "4"
            },
            {
                "topics": ["onions"],
                "data": "I am allergic",
                "node_id": "7"
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@pytest.mark.asyncio
async def test_three_nodes_clique_two_topic_diff_origin_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2", "3"],
            "2": ["3"]
        },
        "topic_map": {
            "astrophysics": ["1", "2", "3"],
            "school": ["1", "2", "3"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            },
            {
                "topics": ["school"],
                "data": "foobar",
                "node_id": "2"
            },
            {
                "topics": ["astrophysics"],
                "data": "I am allergic",
                "node_id": "1"
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@pytest.mark.asyncio
async def test_four_nodes_clique_two_topic_diff_origin_many_msgs_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2", "3", "4"],
            "2": ["1", "3", "4"],
            "3": ["1", "2", "4"],
            "4": ["1", "2", "3"]
        },
        "topic_map": {
            "astrophysics": ["1", "2", "3", "4"],
            "school": ["1", "2", "3", "4"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            },
            {
                "topics": ["school"],
                "data": "foobar",
                "node_id": "2"
            },
            {
                "topics": ["astrophysics"],
                "data": "I am allergic",
                "node_id": "1"
            },
            {
                "topics": ["school"],
                "data": "foobar2",
                "node_id": "2"
            },
            {
                "topics": ["astrophysics"],
                "data": "I am allergic2",
                "node_id": "1"
            },
            {
                "topics": ["school"],
                "data": "foobar3",
                "node_id": "2"
            },
            {
                "topics": ["astrophysics"],
                "data": "I am allergic3",
                "node_id": "1"
            }
        ]
    }
    await perform_test_from_obj(test_obj)


@pytest.mark.asyncio
async def test_five_nodes_ring_two_topic_diff_origin_many_msgs_test_obj():
    test_obj = {
        "supported_protocols": ["/floodsub/1.0.0"],
        "adj_list": {
            "1": ["2"],
            "2": ["3"],
            "3": ["4"],
            "4": ["5"],
            "5": ["1"]
        },
        "topic_map": {
            "astrophysics": ["1", "2", "3", "4", "5"],
            "school": ["1", "2", "3", "4", "5"]
        },
        "messages": [
            {
                "topics": ["astrophysics"],
                "data": "e=mc^2",
                "node_id": "1"
            },
            {
                "topics": ["school"],
                "data": "foobar",
                "node_id": "2"
            },
            {
                "topics": ["astrophysics"],
                "data": "I am allergic",
                "node_id": "1"
            },
            {
                "topics": ["school"],
                "data": "foobar2",
                "node_id": "2"
            },
            {
                "topics": ["astrophysics"],
                "data": "I am allergic2",
                "node_id": "1"
            },
            {
                "topics": ["school"],
                "data": "foobar3",
                "node_id": "2"
            },
            {
                "topics": ["astrophysics"],
                "data": "I am allergic3",
                "node_id": "1"
            }
        ]
    }
    await perform_test_from_obj(test_obj)

tests/pubsub/test_mcache.py (new file, 129 lines)

@@ -0,0 +1,129 @@
import pytest

from libp2p.pubsub.mcache import MessageCache


class Msg:

    def __init__(self, topicIDs, seqno, from_id):
        self.topicIDs = topicIDs
        self.seqno = seqno
        self.from_id = from_id


@pytest.mark.asyncio
async def test_mcache():
    # Ported from:
    # https://github.com/libp2p/go-libp2p-pubsub
    # /blob/51b7501433411b5096cac2b4994a36a68515fc03/mcache_test.go
    mcache = MessageCache(3, 5)
    msgs = []

    for i in range(60):
        msgs.append(Msg(["test"], i, "test"))

    for i in range(10):
        mcache.put(msgs[i])

    for i in range(10):
        msg = msgs[i]
        mid = (msg.seqno, msg.from_id)
        get_msg = mcache.get(mid)

        # Successful read
        assert get_msg == msg

    gids = mcache.window('test')
    assert len(gids) == 10

    for i in range(10):
        msg = msgs[i]
        mid = (msg.seqno, msg.from_id)
        assert mid == gids[i]

    mcache.shift()

    for i in range(10, 20):
        mcache.put(msgs[i])

    for i in range(20):
        msg = msgs[i]
        mid = (msg.seqno, msg.from_id)
        get_msg = mcache.get(mid)
        assert get_msg == msg

    gids = mcache.window('test')
    assert len(gids) == 20

    for i in range(10):
        msg = msgs[i]
        mid = (msg.seqno, msg.from_id)
        assert mid == gids[10 + i]

    for i in range(10, 20):
        msg = msgs[i]
        mid = (msg.seqno, msg.from_id)
        assert mid == gids[i - 10]

    mcache.shift()

    for i in range(20, 30):
        mcache.put(msgs[i])

    mcache.shift()

    for i in range(30, 40):
        mcache.put(msgs[i])

    mcache.shift()

    for i in range(40, 50):
        mcache.put(msgs[i])

    mcache.shift()

    for i in range(50, 60):
        mcache.put(msgs[i])

    assert len(mcache.msgs) == 50

    for i in range(10):
        msg = msgs[i]
        mid = (msg.seqno, msg.from_id)
        get_msg = mcache.get(mid)

        # Should be evicted from the cache
        assert not get_msg

    for i in range(10, 60):
        msg = msgs[i]
        mid = (msg.seqno, msg.from_id)
        get_msg = mcache.get(mid)
        assert get_msg == msg

    gids = mcache.window('test')
    assert len(gids) == 30

    for i in range(10):
        msg = msgs[50 + i]
        mid = (msg.seqno, msg.from_id)
        assert mid == gids[i]

    for i in range(10, 20):
        msg = msgs[30 + i]
        mid = (msg.seqno, msg.from_id)
        assert mid == gids[i]

    for i in range(20, 30):
        msg = msgs[10 + i]
        mid = (msg.seqno, msg.from_id)
        assert mid == gids[i]
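
The assertions above pin down the cache's contract: MessageCache(3, 5) keeps five generations of message ids, window() exposes only the newest three (most recent first) for IHAVE gossip, and each shift() rotates generations, eventually evicting the oldest messages (hence len(mcache.msgs) == 50 after ten of the sixty messages age out). Below is a sketch consistent with that contract; the put/get/window/shift/msgs surface comes from the test, while the internal generation-list layout and the Sketch* names are assumptions, not the PR's actual implementation.

    class SketchCacheEntry:
        def __init__(self, mid, topics):
            self.mid = mid
            self.topics = topics

    class SketchMessageCache:
        def __init__(self, window_size, history_size):
            self.window_size = window_size
            self.history_size = history_size
            self.msgs = {}  # (seqno, from_id) -> message
            # history[0] is the current generation, history[-1] the oldest
            self.history = [[] for _ in range(history_size)]

        def put(self, msg):
            mid = (msg.seqno, msg.from_id)
            self.msgs[mid] = msg
            self.history[0].append(SketchCacheEntry(mid, msg.topicIDs))

        def get(self, mid):
            return self.msgs.get(mid)

        def window(self, topic):
            # Only the newest `window_size` generations are gossiped via IHAVE
            return [entry.mid
                    for generation in self.history[:self.window_size]
                    for entry in generation
                    if topic in entry.topics]

        def shift(self):
            # Drop the oldest generation and evict its messages
            for entry in self.history[-1]:
                self.msgs.pop(entry.mid, None)
            self.history.insert(0, [])
            self.history.pop()

    # Mirrors the first stage of the test above:
    class _Msg:
        def __init__(self, topicIDs, seqno, from_id):
            self.topicIDs = topicIDs
            self.seqno = seqno
            self.from_id = from_id

    cache = SketchMessageCache(3, 5)
    cache.put(_Msg(["test"], 0, "test"))
    assert cache.window("test") == [(0, "test")]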


@@ -1,6 +1,13 @@
import asyncio
import multiaddr
import uuid
import random
import struct

from libp2p import new_node
from libp2p.pubsub.pb import rpc_pb2
from libp2p.peer.peerinfo import info_from_p2p_addr
from libp2p.pubsub.pubsub import Pubsub
from libp2p.pubsub.gossipsub import GossipSub


def message_id_generator(start_val):
@@ -42,3 +49,76 @@ def generate_RPC_packet(origin_id, topics, msg_content, msg_id):
    packet.publish.extend([message])
    return packet


async def connect(node1, node2):
    """
    Connect node1 to node2
    """
    addr = node2.get_addrs()[0]
    info = info_from_p2p_addr(addr)
    await node1.connect(info)


async def create_libp2p_hosts(num_hosts):
    """
    Create libp2p hosts
    :param num_hosts: number of hosts to create
    """
    hosts = []
    tasks_create = []
    for i in range(0, num_hosts):
        # Create node
        tasks_create.append(asyncio.ensure_future(new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])))
    hosts = await asyncio.gather(*tasks_create)

    tasks_listen = []
    for node in hosts:
        # Start listener
        tasks_listen.append(asyncio.ensure_future(node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))))
    await asyncio.gather(*tasks_listen)

    return hosts


def create_pubsub_and_gossipsub_instances(libp2p_hosts, supported_protocols, degree, degree_low, \
                                          degree_high, time_to_live, gossip_window, gossip_history, heartbeat_interval):
    pubsubs = []
    gossipsubs = []
    for node in libp2p_hosts:
        gossipsub = GossipSub(supported_protocols, degree,
                              degree_low, degree_high, time_to_live,
                              gossip_window, gossip_history,
                              heartbeat_interval)
        pubsub = Pubsub(node, gossipsub, "a")
        pubsubs.append(pubsub)
        gossipsubs.append(gossipsub)

    return pubsubs, gossipsubs


async def sparse_connect(hosts):
    await connect_some(hosts, 3)


async def dense_connect(hosts):
    await connect_some(hosts, 10)


async def connect_some(hosts, degree):
    # NOTE: `degree` is currently unused; every pair of hosts is connected.
    # The commented-out code below is the intended random, degree-bounded
    # connection logic.
    for i, host in enumerate(hosts):
        for j, host2 in enumerate(hosts):
            if i != j and i < j:
                await connect(host, host2)

    # TODO: USE THE CODE BELOW
    # for i, host in enumerate(hosts):
    #     j = 0
    #     while j < degree:
    #         n = random.randint(0, len(hosts) - 1)
    #
    #         if n == i:
    #             j -= 1
    #             continue
    #
    #         neighbor = hosts[n]
    #         await connect(host, neighbor)
    #
    #         j += 1