Mirror of https://github.com/varun-r-mallya/py-libp2p.git (synced 2026-02-10 15:10:54 +00:00)

Commit: Merge branch 'master' into feature/porting-to-trio
23 .bumpversion.cfg Normal file
@@ -0,0 +1,23 @@
[bumpversion]
current_version = 0.1.4
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(-(?P<stage>[^.]*)\.(?P<devnum>\d+))?
serialize =
    {major}.{minor}.{patch}-{stage}.{devnum}
    {major}.{minor}.{patch}

[bumpversion:part:stage]
optional_value = stable
first_value = stable
values =
    alpha
    beta
    stable

[bumpversion:part:devnum]

[bumpversion:file:setup.py]
search = version="{current_version}",
replace = version="{new_version}",
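The `parse` regex and the two `serialize` templates above are what let bumpversion round-trip both plain releases (`0.1.4`) and pre-releases such as `0.2.0-alpha.1`. A minimal sketch of that round-trip using only Python's `re` module; the sample version strings are illustrative, not taken from the repository:

```python
import re

# The parse pattern from .bumpversion.cfg; its named groups feed the
# serialize templates.
PARSE = re.compile(
    r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
    r"(-(?P<stage>[^.]*)\.(?P<devnum>\d+))?"
)

for version in ("0.1.4", "0.2.0-alpha.1"):
    groups = PARSE.match(version).groupdict()
    if groups["stage"] is not None:
        # first serialize template, used when stage/devnum are present
        out = "{major}.{minor}.{patch}-{stage}.{devnum}".format(**groups)
    else:
        # fallback template for plain releases
        out = "{major}.{minor}.{patch}".format(**groups)
    assert out == version
```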
77 .circleci/config.yml Normal file
@@ -0,0 +1,77 @@
version: 2.0

# heavily inspired by https://raw.githubusercontent.com/pinax/pinax-wiki/6bd2a99ab6f702e300d708532a6d1d9aa638b9f8/.circleci/config.yml

common: &common
  working_directory: ~/repo
  steps:
    - checkout
    - run:
        name: merge pull request base
        command: ./.circleci/merge_pr.sh
    - run:
        name: merge pull request base (2nd try)
        command: ./.circleci/merge_pr.sh
        when: on_fail
    - run:
        name: merge pull request base (3rd try)
        command: ./.circleci/merge_pr.sh
        when: on_fail
    - restore_cache:
        keys:
          - cache-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }}
    - run:
        name: install dependencies
        command: pip install --user tox
    - run:
        name: run tox
        command: ~/.local/bin/tox -r
    - save_cache:
        paths:
          - .hypothesis
          - .tox
          - ~/.cache/pip
          - ~/.local
          - ./eggs
        key: cache-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }}

jobs:
  docs:
    <<: *common
    docker:
      - image: circleci/python:3.6
        environment:
          TOXENV: docs
  lint:
    <<: *common
    docker:
      - image: circleci/python:3.6
        environment:
          TOXENV: lint
  py36-core:
    <<: *common
    docker:
      - image: circleci/python:3.6
        environment:
          TOXENV: py36-core
  py37-core:
    <<: *common
    docker:
      - image: circleci/python:3.7
        environment:
          TOXENV: py37-core
  pypy3-core:
    <<: *common
    docker:
      - image: pypy
        environment:
          TOXENV: pypy3-core

workflows:
  version: 2
  test:
    jobs:
      - docs
      - lint
      - py36-core
      - py37-core
      - pypy3-core
12 .circleci/merge_pr.sh Executable file
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

if [[ -n "${CIRCLE_PR_NUMBER}" ]]; then
  PR_INFO_URL=https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pulls/$CIRCLE_PR_NUMBER
  PR_BASE_BRANCH=$(curl -L "$PR_INFO_URL" | python -c 'import json, sys; obj = json.load(sys.stdin); sys.stdout.write(obj["base"]["ref"])')
  git fetch origin +"$PR_BASE_BRANCH":circleci/pr-base
  # We need these config values or git complains when creating the
  # merge commit
  git config --global user.name "Circle CI"
  git config --global user.email "circleci@example.com"
  git merge --no-edit circleci/pr-base
fi
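merge_pr.sh asks the GitHub API for the pull request's base branch and merges it in, so CI exercises the merged result rather than the PR head alone. A rough Python equivalent of the `curl | python` lookup in the script; the function name and direct arguments are illustrative, while the real script builds the URL from `CIRCLE_*` environment variables:

```python
import json
import urllib.request

def pr_base_ref(owner: str, repo: str, pr_number: str) -> str:
    # Same JSON field the one-liner in merge_pr.sh extracts: obj["base"]["ref"]
    url = f"https://api.github.com/repos/{owner}/{repo}/pulls/{pr_number}"
    with urllib.request.urlopen(url) as resp:
        return json.load(resp)["base"]["ref"]
```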
38 .github/ISSUE_TEMPLATE.md vendored Normal file
@@ -0,0 +1,38 @@
_If this is a bug report, please fill in the following sections.
If this is a feature request, delete these sections and describe what you would like, with examples._

## What was wrong?

### Code that produced the error

```py
CODE_TO_REPRODUCE
```

### Full error output

```sh
ERROR_HERE
```

### Expected Result

_This section may be deleted if the expectation is "don't crash"._

```sh
EXPECTED_RESULT
```

### Environment

```sh
# run this:
$ python -m eth_utils

# then copy the output here:
OUTPUT_HERE
```

## How can it be fixed?

Fill this section in if you know how this could or should be fixed.
21 .github/PULL_REQUEST_TEMPLATE.md vendored Normal file
@@ -0,0 +1,21 @@
## What was wrong?

Issue #

## How was it fixed?

Summary of approach.

### To-Do

[//]: # (Stay ahead of things, add list items here!)
- [ ] Clean up commit history

[//]: # (For important changes that should go into the release notes please add a newsfragment file as explained here: https://github.com/libp2p/py-libp2p/blob/master/newsfragments/README.md)

[//]: # (See: https://py-libp2p.readthedocs.io/en/latest/contributing.html#pull-requests)
- [ ] Add entry to the [release notes](https://github.com/libp2p/py-libp2p/blob/master/newsfragments/README.md)

#### Cute Animal Picture

![put a cute animal picture link inside the parentheses]()
141 .gitignore vendored
@@ -1,57 +1,133 @@
 # Byte-compiled / optimized / DLL files
-__pycache__/
 *.py[cod]
+__pycache__/
 *$py.class

 # C extensions
 *.so

 # Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-.installed.cfg
 *.egg
+*.egg-info
+dist
+build
+eggs
+.eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+venv*
+.Python
+downloads/
+wheels/
 MANIFEST
 pip-wheel-metadata

-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
 # Installer logs
 pip-log.txt
 pip-delete-this-directory.txt

 # Unit test / coverage reports
-htmlcov/
-.tox/
 .coverage
-.coverage.*
-.cache
+.tox
 nosetests.xml
+htmlcov/
+.coverage.*
 coverage.xml
 *.cover
-.hypothesis/
 .pytest_cache/

 # Translations
 *.mo
 *.pot

+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
+
+# Complexity
+output/*.html
+output/*/index.html
+
+# Sphinx
+docs/_build
+docs/modules.rst
+docs/*.internal.rst
+docs/*._utils.*
+
+# Hypothesis property-based testing
+.hypothesis
+
+# tox/pytest cache
+.cache
+
+# Test output logs
+logs
+
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea/workspace.xml
+.idea/tasks.xml
+.idea/dictionaries
+.idea/vcs.xml
+.idea/jsLibraryMappings.xml
+
+# Sensitive or high-churn files:
+.idea/dataSources.ids
+.idea/dataSources.xml
+.idea/dataSources.local.xml
+.idea/sqlDataSources.xml
+.idea/dynamic.xml
+.idea/uiDesigner.xml
+
+# Gradle:
+.idea/gradle.xml
+.idea/libraries
+
+# Mongo Explorer plugin:
+.idea/mongoSettings.xml
+
+# VIM temp files
+*.sw[op]
+
+# mypy
+.mypy_cache
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
 # Django stuff:
 *.log
 local_settings.py
@@ -64,9 +140,6 @@ instance/
 # Scrapy stuff:
 .scrapy

-# Sphinx documentation
-docs/_build/
-
 # PyBuilder
 target/

@@ -86,10 +159,8 @@ celerybeat-schedule
 .env
 .venv
 env/
-venv/
 ENV/
 env.bak/
-venv.bak/

 # Spyder project settings
 .spyderproject
@@ -101,11 +172,5 @@ venv.bak/
 # mkdocs documentation
 /site

-# mypy
-.mypy_cache/
-
-# pycharm
-.idea/
-
 # vscode
 .vscode/
48 .project-template/fill_template_vars.sh Executable file
@@ -0,0 +1,48 @@
#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail

PROJECT_ROOT=$(dirname $(dirname $(python -c 'import os, sys; sys.stdout.write(os.path.realpath(sys.argv[1]))' "$0")))

echo "What is your python module name?"
read MODULE_NAME

echo "What is your pypi package name? (default: $MODULE_NAME)"
read PYPI_INPUT
PYPI_NAME=${PYPI_INPUT:-$MODULE_NAME}

echo "What is your github project name? (default: $PYPI_NAME)"
read REPO_INPUT
REPO_NAME=${REPO_INPUT:-$PYPI_NAME}

echo "What is your readthedocs.org project name? (default: $PYPI_NAME)"
read RTD_INPUT
RTD_NAME=${RTD_INPUT:-$PYPI_NAME}

echo "What is your project name (ex: at the top of the README)? (default: $REPO_NAME)"
read PROJECT_INPUT
PROJECT_NAME=${PROJECT_INPUT:-$REPO_NAME}

echo "What is a one-liner describing the project?"
read SHORT_DESCRIPTION

_replace() {
    local find_cmd=(find "$PROJECT_ROOT" ! -perm -u=x ! -path '*/.git/*' -type f)

    if [[ $(uname) == Darwin ]]; then
        "${find_cmd[@]}" -exec sed -i '' "$1" {} +
    else
        "${find_cmd[@]}" -exec sed -i "$1" {} +
    fi
}
_replace "s/<MODULE_NAME>/$MODULE_NAME/g"
_replace "s/<PYPI_NAME>/$PYPI_NAME/g"
_replace "s/<REPO_NAME>/$REPO_NAME/g"
_replace "s/<RTD_NAME>/$RTD_NAME/g"
_replace "s/<PROJECT_NAME>/$PROJECT_NAME/g"
_replace "s/<SHORT_DESCRIPTION>/$SHORT_DESCRIPTION/g"

mkdir -p "$PROJECT_ROOT/$MODULE_NAME"
touch "$PROJECT_ROOT/$MODULE_NAME/__init__.py"
2 .project-template/refill_template_vars.sh Executable file
@@ -0,0 +1,2 @@
TEMPLATE_DIR=$(dirname $(readlink -f "$0"))
<"$TEMPLATE_DIR/template_vars.txt" "$TEMPLATE_DIR/fill_template_vars.sh"
6 .project-template/template_vars.txt Normal file
@@ -0,0 +1,6 @@
libp2p
libp2p
py-libp2p
py-libp2p
py-libp2p
The Python implementation of the libp2p networking stack
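Taken together, the three `.project-template` files implement a small templating scheme: `fill_template_vars.sh` prompts for six values and rewrites every `<PLACEHOLDER>` token under the project root, while `refill_template_vars.sh` replays the answers stored one per line in `template_vars.txt` by redirecting that file into the script's stdin. A minimal Python sketch of the same substitution step, assuming it runs from the project root; the traversal and skip logic here are illustrative stand-ins for the script's `find`/`sed` pipeline:

```python
from pathlib import Path

# Answers in template_vars.txt, in the same order as the script's prompts.
KEYS = ["MODULE_NAME", "PYPI_NAME", "REPO_NAME", "RTD_NAME",
        "PROJECT_NAME", "SHORT_DESCRIPTION"]
answers = Path(".project-template/template_vars.txt").read_text().splitlines()
substitutions = dict(zip(KEYS, answers))

# Rewrite <PLACEHOLDER> tokens in every regular file outside .git,
# mirroring the _replace helper in fill_template_vars.sh.
for path in Path(".").rglob("*"):
    if not path.is_file() or ".git" in path.parts:
        continue
    text = path.read_text(errors="ignore")
    for key, value in substitutions.items():
        text = text.replace(f"<{key}>", value)
    path.write_text(text)
```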
30 .pydocstyle.ini Normal file
@@ -0,0 +1,30 @@
[pydocstyle]
; All error codes found here:
; http://www.pydocstyle.org/en/3.0.0/error_codes.html
;
; Ignored:
; D1 - Missing docstring error codes
;
; Selected:
; D2 - Whitespace error codes
; D3 - Quote error codes
; D4 - Content related error codes
select=D2,D3,D4

; Extra ignores:
; D200 - One-line docstring should fit on one line with quotes
; D203 - 1 blank line required before class docstring
; D204 - 1 blank line required after class docstring
; D205 - 1 blank line required between summary line and description
; D212 - Multi-line docstring summary should start at the first line
; D302 - Use u""" for Unicode docstrings
; D400 - First line should end with a period
; D401 - First line should be in imperative mood
; D412 - No blank lines allowed between a section header and its content
add-ignore=D200,D203,D204,D205,D212,D302,D400,D401,D412

; Explanation:
; D400 - Enabling this error code seems to make it a requirement that the first
; sentence in a docstring is not split across two lines. It also makes it a
; requirement that no docstring can have a multi-sentence description without a
; summary line. Neither one of those requirements seems appropriate.
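The trailing explanation is easier to see with a concrete docstring. Under D400, a summary sentence that wraps onto a second line is flagged because the first physical line does not end with a period (the function below is hypothetical, for illustration only):

```python
def load_registry():
    """Return the contents of the
    module-level widget registry."""
    # D400 would flag this docstring: the first physical line ends
    # mid-sentence with no period, even though the sentence is fine.
```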
@@ -2,12 +2,18 @@ language: python

 matrix:
   include:
+    - python: 3.6-dev
+      dist: xenial
+      env: TOXENV=py36-test
     - python: 3.7-dev
       dist: xenial
       env: TOXENV=py37-test
     - python: 3.7-dev
       dist: xenial
       env: TOXENV=lint
+    - python: 3.7-dev
+      dist: xenial
+      env: TOXENV=docs
     - python: 3.7-dev
       dist: xenial
       env: TOXENV=py37-interop
21 LICENSE Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2019 The Ethereum Foundation

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
99 Makefile
@@ -1,3 +1,17 @@
+CURRENT_SIGN_SETTING := $(shell git config commit.gpgSign)
+
+.PHONY: clean-pyc clean-build docs
+
+help:
+	@echo "clean-build - remove build artifacts"
+	@echo "clean-pyc - remove Python file artifacts"
+	@echo "lint - check style with flake8, etc"
+	@echo "lint-roll - auto-correct styles with isort, black, docformatter, etc"
+	@echo "test - run tests quickly with the default Python"
+	@echo "testall - run tests on every Python version with tox"
+	@echo "release - package and upload a release"
+	@echo "dist - package"
+
 FILES_TO_LINT = libp2p tests tests_interop examples setup.py
 PB = libp2p/crypto/pb/crypto.proto \
 	libp2p/pubsub/pb/rpc.proto \
@@ -10,18 +24,6 @@ PYI = $(PB:.proto=_pb2.pyi)
 # Set default to `protobufs`, otherwise `format` is called when typing only `make`
 all: protobufs

-format:
-	black $(FILES_TO_LINT)
-	isort --recursive $(FILES_TO_LINT)
-	docformatter -ir --pre-summary-newline $(FILES_TO_LINT)
-
-lintroll:
-	mypy -p libp2p -p examples --config-file mypy.ini
-	black --check $(FILES_TO_LINT)
-	isort --recursive --check-only $(FILES_TO_LINT)
-	docformatter --pre-summary-newline --check --recursive $(FILES_TO_LINT)
-	flake8 $(FILES_TO_LINT)
-
 protobufs: $(PY)

 %_pb2.py: %.proto
@@ -30,12 +32,81 @@ protobufs: $(PY)
 clean-proto:
 	rm -f $(PY) $(PYI)

-clean:
-	find . -name '__pycache__' -exec rm -rf {} +
+clean: clean-build clean-pyc
+
+clean-build:
 	rm -fr build/
 	rm -fr dist/
 	rm -fr *.egg-info

+clean-pyc:
+	find . -name '*.pyc' -exec rm -f {} +
+	find . -name '*.pyo' -exec rm -f {} +
+	find . -name '*~' -exec rm -f {} +
+	find . -name '__pycache__' -exec rm -rf {} +
+
+lint:
+	mypy -p libp2p -p examples --config-file mypy.ini
+	flake8 $(FILES_TO_LINT)
+	black --check $(FILES_TO_LINT)
+	isort --recursive --check-only --diff $(FILES_TO_LINT)
+	docformatter --pre-summary-newline --check --recursive $(FILES_TO_LINT)
+	tox -e lint # This is probably redundant, but just in case...
+
+lint-roll:
+	isort --recursive $(FILES_TO_LINT)
+	black $(FILES_TO_LINT)
+	docformatter -ir --pre-summary-newline $(FILES_TO_LINT)
+	$(MAKE) lint
+
+test:
+	pytest tests
+
+test-all:
+	tox
+
+build-docs:
+	sphinx-apidoc -o docs/ . setup.py "*conftest*" "libp2p/tools/interop*"
+	$(MAKE) -C docs clean
+	$(MAKE) -C docs html
+	$(MAKE) -C docs doctest
+	./newsfragments/validate_files.py
+	towncrier --draft --version preview
+
+docs: build-docs
+	open docs/_build/html/index.html
+
+linux-docs: build-docs
+	xdg-open docs/_build/html/index.html
+
 package: clean
 	python setup.py sdist bdist_wheel
 	python scripts/release/test_package.py
+
+notes:
+	# Let UPCOMING_VERSION be the version that is used for the current bump
+	$(eval UPCOMING_VERSION=$(shell bumpversion $(bump) --dry-run --list | grep new_version= | sed 's/new_version=//g'))
+	# Now generate the release notes to have them included in the release commit
+	towncrier --yes --version $(UPCOMING_VERSION)
+	# Before we bump the version, make sure that the towncrier-generated docs will build
+	make build-docs
+	git commit -m "Compile release notes"
+
+release: clean
+	# require that you be on a branch that's linked to upstream/master
+	git status -s -b | head -1 | grep "\.\.upstream/master"
+	# verify that docs build correctly
+	./newsfragments/validate_files.py is-empty
+	make build-docs
+	CURRENT_SIGN_SETTING=$(git config commit.gpgSign)
+	git config commit.gpgSign true
+	bumpversion $(bump)
+	git push upstream && git push upstream --tags
+	python setup.py sdist bdist_wheel
+	twine upload dist/*
+	git config commit.gpgSign "$(CURRENT_SIGN_SETTING)"
+
+
+dist: clean
+	python setup.py sdist bdist_wheel
+	ls -l dist
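The trickiest line in the new `notes` target is the `$(eval ...)` that derives the next version number before anything is committed: bumpversion runs in dry-run mode, and the `new_version=` entry is scraped from its `--list` output. A Python sketch of that lookup (the function name is illustrative; `part` corresponds to the `bump` variable passed as `make notes bump=minor`):

```python
import subprocess

def upcoming_version(part: str) -> str:
    # Equivalent of:
    #   bumpversion $(bump) --dry-run --list | grep new_version= | sed 's/new_version=//g'
    out = subprocess.run(
        ["bumpversion", part, "--dry-run", "--list"],
        capture_output=True, text=True, check=True,
    ).stdout
    for line in out.splitlines():
        if line.startswith("new_version="):
            return line.split("=", 1)[1]
    raise RuntimeError("bumpversion did not report a new_version")
```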
64 README.md
@@ -1,42 +1,82 @@
-# py-libp2p [](https://travis-ci.com/libp2p/py-libp2p) [](https://gitter.im/py-libp2p/Lobby) [](https://webchat.freenode.net/?channels=%23libp2p) [](https://riot.permaweb.io/#/room/#libp2p:permaweb.io) [](https://discord.gg/66KBrm2)
+# py-libp2p
+
+[](https://gitter.im/py-libp2p/Lobby)
+[](https://travis-ci.com/libp2p/py-libp2p)
+[](https://badge.fury.io/py/libp2p)
+[](https://pypi.python.org/pypi/libp2p)
+[](http://py-libp2p.readthedocs.io/en/latest/?badge=latest)
+[](https://webchat.freenode.net/?channels=%23libp2p)
+[](https://riot.permaweb.io/#/room/#libp2p:permaweb.io)
+[](https://discord.gg/66KBrm2)

 <h1 align="center">
   <img width="250" align="center" src="https://github.com/libp2p/py-libp2p/blob/master/assets/py-libp2p-logo.png?raw=true" alt="py-libp2p hex logo" />
 </h1>

 ## WARNING
 py-libp2p is an experimental and work-in-progress repo under heavy development. We do not yet recommend using py-libp2p in production environments.
+
+The Python implementation of the libp2p networking stack
+
+Read more in the [documentation on ReadTheDocs](https://py-libp2p.readthedocs.io/). [View the change log](https://py-libp2p.readthedocs.io/en/latest/releases.html).

 ## Sponsorship
 This project is graciously sponsored by the Ethereum Foundation through [Wave 5 of their Grants Program](https://blog.ethereum.org/2019/02/21/ethereum-foundation-grants-program-wave-5/).

 ## Maintainers
 The py-libp2p team consists of:

-[@zixuanzh](https://github.com/zixuanzh) [@alexh](https://github.com/alexh) [@stuckinaboot](https://github.com/stuckinaboot) [@robzajac](https://github.com/robzajac)
+[@zixuanzh](https://github.com/zixuanzh) [@alexh](https://github.com/alexh) [@stuckinaboot](https://github.com/stuckinaboot) [@robzajac](https://github.com/robzajac) [@carver](https://github.com/carver)

 ## Development

 py-libp2p requires Python 3.7 and the best way to guarantee a clean Python 3.7 environment is with [`virtualenv`](https://virtualenv.pypa.io/en/stable/)

 ```sh
+git clone git@github.com:libp2p/py-libp2p.git
+cd py-libp2p
 virtualenv -p python3.7 venv
 . venv/bin/activate
-pip3 install -r requirements_dev.txt
-python setup.py develop
+pip install -e .[dev]
 ```

-## Testing
+### Testing Setup
+
+During development, you might like to have tests run on every file save.
+
+Show flake8 errors on file change:

-After installing our requirements (see above), you can:
 ```sh
-cd tests
-pytest
+# Test flake8
+when-changed -v -s -r -1 libp2p/ tests/ -c "clear; flake8 libp2p tests && echo 'flake8 success' || echo 'error'"
 ```
+
+Run multi-process tests in one command, but without color:
+
+```sh
+# in the project root:
+pytest --numprocesses=4 --looponfail --maxfail=1
+# the same thing, succinctly:
+pytest -n 4 -f --maxfail=1
+```
+
+Run in one thread, with color and desktop notifications:
+
+```sh
+cd venv
+ptw --onfail "notify-send -t 5000 'Test failure ⚠⚠⚠⚠⚠' 'python 3 test on py-libp2p failed'" ../tests ../libp2p
+```

 Note that tests/libp2p/test_libp2p.py contains an end-to-end messaging test between two libp2p hosts, which is the bulk of our proof of concept.
+
+
+### Release setup
+
+Releases follow the same basic pattern as releases of some tangentially-related projects,
+like Trinity. See [Trinity's release instructions](
+https://trinity-client.readthedocs.io/en/latest/contributing.html#releasing).

 ## Requirements

 The protobuf description in this repository was generated by `protoc` at version `3.7.1`.
@@ -99,7 +139,7 @@ py-libp2p aims for conformity with [the standard libp2p modules](https://github.
 | Peer Discovery | Status |
 | -------------------------------------------- | :-----------: |
 | **`bootstrap list`** | :tomato: |
-| **`Kademlia DHT`** | :lemon: |
+| **`Kademlia DHT`** | :chestnut: |
 | **`mDNS`** | :chestnut: |
 | **`PEX`** | :chestnut: |
 | **`DNS`** | :chestnut: |
@@ -107,7 +147,7 @@ py-libp2p aims for conformity with [the standard libp2p modules](https://github.

 | Content Routing | Status |
 | -------------------------------------------- | :-----------: |
-| **`Kademlia DHT`** | :lemon: |
+| **`Kademlia DHT`** | :chestnut: |
 | **`floodsub`** | :green_apple: |
 | **`gossipsub`** | :green_apple: |
 | **`PHT`** | :chestnut: |
@@ -115,7 +155,7 @@ py-libp2p aims for conformity with [the standard libp2p modules](https://github.

 | Peer Routing | Status |
 | -------------------------------------------- | :-----------: |
-| **`Kademlia DHT`** | :green_apple: |
+| **`Kademlia DHT`** | :chestnut: |
 | **`floodsub`** | :green_apple: |
 | **`gossipsub`** | :green_apple: |
 | **`PHT`** | :chestnut: |
177 docs/Makefile Normal file
@@ -0,0 +1,177 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS  = -W
SPHINXBUILD = sphinx-build
PAPER       =
BUILDDIR    = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/web3.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/web3.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/web3"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/web3"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
304 docs/conf.py Normal file
@@ -0,0 +1,304 @@
# -*- coding: utf-8 -*-
#
# py-libp2p documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 16 20:43:24 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

import os

DIR = os.path.dirname(__file__)
with open(os.path.join(DIR, '../setup.py'), 'r') as f:
    for line in f:
        if 'version=' in line:
            setup_version = line.split('"')[1]
            break

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'py-libp2p'
copyright = '2019, The Ethereum Foundation'

__version__ = setup_version
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
    '_build',
    'modules.rst',
]

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'libp2pdoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'libp2p.tex', 'py-libp2p Documentation',
     'The Ethereum Foundation', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'libp2p', 'py-libp2p Documentation',
     ['The Ethereum Foundation'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'py-libp2p', 'py-libp2p Documentation',
     'The Ethereum Foundation', 'py-libp2p', 'The Python implementation of the libp2p networking stack',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# -- Intersphinx configuration ------------------------------------------------

intersphinx_mapping = {
    'python': ('https://docs.python.org/3.6', None),
}

# -- Doctest configuration ----------------------------------------

import doctest

doctest_default_flags = (0
    | doctest.DONT_ACCEPT_TRUE_FOR_1
    | doctest.ELLIPSIS
    | doctest.IGNORE_EXCEPTION_DETAIL
    | doctest.NORMALIZE_WHITESPACE
)

# -- Mocked dependencies ----------------------------------------

# Mock out dependencies that are unbuildable on readthedocs, as recommended here:
# https://docs.readthedocs.io/en/rel/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules

import sys
from unittest.mock import MagicMock

# Add new modules to mock here (it should be the same list as those excluded in setup.py)
MOCK_MODULES = [
    "fastecdsa",
    "fastecdsa.encoding",
    "fastecdsa.encoding.sec1",
]
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
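The `MOCK_MODULES` block at the end of conf.py pre-registers `MagicMock` objects under the names of C-backed dependencies, so autodoc can import libp2p modules on ReadTheDocs without compiling them. Python's import machinery consults `sys.modules` before loading anything, which is why the mock wins; a self-contained sketch of the mechanism:

```python
import sys
from unittest.mock import MagicMock

# Register the mocks before anything attempts the real import.
for mod_name in ("fastecdsa", "fastecdsa.encoding", "fastecdsa.encoding.sec1"):
    sys.modules[mod_name] = MagicMock()

import fastecdsa  # resolves to the MagicMock, not the C extension

assert isinstance(fastecdsa, MagicMock)
```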
22 docs/examples.chat.rst Normal file
@@ -0,0 +1,22 @@
examples.chat package
=====================

Submodules
----------

examples.chat.chat module
-------------------------

.. automodule:: examples.chat.chat
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: examples.chat
    :members:
    :undoc-members:
    :show-inheritance:
17 docs/examples.rst Normal file
@@ -0,0 +1,17 @@
examples package
================

Subpackages
-----------

.. toctree::

    examples.chat

Module contents
---------------

.. automodule:: examples
    :members:
    :undoc-members:
    :show-inheritance:
21 docs/index.rst Normal file
@@ -0,0 +1,21 @@
py-libp2p
==============================

The Python implementation of the libp2p networking stack

Contents
--------

.. toctree::
    :maxdepth: 3

    libp2p
    release_notes
    examples


Indices and tables
------------------

* :ref:`genindex`
* :ref:`modindex`
22 docs/libp2p.crypto.pb.rst Normal file
@@ -0,0 +1,22 @@
libp2p.crypto.pb package
========================

Submodules
----------

libp2p.crypto.pb.crypto\_pb2 module
-----------------------------------

.. automodule:: libp2p.crypto.pb.crypto_pb2
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.crypto.pb
    :members:
    :undoc-members:
    :show-inheritance:
93 docs/libp2p.crypto.rst Normal file
@@ -0,0 +1,93 @@
libp2p.crypto package
=====================

Subpackages
-----------

.. toctree::

    libp2p.crypto.pb

Submodules
----------

libp2p.crypto.authenticated\_encryption module
----------------------------------------------

.. automodule:: libp2p.crypto.authenticated_encryption
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.crypto.ecc module
------------------------

.. automodule:: libp2p.crypto.ecc
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.crypto.ed25519 module
----------------------------

.. automodule:: libp2p.crypto.ed25519
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.crypto.exceptions module
-------------------------------

.. automodule:: libp2p.crypto.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.crypto.key\_exchange module
----------------------------------

.. automodule:: libp2p.crypto.key_exchange
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.crypto.keys module
-------------------------

.. automodule:: libp2p.crypto.keys
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.crypto.rsa module
------------------------

.. automodule:: libp2p.crypto.rsa
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.crypto.secp256k1 module
------------------------------

.. automodule:: libp2p.crypto.secp256k1
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.crypto.serialization module
----------------------------------

.. automodule:: libp2p.crypto.serialization
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.crypto
    :members:
    :undoc-members:
    :show-inheritance:
62 docs/libp2p.host.rst Normal file
@@ -0,0 +1,62 @@
libp2p.host package
===================

Submodules
----------

libp2p.host.basic\_host module
------------------------------

.. automodule:: libp2p.host.basic_host
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.host.defaults module
---------------------------

.. automodule:: libp2p.host.defaults
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.host.exceptions module
-----------------------------

.. automodule:: libp2p.host.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.host.host\_interface module
----------------------------------

.. automodule:: libp2p.host.host_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.host.ping module
-----------------------

.. automodule:: libp2p.host.ping
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.host.routed\_host module
-------------------------------

.. automodule:: libp2p.host.routed_host
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.host
    :members:
    :undoc-members:
    :show-inheritance:
22
docs/libp2p.identity.identify.pb.rst
Normal file
@@ -0,0 +1,22 @@
libp2p.identity.identify.pb package
===================================

Submodules
----------

libp2p.identity.identify.pb.identify\_pb2 module
------------------------------------------------

.. automodule:: libp2p.identity.identify.pb.identify_pb2
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.identity.identify.pb
    :members:
    :undoc-members:
    :show-inheritance:
29
docs/libp2p.identity.identify.rst
Normal file
@@ -0,0 +1,29 @@
libp2p.identity.identify package
================================

Subpackages
-----------

.. toctree::

    libp2p.identity.identify.pb

Submodules
----------

libp2p.identity.identify.protocol module
----------------------------------------

.. automodule:: libp2p.identity.identify.protocol
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.identity.identify
    :members:
    :undoc-members:
    :show-inheritance:
17
docs/libp2p.identity.rst
Normal file
@@ -0,0 +1,17 @@
libp2p.identity package
=======================

Subpackages
-----------

.. toctree::

    libp2p.identity.identify

Module contents
---------------

.. automodule:: libp2p.identity
    :members:
    :undoc-members:
    :show-inheritance:
46
docs/libp2p.io.rst
Normal file
@@ -0,0 +1,46 @@
libp2p.io package
=================

Submodules
----------

libp2p.io.abc module
--------------------

.. automodule:: libp2p.io.abc
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.io.exceptions module
---------------------------

.. automodule:: libp2p.io.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.io.msgio module
----------------------

.. automodule:: libp2p.io.msgio
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.io.utils module
----------------------

.. automodule:: libp2p.io.utils
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.io
    :members:
    :undoc-members:
    :show-inheritance:
54
docs/libp2p.network.connection.rst
Normal file
@@ -0,0 +1,54 @@
libp2p.network.connection package
=================================

Submodules
----------

libp2p.network.connection.exceptions module
-------------------------------------------

.. automodule:: libp2p.network.connection.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.network.connection.net\_connection\_interface module
-----------------------------------------------------------

.. automodule:: libp2p.network.connection.net_connection_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.network.connection.raw\_connection module
------------------------------------------------

.. automodule:: libp2p.network.connection.raw_connection
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.network.connection.raw\_connection\_interface module
-----------------------------------------------------------

.. automodule:: libp2p.network.connection.raw_connection_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.network.connection.swarm\_connection module
--------------------------------------------------

.. automodule:: libp2p.network.connection.swarm_connection
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.network.connection
    :members:
    :undoc-members:
    :show-inheritance:
54
docs/libp2p.network.rst
Normal file
@@ -0,0 +1,54 @@
libp2p.network package
======================

Subpackages
-----------

.. toctree::

    libp2p.network.connection
    libp2p.network.stream

Submodules
----------

libp2p.network.exceptions module
--------------------------------

.. automodule:: libp2p.network.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.network.network\_interface module
----------------------------------------

.. automodule:: libp2p.network.network_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.network.notifee\_interface module
----------------------------------------

.. automodule:: libp2p.network.notifee_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.network.swarm module
---------------------------

.. automodule:: libp2p.network.swarm
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.network
    :members:
    :undoc-members:
    :show-inheritance:
38
docs/libp2p.network.stream.rst
Normal file
@@ -0,0 +1,38 @@
libp2p.network.stream package
=============================

Submodules
----------

libp2p.network.stream.exceptions module
---------------------------------------

.. automodule:: libp2p.network.stream.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.network.stream.net\_stream module
----------------------------------------

.. automodule:: libp2p.network.stream.net_stream
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.network.stream.net\_stream\_interface module
---------------------------------------------------

.. automodule:: libp2p.network.stream.net_stream_interface
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.network.stream
    :members:
    :undoc-members:
    :show-inheritance:
78
docs/libp2p.peer.rst
Normal file
@@ -0,0 +1,78 @@
libp2p.peer package
===================

Submodules
----------

libp2p.peer.addrbook\_interface module
--------------------------------------

.. automodule:: libp2p.peer.addrbook_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.peer.id module
---------------------

.. automodule:: libp2p.peer.id
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.peer.peerdata module
---------------------------

.. automodule:: libp2p.peer.peerdata
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.peer.peerdata\_interface module
--------------------------------------

.. automodule:: libp2p.peer.peerdata_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.peer.peerinfo module
---------------------------

.. automodule:: libp2p.peer.peerinfo
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.peer.peermetadata\_interface module
------------------------------------------

.. automodule:: libp2p.peer.peermetadata_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.peer.peerstore module
----------------------------

.. automodule:: libp2p.peer.peerstore
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.peer.peerstore\_interface module
---------------------------------------

.. automodule:: libp2p.peer.peerstore_interface
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.peer
    :members:
    :undoc-members:
    :show-inheritance:
70
docs/libp2p.protocol_muxer.rst
Normal file
@@ -0,0 +1,70 @@
libp2p.protocol\_muxer package
==============================

Submodules
----------

libp2p.protocol\_muxer.exceptions module
----------------------------------------

.. automodule:: libp2p.protocol_muxer.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.protocol\_muxer.multiselect module
-----------------------------------------

.. automodule:: libp2p.protocol_muxer.multiselect
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.protocol\_muxer.multiselect\_client module
-------------------------------------------------

.. automodule:: libp2p.protocol_muxer.multiselect_client
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.protocol\_muxer.multiselect\_client\_interface module
------------------------------------------------------------

.. automodule:: libp2p.protocol_muxer.multiselect_client_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.protocol\_muxer.multiselect\_communicator module
-------------------------------------------------------

.. automodule:: libp2p.protocol_muxer.multiselect_communicator
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.protocol\_muxer.multiselect\_communicator\_interface module
------------------------------------------------------------------

.. automodule:: libp2p.protocol_muxer.multiselect_communicator_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.protocol\_muxer.multiselect\_muxer\_interface module
-----------------------------------------------------------

.. automodule:: libp2p.protocol_muxer.multiselect_muxer_interface
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.protocol_muxer
    :members:
    :undoc-members:
    :show-inheritance:
22
docs/libp2p.pubsub.pb.rst
Normal file
@@ -0,0 +1,22 @@
libp2p.pubsub.pb package
========================

Submodules
----------

libp2p.pubsub.pb.rpc\_pb2 module
--------------------------------

.. automodule:: libp2p.pubsub.pb.rpc_pb2
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.pubsub.pb
    :members:
    :undoc-members:
    :show-inheritance:
77
docs/libp2p.pubsub.rst
Normal file
@@ -0,0 +1,77 @@
libp2p.pubsub package
=====================

Subpackages
-----------

.. toctree::

    libp2p.pubsub.pb

Submodules
----------

libp2p.pubsub.floodsub module
-----------------------------

.. automodule:: libp2p.pubsub.floodsub
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.pubsub.gossipsub module
------------------------------

.. automodule:: libp2p.pubsub.gossipsub
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.pubsub.mcache module
---------------------------

.. automodule:: libp2p.pubsub.mcache
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.pubsub.pubsub module
---------------------------

.. automodule:: libp2p.pubsub.pubsub
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.pubsub.pubsub\_notifee module
------------------------------------

.. automodule:: libp2p.pubsub.pubsub_notifee
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.pubsub.pubsub\_router\_interface module
----------------------------------------------

.. automodule:: libp2p.pubsub.pubsub_router_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.pubsub.validators module
-------------------------------

.. automodule:: libp2p.pubsub.validators
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.pubsub
    :members:
    :undoc-members:
    :show-inheritance:
23
docs/libp2p.routing.rst
Normal file
@@ -0,0 +1,23 @@
libp2p.routing package
======================


Submodules
----------

libp2p.routing.interfaces module
--------------------------------

.. automodule:: libp2p.routing.interfaces
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.routing
    :members:
    :undoc-members:
    :show-inheritance:
57
docs/libp2p.rst
Normal file
@@ -0,0 +1,57 @@
libp2p package
==============

Subpackages
-----------

.. toctree::

    libp2p.crypto
    libp2p.host
    libp2p.identity
    libp2p.io
    libp2p.network
    libp2p.peer
    libp2p.protocol_muxer
    libp2p.pubsub
    libp2p.routing
    libp2p.security
    libp2p.stream_muxer
    libp2p.tools
    libp2p.transport

Submodules
----------

libp2p.exceptions module
------------------------

.. automodule:: libp2p.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.typing module
--------------------

.. automodule:: libp2p.typing
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.utils module
-------------------

.. automodule:: libp2p.utils
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p
    :members:
    :undoc-members:
    :show-inheritance:
22
docs/libp2p.security.insecure.pb.rst
Normal file
@@ -0,0 +1,22 @@
libp2p.security.insecure.pb package
===================================

Submodules
----------

libp2p.security.insecure.pb.plaintext\_pb2 module
-------------------------------------------------

.. automodule:: libp2p.security.insecure.pb.plaintext_pb2
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.security.insecure.pb
    :members:
    :undoc-members:
    :show-inheritance:
29
docs/libp2p.security.insecure.rst
Normal file
@@ -0,0 +1,29 @@
libp2p.security.insecure package
================================

Subpackages
-----------

.. toctree::

    libp2p.security.insecure.pb

Submodules
----------

libp2p.security.insecure.transport module
-----------------------------------------

.. automodule:: libp2p.security.insecure.transport
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.security.insecure
    :members:
    :undoc-members:
    :show-inheritance:
70
docs/libp2p.security.rst
Normal file
@@ -0,0 +1,70 @@
libp2p.security package
=======================

Subpackages
-----------

.. toctree::

    libp2p.security.insecure
    libp2p.security.secio

Submodules
----------

libp2p.security.base\_session module
------------------------------------

.. automodule:: libp2p.security.base_session
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.security.base\_transport module
--------------------------------------

.. automodule:: libp2p.security.base_transport
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.security.exceptions module
---------------------------------

.. automodule:: libp2p.security.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.security.secure\_conn\_interface module
----------------------------------------------

.. automodule:: libp2p.security.secure_conn_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.security.secure\_transport\_interface module
---------------------------------------------------

.. automodule:: libp2p.security.secure_transport_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.security.security\_multistream module
--------------------------------------------

.. automodule:: libp2p.security.security_multistream
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.security
    :members:
    :undoc-members:
    :show-inheritance:
22
docs/libp2p.security.secio.pb.rst
Normal file
@@ -0,0 +1,22 @@
libp2p.security.secio.pb package
================================

Submodules
----------

libp2p.security.secio.pb.spipe\_pb2 module
------------------------------------------

.. automodule:: libp2p.security.secio.pb.spipe_pb2
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.security.secio.pb
    :members:
    :undoc-members:
    :show-inheritance:
37
docs/libp2p.security.secio.rst
Normal file
@@ -0,0 +1,37 @@
libp2p.security.secio package
=============================

Subpackages
-----------

.. toctree::

    libp2p.security.secio.pb

Submodules
----------

libp2p.security.secio.exceptions module
---------------------------------------

.. automodule:: libp2p.security.secio.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.security.secio.transport module
--------------------------------------

.. automodule:: libp2p.security.secio.transport
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.security.secio
    :members:
    :undoc-members:
    :show-inheritance:
54
docs/libp2p.stream_muxer.mplex.rst
Normal file
@@ -0,0 +1,54 @@
libp2p.stream\_muxer.mplex package
==================================

Submodules
----------

libp2p.stream\_muxer.mplex.constants module
-------------------------------------------

.. automodule:: libp2p.stream_muxer.mplex.constants
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.stream\_muxer.mplex.datastructures module
------------------------------------------------

.. automodule:: libp2p.stream_muxer.mplex.datastructures
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.stream\_muxer.mplex.exceptions module
--------------------------------------------

.. automodule:: libp2p.stream_muxer.mplex.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.stream\_muxer.mplex.mplex module
---------------------------------------

.. automodule:: libp2p.stream_muxer.mplex.mplex
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.stream\_muxer.mplex.mplex\_stream module
-----------------------------------------------

.. automodule:: libp2p.stream_muxer.mplex.mplex_stream
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.stream_muxer.mplex
    :members:
    :undoc-members:
    :show-inheritance:
45
docs/libp2p.stream_muxer.rst
Normal file
@@ -0,0 +1,45 @@
libp2p.stream\_muxer package
============================

Subpackages
-----------

.. toctree::

    libp2p.stream_muxer.mplex

Submodules
----------

libp2p.stream\_muxer.abc module
-------------------------------

.. automodule:: libp2p.stream_muxer.abc
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.stream\_muxer.exceptions module
--------------------------------------

.. automodule:: libp2p.stream_muxer.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.stream\_muxer.muxer\_multistream module
----------------------------------------------

.. automodule:: libp2p.stream_muxer.muxer_multistream
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.stream_muxer
    :members:
    :undoc-members:
    :show-inheritance:
38
docs/libp2p.tools.pubsub.rst
Normal file
@@ -0,0 +1,38 @@
libp2p.tools.pubsub package
===========================

Submodules
----------

libp2p.tools.pubsub.dummy\_account\_node module
-----------------------------------------------

.. automodule:: libp2p.tools.pubsub.dummy_account_node
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.tools.pubsub.floodsub\_integration\_test\_settings module
----------------------------------------------------------------

.. automodule:: libp2p.tools.pubsub.floodsub_integration_test_settings
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.tools.pubsub.utils module
--------------------------------

.. automodule:: libp2p.tools.pubsub.utils
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.tools.pubsub
    :members:
    :undoc-members:
    :show-inheritance:
47
docs/libp2p.tools.rst
Normal file
@@ -0,0 +1,47 @@
libp2p.tools package
====================

Subpackages
-----------

.. toctree::

    libp2p.tools.pubsub

The interop module is left out for now, because of the extra dependencies it requires.

Submodules
----------

libp2p.tools.constants module
-----------------------------

.. automodule:: libp2p.tools.constants
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.tools.factories module
-----------------------------

.. automodule:: libp2p.tools.factories
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.tools.utils module
-------------------------

.. automodule:: libp2p.tools.utils
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.tools
    :members:
    :undoc-members:
    :show-inheritance:
61
docs/libp2p.transport.rst
Normal file
@@ -0,0 +1,61 @@
libp2p.transport package
========================

Subpackages
-----------

.. toctree::

    libp2p.transport.tcp

Submodules
----------

libp2p.transport.exceptions module
----------------------------------

.. automodule:: libp2p.transport.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.listener\_interface module
-------------------------------------------

.. automodule:: libp2p.transport.listener_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.transport\_interface module
--------------------------------------------

.. automodule:: libp2p.transport.transport_interface
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.typing module
------------------------------

.. automodule:: libp2p.transport.typing
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.upgrader module
--------------------------------

.. automodule:: libp2p.transport.upgrader
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.transport
    :members:
    :undoc-members:
    :show-inheritance:
22
docs/libp2p.transport.tcp.rst
Normal file
@@ -0,0 +1,22 @@
libp2p.transport.tcp package
============================

Submodules
----------

libp2p.transport.tcp.tcp module
-------------------------------

.. automodule:: libp2p.transport.tcp.tcp
    :members:
    :undoc-members:
    :show-inheritance:


Module contents
---------------

.. automodule:: libp2p.transport.tcp
    :members:
    :undoc-members:
    :show-inheritance:
58
docs/release_notes.rst
Normal file
@@ -0,0 +1,58 @@
Release Notes
=============

.. towncrier release notes start

libp2p v0.1.4 (2019-12-12)
--------------------------

Features
~~~~~~~~

- Added support for Python 3.6 (`#372 <https://github.com/libp2p/py-libp2p/issues/372>`__)
- Add signing and verification to pubsub (`#362 <https://github.com/libp2p/py-libp2p/issues/362>`__)


Internal Changes - for py-libp2p Contributors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- Refactor and cleanup gossipsub (`#373 <https://github.com/libp2p/py-libp2p/issues/373>`__)


libp2p v0.1.3 (2019-11-27)
--------------------------

Bugfixes
~~~~~~~~

- Handle Stream* errors (like ``StreamClosed``) during calls to ``stream.write()`` and
  ``stream.read()`` (`#350 <https://github.com/libp2p/py-libp2p/issues/350>`__)
- Relax the protobuf dependency to play nicely with other libraries. It was pinned to 3.9.0, and now
  permits v3.10 up to (but not including) v4. (`#354 <https://github.com/libp2p/py-libp2p/issues/354>`__)
- Fixes KeyError when peer in a stream accidentally closes and resets the stream, because handlers
  for both will try to ``del streams[stream_id]`` without checking if the entry still exists. (`#355 <https://github.com/libp2p/py-libp2p/issues/355>`__)


Improved Documentation
~~~~~~~~~~~~~~~~~~~~~~

- Use Sphinx & autodoc to generate docs, now available on `py-libp2p.readthedocs.io <https://py-libp2p.readthedocs.io>`_ (`#318 <https://github.com/libp2p/py-libp2p/issues/318>`__)


Internal Changes - for py-libp2p Contributors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- Added Makefile target to test a packaged version of libp2p before release. (`#353 <https://github.com/libp2p/py-libp2p/issues/353>`__)
- Move helper tools from ``tests/`` to ``libp2p/tools/``, and some mildly-related cleanups. (`#356 <https://github.com/libp2p/py-libp2p/issues/356>`__)


Miscellaneous changes
~~~~~~~~~~~~~~~~~~~~~

- `#357 <https://github.com/libp2p/py-libp2p/issues/357>`__


v0.1.2
------

Welcome to the great beyond, where changes were not tracked by release...
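The ``StreamClosed`` fix noted under v0.1.3 (#350) amounts to treating stream I/O as fallible rather than letting the exception escape protocol handlers. A minimal sketch of that calling pattern, assuming ``StreamClosed`` lives in ``libp2p.network.stream.exceptions`` (the module the docs above list) and that the helper name ``safe_write`` is illustrative, not from the codebase:

# Illustrative sketch, not part of this commit: guard stream writes against
# a peer that has already closed or reset the stream.
from libp2p.network.stream.exceptions import StreamClosed
from libp2p.network.stream.net_stream_interface import INetStream

async def safe_write(stream: INetStream, data: bytes) -> bool:
    """Return False instead of raising when the stream is already closed."""
    try:
        await stream.write(data)
        return True
    except StreamClosed:
        # The remote closed/reset the stream; the caller decides what to do.
        return False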
@@ -5,15 +5,12 @@ from libp2p.crypto.rsa import create_new_key_pair
 from libp2p.host.basic_host import BasicHost
 from libp2p.host.host_interface import IHost
 from libp2p.host.routed_host import RoutedHost
-from libp2p.kademlia.network import KademliaServer
-from libp2p.kademlia.storage import IStorage
 from libp2p.network.network_interface import INetwork
 from libp2p.network.swarm import Swarm
 from libp2p.peer.id import ID
 from libp2p.peer.peerstore import PeerStore
 from libp2p.peer.peerstore_interface import IPeerStore
 from libp2p.routing.interfaces import IPeerRouting
-from libp2p.routing.kademlia.kademlia_peer_router import KadmeliaPeerRouter
 from libp2p.security.insecure.transport import PLAINTEXT_PROTOCOL_ID, InsecureTransport
 import libp2p.security.secio.transport as secio
 from libp2p.stream_muxer.mplex.mplex import MPLEX_PROTOCOL_ID, Mplex
@@ -32,31 +29,6 @@ def generate_peer_id_from(key_pair: KeyPair) -> ID:
     return ID.from_pubkey(public_key)
 
 
-def initialize_default_kademlia_router(
-    ksize: int = 20, alpha: int = 3, id_opt: ID = None, storage: IStorage = None
-) -> KadmeliaPeerRouter:
-    """
-    initialize kadmelia router when no kademlia router is passed in.
-
-    :param ksize: The k parameter from the paper
-    :param alpha: The alpha parameter from the paper
-    :param id_opt: optional id for host
-    :param storage: An instance that implements
-        :interface:`~kademlia.storage.IStorage`
-    :return: return a default kademlia instance
-    """
-    if not id_opt:
-        key_pair = generate_new_rsa_identity()
-        id_opt = generate_peer_id_from(key_pair)
-
-    node_id = id_opt.to_bytes()
-    # ignore type for Kademlia module
-    server = KademliaServer(  # type: ignore
-        ksize=ksize, alpha=alpha, node_id=node_id, storage=storage
-    )
-    return KadmeliaPeerRouter(server)
-
-
 def initialize_default_swarm(
     key_pair: KeyPair,
     id_opt: ID = None,
@@ -92,6 +64,9 @@ def initialize_default_swarm(
     )
 
     peerstore = peerstore_opt or PeerStore()
+    # Store our key pair in peerstore
+    peerstore.add_key_pair(id_opt, key_pair)
+
     # TODO: Initialize discovery if not presented
     return Swarm(id_opt, peerstore, upgrader, transport)
 
@@ -138,8 +113,8 @@ def new_node(
     # TODO routing unimplemented
     host: IHost  # If not explicitly typed, MyPy raises error
     if disc_opt:
-        host = RoutedHost(key_pair.public_key, swarm_opt, disc_opt)
+        host = RoutedHost(swarm_opt, disc_opt)
     else:
-        host = BasicHost(key_pair.public_key, swarm_opt)
+        host = BasicHost(swarm_opt)
 
     return host
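The hunk above moves key material into the peerstore at swarm-construction time, which is what lets hosts look their own keys up later instead of carrying a ``public_key`` field. A short sketch of the resulting flow, built only from names visible in this commit:

# Illustrative sketch, not part of this commit: the peerstore now carries the
# local key pair, keyed by the local peer ID.
from libp2p.crypto.rsa import create_new_key_pair
from libp2p.peer.id import ID
from libp2p.peer.peerstore import PeerStore

key_pair = create_new_key_pair()
peer_id = ID.from_pubkey(key_pair.public_key)

peerstore = PeerStore()
peerstore.add_key_pair(peer_id, key_pair)

# BasicHost.get_public_key / get_private_key (diff further below) resolve
# through these accessors rather than a field stored on the host:
local_pubkey = peerstore.pubkey(peer_id)
local_privkey = peerstore.privkey(peer_id)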
@@ -1,12 +1,14 @@
 from typing import Callable, Tuple, cast
 
-from fastecdsa.encoding.util import int_bytelen
+from fastecdsa.encoding import util
 
 from libp2p.crypto.ecc import ECCPrivateKey, ECCPublicKey, create_new_key_pair
 from libp2p.crypto.keys import PublicKey
 
 SharedKeyGenerator = Callable[[bytes], bytes]
 
+int_bytelen = util.int_bytelen
+
 
 def create_ephemeral_key_pair(curve_type: str) -> Tuple[PublicKey, SharedKeyGenerator]:
     """Facilitates ECDH key exchange."""
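For context, ``create_ephemeral_key_pair`` (unchanged here apart from the import shuffle) pairs a fresh ephemeral public key with a closure that derives the shared secret from the other side's serialized key. A sketch of two parties completing the exchange, assuming the ``P-256`` curve and ``to_bytes()`` serialization that secio uses:

# Illustrative sketch, not part of this commit: both sides should derive the
# same shared secret from each other's serialized ephemeral public key.
from libp2p.crypto.key_exchange import create_ephemeral_key_pair

alice_pub, alice_shared = create_ephemeral_key_pair("P-256")
bob_pub, bob_shared = create_ephemeral_key_pair("P-256")

# Each peer feeds the *other* side's serialized public key into its generator.
secret_a = alice_shared(bob_pub.to_bytes())
secret_b = bob_shared(alice_pub.to_bytes())
assert secret_a == secret_b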
@@ -8,3 +8,9 @@ class ValidationError(BaseLibp2pError):
 
 class ParseError(BaseLibp2pError):
     pass
+
+
+class MultiError(BaseLibp2pError):
+    """Raised with multiple exceptions."""
+
+    # todo: find some way for this to fancy-print all encapsulated errors
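The new ``MultiError`` carries a TODO about pretty-printing its contents. One possible approach — hypothetical, not the project's chosen solution — is to keep the wrapped exceptions and render them one per line:

# Hypothetical sketch for the TODO above, not part of this commit.
from libp2p.exceptions import BaseLibp2pError

class MultiError(BaseLibp2pError):
    """Raised with multiple exceptions."""

    def __init__(self, errors):
        super().__init__(errors)
        self.errors = tuple(errors)  # keep every encapsulated error

    def __str__(self):
        # Render e.g. "[0] StreamClosed: ..." on one line per error.
        return "\n".join(
            f"[{i}] {type(err).__name__}: {err}"
            for i, err in enumerate(self.errors)
        )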
@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, List, Sequence
 
 import multiaddr
 
-from libp2p.crypto.keys import PublicKey
+from libp2p.crypto.keys import PrivateKey, PublicKey
 from libp2p.host.defaults import get_default_protocols
 from libp2p.host.exceptions import StreamFailure
 from libp2p.network.network_interface import INetwork
@@ -39,7 +39,6 @@ class BasicHost(IHost):
     right after a stream is initialized.
     """
 
-    _public_key: PublicKey
     _network: INetwork
     peerstore: IPeerStore
 
@@ -48,11 +47,9 @@ class BasicHost(IHost):
 
     def __init__(
         self,
-        public_key: PublicKey,
         network: INetwork,
         default_protocols: "OrderedDict[TProtocol, StreamHandlerFn]" = None,
     ) -> None:
-        self._public_key = public_key
         self._network = network
         self._network.set_stream_handler(self._swarm_stream_handler)
         self.peerstore = self._network.peerstore
@@ -68,7 +65,10 @@ class BasicHost(IHost):
         return self._network.get_peer_id()
 
     def get_public_key(self) -> PublicKey:
-        return self._public_key
+        return self.peerstore.pubkey(self.get_id())
+
+    def get_private_key(self) -> PrivateKey:
+        return self.peerstore.privkey(self.get_id())
 
     def get_network(self) -> INetwork:
         """
|||||||
@ -3,7 +3,7 @@ from typing import Any, List, Sequence
|
|||||||
|
|
||||||
import multiaddr
|
import multiaddr
|
||||||
|
|
||||||
from libp2p.crypto.keys import PublicKey
|
from libp2p.crypto.keys import PrivateKey, PublicKey
|
||||||
from libp2p.network.network_interface import INetwork
|
from libp2p.network.network_interface import INetwork
|
||||||
from libp2p.network.stream.net_stream_interface import INetStream
|
from libp2p.network.stream.net_stream_interface import INetStream
|
||||||
from libp2p.peer.id import ID
|
from libp2p.peer.id import ID
|
||||||
@ -24,6 +24,12 @@ class IHost(ABC):
|
|||||||
:return: the public key belonging to the peer
|
:return: the public key belonging to the peer
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_private_key(self) -> PrivateKey:
|
||||||
|
"""
|
||||||
|
:return: the private key belonging to the peer
|
||||||
|
"""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def get_network(self) -> INetwork:
|
def get_network(self) -> INetwork:
|
||||||
"""
|
"""
|
||||||
|
|||||||
@ -1,4 +1,3 @@
|
|||||||
from libp2p.crypto.keys import PublicKey
|
|
||||||
from libp2p.host.basic_host import BasicHost
|
from libp2p.host.basic_host import BasicHost
|
||||||
from libp2p.host.exceptions import ConnectionFailure
|
from libp2p.host.exceptions import ConnectionFailure
|
||||||
from libp2p.network.network_interface import INetwork
|
from libp2p.network.network_interface import INetwork
|
||||||
@ -11,8 +10,8 @@ from libp2p.routing.interfaces import IPeerRouting
|
|||||||
class RoutedHost(BasicHost):
|
class RoutedHost(BasicHost):
|
||||||
_router: IPeerRouting
|
_router: IPeerRouting
|
||||||
|
|
||||||
def __init__(self, public_key: PublicKey, network: INetwork, router: IPeerRouting):
|
def __init__(self, network: INetwork, router: IPeerRouting):
|
||||||
super().__init__(public_key, network)
|
super().__init__(network)
|
||||||
self._router = router
|
self._router = router
|
||||||
|
|
||||||
async def connect(self, peer_info: PeerInfo) -> None:
|
async def connect(self, peer_info: PeerInfo) -> None:
|
||||||
|
|||||||
@@ -1,173 +0,0 @@
-from collections import Counter
-import logging
-
-from .kad_peerinfo import KadPeerHeap, create_kad_peerinfo
-from .utils import gather_dict
-
-log = logging.getLogger(__name__)
-
-
-class SpiderCrawl:
-    """Crawl the network and look for given 160-bit keys."""
-
-    def __init__(self, protocol, node, peers, ksize, alpha):
-        """
-        Create a new C{SpiderCrawl}er.
-
-        Args:
-            protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
-            node: A :class:`~kademlia.node.Node` representing the key we're
-                  looking for
-            peers: A list of :class:`~kademlia.node.Node` instances that
-                   provide the entry point for the network
-            ksize: The value for k based on the paper
-            alpha: The value for alpha based on the paper
-        """
-        self.protocol = protocol
-        self.ksize = ksize
-        self.alpha = alpha
-        self.node = node
-        self.nearest = KadPeerHeap(self.node, self.ksize)
-        self.last_ids_crawled = []
-        log.info("creating spider with peers: %s", peers)
-        self.nearest.push(peers)
-
-    async def _find(self, rpcmethod):
-        """
-        Get either a value or list of nodes.
-
-        Args:
-            rpcmethod: The protocol's callfindValue or call_find_node.
-
-        The process:
-          1. calls find_* to current ALPHA nearest not already queried nodes,
-             adding results to current nearest list of k nodes.
-          2. current nearest list needs to keep track of who has been queried
-             already sort by nearest, keep KSIZE
-          3. if list is same as last time, next call should be to everyone not
-             yet queried
-          4. repeat, unless nearest list has all been queried, then ur done
-        """
-        log.info("crawling network with nearest: %s", str(tuple(self.nearest)))
-        count = self.alpha
-        if self.nearest.get_ids() == self.last_ids_crawled:
-            count = len(self.nearest)
-        self.last_ids_crawled = self.nearest.get_ids()
-
-        dicts = {}
-        for peer in self.nearest.get_uncontacted()[:count]:
-            dicts[peer.peer_id_bytes] = rpcmethod(peer, self.node)
-            self.nearest.mark_contacted(peer)
-        found = await gather_dict(dicts)
-        return await self._nodes_found(found)
-
-    async def _nodes_found(self, responses):
-        raise NotImplementedError
-
-
-class ValueSpiderCrawl(SpiderCrawl):
-    def __init__(self, protocol, node, peers, ksize, alpha):
-        SpiderCrawl.__init__(self, protocol, node, peers, ksize, alpha)
-        # keep track of the single nearest node without value - per
-        # section 2.3 so we can set the key there if found
-        self.nearest_without_value = KadPeerHeap(self.node, 1)
-
-    async def find(self):
-        """Find either the closest nodes or the value requested."""
-        return await self._find(self.protocol.call_find_value)
-
-    async def _nodes_found(self, responses):
-        """Handle the result of an iteration in _find."""
-        toremove = []
-        found_values = []
-        for peerid, response in responses.items():
-            response = RPCFindResponse(response)
-            if not response.happened():
-                toremove.append(peerid)
-            elif response.has_value():
-                found_values.append(response.get_value())
-            else:
-                peer = self.nearest.get_node(peerid)
-                self.nearest_without_value.push(peer)
-                self.nearest.push(response.get_node_list())
-        self.nearest.remove(toremove)
-
-        if found_values:
-            return await self._handle_found_values(found_values)
-        if self.nearest.have_contacted_all():
-            # not found!
-            return None
-        return await self.find()
-
-    async def _handle_found_values(self, values):
-        """
-        We got some values!
-
-        Exciting. But let's make sure they're all the same or freak out
-        a little bit. Also, make sure we tell the nearest node that
-        *didn't* have the value to store it.
-        """
-        value_counts = Counter(values)
-        if len(value_counts) != 1:
-            log.warning(
-                "Got multiple values for key %i: %s", self.node.xor_id, str(values)
-            )
-        value = value_counts.most_common(1)[0][0]
-
-        peer = self.nearest_without_value.popleft()
-        if peer:
-            await self.protocol.call_store(peer, self.node.peer_id_bytes, value)
-        return value
-
-
-class NodeSpiderCrawl(SpiderCrawl):
-    async def find(self):
-        """Find the closest nodes."""
-        return await self._find(self.protocol.call_find_node)
-
-    async def _nodes_found(self, responses):
-        """Handle the result of an iteration in _find."""
-        toremove = []
-        for peerid, response in responses.items():
-            response = RPCFindResponse(response)
-            if not response.happened():
-                toremove.append(peerid)
-            else:
-                self.nearest.push(response.get_node_list())
-        self.nearest.remove(toremove)
-
-        if self.nearest.have_contacted_all():
-            return list(self.nearest)
-        return await self.find()
-
-
-class RPCFindResponse:
-    def __init__(self, response):
-        """
-        A wrapper for the result of a RPC find.
-
-        Args:
-            response: This will be a tuple of (<response received>, <value>)
-                      where <value> will be a list of tuples if not found or
-                      a dictionary of {'value': v} where v is the value desired
-        """
-        self.response = response
-
-    def happened(self):
-        """Did the other host actually respond?"""
-        return self.response[0]
-
-    def has_value(self):
-        return isinstance(self.response[1], dict)
-
-    def get_value(self):
-        return self.response[1]["value"]
-
-    def get_node_list(self):
-        """
-        Get the node list in the response.
-
-        If there's no value, this should be set.
-        """
-        nodelist = self.response[1] or []
-        return [create_kad_peerinfo(*nodeple) for nodeple in nodelist]
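The ``_find`` docstring in the deleted crawler describes the classic Kademlia iterative lookup: query the ``alpha`` closest uncontacted candidates, fold their answers back into the k-closest set, and widen to everyone uncontacted once a round stops changing the view. A self-contained schematic of that loop — illustrative only, not the removed implementation, with ``query`` a caller-supplied function:

# Schematic rendering of the lookup loop described above; illustrative only.
def iterative_lookup(query, start, ksize=20, alpha=3):
    """query(peer) -> iterable of (distance, peer); start: initial candidates."""
    nearest = sorted(set(start))[:ksize]   # k closest (distance, peer) pairs
    contacted = set()
    previous_round = None
    while True:
        uncontacted = [p for p in nearest if p not in contacted]
        if not uncontacted:                # step 4: everyone queried -> done
            return nearest
        # Step 3: if the view did not change last round, query *all*
        # uncontacted candidates instead of only the alpha closest.
        batch = uncontacted if nearest == previous_round else uncontacted[:alpha]
        previous_round = list(nearest)
        found = []
        for pair in batch:
            contacted.add(pair)
            found.extend(query(pair[1]))   # steps 1-2: fold answers back in
        nearest = sorted(set(nearest) | set(found))[:ksize]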
@ -1,153 +0,0 @@
|
|||||||
import heapq
|
|
||||||
from operator import itemgetter
|
|
||||||
import random
|
|
||||||
from typing import List
|
|
||||||
|
|
||||||
from multiaddr import Multiaddr
|
|
||||||
|
|
||||||
from libp2p.peer.id import ID
|
|
||||||
from libp2p.peer.peerinfo import PeerInfo
|
|
||||||
|
|
||||||
from .utils import digest
|
|
||||||
|
|
||||||
P_IP = "ip4"
|
|
||||||
P_UDP = "udp"
|
|
||||||
|
|
||||||
|
|
||||||
class KadPeerInfo(PeerInfo):
|
|
||||||
def __init__(self, peer_id, addrs):
|
|
||||||
super(KadPeerInfo, self).__init__(peer_id, addrs)
|
|
||||||
|
|
||||||
self.peer_id_bytes = peer_id.to_bytes()
|
|
||||||
self.xor_id = peer_id.xor_id
|
|
||||||
|
|
||||||
self.addrs = addrs
|
|
||||||
|
|
||||||
self.ip = self.addrs[0].value_for_protocol(P_IP) if addrs else None
|
|
||||||
self.port = int(self.addrs[0].value_for_protocol(P_UDP)) if addrs else None
|
|
||||||
|
|
||||||
def same_home_as(self, node):
|
|
||||||
return sorted(self.addrs) == sorted(node.addrs)
|
|
||||||
|
|
||||||
def distance_to(self, node):
|
|
||||||
"""Get the distance between this node and another."""
|
|
||||||
return self.xor_id ^ node.xor_id
|
|
||||||
|
|
||||||
def __iter__(self):
|
|
||||||
"""
|
|
||||||
Enables use of Node as a tuple - i.e., tuple(node) works.
|
|
||||||
"""
|
|
||||||
return iter([self.peer_id_bytes, self.ip, self.port])
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
return repr([self.xor_id, self.ip, self.port, self.peer_id_bytes])
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return "%s:%s" % (self.ip, str(self.port))
|
|
||||||
|
|
||||||
def encode(self):
|
|
||||||
return (
|
|
||||||
str(self.peer_id_bytes)
|
|
||||||
+ "\n"
|
|
||||||
+ str("/ip4/" + str(self.ip) + "/udp/" + str(self.port))
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class KadPeerHeap:
|
|
||||||
"""A heap of peers ordered by distance to a given node."""
|
|
||||||
|
|
||||||
def __init__(self, node, maxsize):
|
|
||||||
"""
|
|
||||||
Constructor.
|
|
||||||
|
|
||||||
@param node: The node to measure all distnaces from.
|
|
||||||
@param maxsize: The maximum size that this heap can grow to.
|
|
||||||
"""
|
|
||||||
self.node = node
|
|
||||||
self.heap = []
|
|
||||||
self.contacted = set()
|
|
||||||
self.maxsize = maxsize
|
|
||||||
|
|
||||||
def remove(self, peers):
|
|
||||||
"""
|
|
||||||
Remove a list of peer ids from this heap.
|
|
||||||
|
|
||||||
Note that while this heap retains a constant visible size (based
|
|
||||||
on the iterator), it's actual size may be quite a bit larger
|
|
||||||
than what's exposed. Therefore, removal of nodes may not change
|
|
||||||
the visible size as previously added nodes suddenly become
|
|
||||||
visible.
|
|
||||||
"""
|
|
||||||
peers = set(peers)
|
|
||||||
if not peers:
|
|
||||||
return
|
|
||||||
nheap = []
|
|
||||||
for distance, node in self.heap:
|
|
||||||
if node.peer_id_bytes not in peers:
|
|
||||||
heapq.heappush(nheap, (distance, node))
|
|
||||||
self.heap = nheap
|
|
||||||
|
|
||||||
def get_node(self, node_id):
|
|
||||||
for _, node in self.heap:
|
|
||||||
if node.peer_id_bytes == node_id:
|
|
||||||
return node
|
|
||||||
return None
|
|
||||||
|
|
||||||
def have_contacted_all(self):
|
|
||||||
return len(self.get_uncontacted()) == 0
|
|
||||||
|
|
||||||
def get_ids(self):
|
|
||||||
return [n.peer_id_bytes for n in self]
|
|
||||||
|
|
||||||
def mark_contacted(self, node):
|
|
||||||
self.contacted.add(node.peer_id_bytes)
|
|
||||||
|
|
||||||
def popleft(self):
|
|
||||||
return heapq.heappop(self.heap)[1] if self else None
|
|
||||||
|
|
||||||
def push(self, nodes):
|
|
||||||
"""
|
|
||||||
Push nodes onto heap.
|
|
||||||
|
|
||||||
@param nodes: This can be a single item or a C{list}.
|
|
||||||
"""
|
|
||||||
if not isinstance(nodes, list):
|
|
||||||
nodes = [nodes]
|
|
||||||
|
|
||||||
for node in nodes:
|
|
||||||
if node not in self:
|
|
||||||
distance = self.node.distance_to(node)
|
|
||||||
heapq.heappush(self.heap, (distance, node))
|
|
||||||
|
|
||||||
def __len__(self):
|
|
||||||
return min(len(self.heap), self.maxsize)
|
|
||||||
|
|
||||||
def __iter__(self):
|
|
||||||
nodes = heapq.nsmallest(self.maxsize, self.heap)
|
|
||||||
return iter(map(itemgetter(1), nodes))
|
|
||||||
|
|
||||||
def __contains__(self, node):
|
|
||||||
for _, other in self.heap:
|
|
||||||
if node.peer_id_bytes == other.peer_id_bytes:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def get_uncontacted(self):
|
|
||||||
return [n for n in self if n.peer_id_bytes not in self.contacted]
|
|
||||||
|
|
||||||
|
|
||||||
def create_kad_peerinfo(node_id_bytes=None, sender_ip=None, sender_port=None):
|
|
||||||
node_id = (
|
|
||||||
ID(node_id_bytes) if node_id_bytes else ID(digest(random.getrandbits(255)))
|
|
||||||
)
|
|
||||||
addrs: List[Multiaddr]
|
|
||||||
if sender_ip and sender_port:
|
|
||||||
addrs = [
|
|
||||||
Multiaddr(
|
|
||||||
"/" + P_IP + "/" + str(sender_ip) + "/" + P_UDP + "/" + str(sender_port)
|
|
||||||
)
|
|
||||||
]
|
|
||||||
else:
|
|
||||||
addrs = []
|
|
||||||
|
|
||||||
return KadPeerInfo(node_id, addrs)
|
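For illustration, a minimal sketch (not part of the original file) of how the XOR metric above behaves; both peers are freshly generated with random IDs:

    a = create_kad_peerinfo()
    b = create_kad_peerinfo()
    assert a.distance_to(a) == 0                    # a node is closest to itself
    assert a.distance_to(b) == b.distance_to(a)     # the metric is symmetric
    assert a.distance_to(b) == a.xor_id ^ b.xor_id  # distance is plain XOR of the ids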
@ -1,251 +0,0 @@
"""Package for interacting on the network at a high level."""
import asyncio
import logging
import pickle

from .crawling import NodeSpiderCrawl, ValueSpiderCrawl
from .kad_peerinfo import create_kad_peerinfo
from .protocol import KademliaProtocol
from .storage import ForgetfulStorage
from .utils import digest

log = logging.getLogger(__name__)


class KademliaServer:
    """
    High level view of a node instance.

    This is the object that should be created to start listening as an
    active node on the network.
    """

    protocol_class = KademliaProtocol

    def __init__(self, ksize=20, alpha=3, node_id=None, storage=None):
        """
        Create a server instance. Listening starts when :meth:`listen` is called.

        Args:
            ksize (int): The k parameter from the paper
            alpha (int): The alpha parameter from the paper
            node_id: The id for this node on the network.
            storage: An instance that implements
                     :interface:`~kademlia.storage.IStorage`
        """
        self.ksize = ksize
        self.alpha = alpha
        self.storage = storage or ForgetfulStorage()
        self.node = create_kad_peerinfo(node_id)
        self.transport = None
        self.protocol = None
        self.refresh_loop = None
        self.save_state_loop = None

    def stop(self):
        if self.transport is not None:
            self.transport.close()

        if self.refresh_loop:
            self.refresh_loop.cancel()

        if self.save_state_loop:
            self.save_state_loop.cancel()

    def _create_protocol(self):
        return self.protocol_class(self.node, self.storage, self.ksize)

    async def listen(self, port=0, interface="0.0.0.0"):
        """
        Start listening on the given port.

        Provide interface="::" to accept ipv6 addresses.
        """
        loop = asyncio.get_event_loop()
        listen = loop.create_datagram_endpoint(
            self._create_protocol, local_addr=(interface, port)
        )
        self.transport, self.protocol = await listen
        socket = self.transport.get_extra_info("socket")
        self.address = socket.getsockname()
        log.info(
            "Node %i listening on %s:%i",
            self.node.xor_id,
            self.address[0],
            self.address[1],
        )
        # finally, schedule refreshing table
        self.refresh_table()

    def refresh_table(self):
        log.debug("Refreshing routing table")
        asyncio.ensure_future(self._refresh_table())
        loop = asyncio.get_event_loop()
        self.refresh_loop = loop.call_later(3600, self.refresh_table)

    async def _refresh_table(self):
        """Refresh buckets that haven't had any lookups in the last hour (per
        section 2.3 of the paper)."""
        results = []
        for node_id in self.protocol.get_refresh_ids():
            node = create_kad_peerinfo(node_id)
            nearest = self.protocol.router.find_neighbors(node, self.alpha)
            spider = NodeSpiderCrawl(
                self.protocol, node, nearest, self.ksize, self.alpha
            )
            results.append(spider.find())

        # do our crawling
        await asyncio.gather(*results)

        # now republish keys older than one hour
        for dkey, value in self.storage.iter_older_than(3600):
            await self.set_digest(dkey, value)

    def bootstrappable_neighbors(self):
        """
        Get a :class:`list` of (ip, port) :class:`tuple` pairs suitable for use
        as an argument to the bootstrap method.

        The server should have been bootstrapped
        already - this is just a utility for getting some neighbors and then
        storing them if this server is going down for a while. When it comes
        back up, the list of nodes can be used to bootstrap.
        """
        neighbors = self.protocol.router.find_neighbors(self.node)
        return [tuple(n)[-2:] for n in neighbors]

    async def bootstrap(self, addrs):
        """
        Bootstrap the server by connecting to other known nodes in the network.

        Args:
            addrs: A `list` of (ip, port) `tuple` pairs. Note that only IP
                   addresses are acceptable - hostnames will cause an error.
        """
        log.debug("Attempting to bootstrap node with %i initial contacts", len(addrs))
        cos = list(map(self.bootstrap_node, addrs))
        gathered = await asyncio.gather(*cos)
        nodes = [node for node in gathered if node is not None]
        spider = NodeSpiderCrawl(
            self.protocol, self.node, nodes, self.ksize, self.alpha
        )
        return await spider.find()

    async def bootstrap_node(self, addr):
        result = await self.protocol.ping(addr, self.node.peer_id_bytes)
        return create_kad_peerinfo(result[1], addr[0], addr[1]) if result[0] else None

    async def get(self, key):
        """
        Get a key if the network has it.

        Returns:
            :class:`None` if not found, the value otherwise.
        """
        log.info("Looking up key %s", key)
        dkey = digest(key)
        # if this node has it, return it
        if self.storage.get(dkey) is not None:
            return self.storage.get(dkey)

        node = create_kad_peerinfo(dkey)
        nearest = self.protocol.router.find_neighbors(node)
        if not nearest:
            log.warning("There are no known neighbors to get key %s", key)
            return None
        spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
        return await spider.find()

    async def set(self, key, value):
        """Set the given string key to the given value in the network."""
        if not check_dht_value_type(value):
            raise TypeError("Value must be of type int, float, bool, str, or bytes")
        log.info("setting '%s' = '%s' on network", key, value)
        dkey = digest(key)
        return await self.set_digest(dkey, value)

    async def provide(self, key):
        """Publish to the network that this node provides for a particular key."""
        neighbors = self.protocol.router.find_neighbors(self.node)
        return [
            await self.protocol.call_add_provider(n, key, self.node.peer_id_bytes)
            for n in neighbors
        ]

    async def get_providers(self, key):
        """Get the list of providers for a key."""
        neighbors = self.protocol.router.find_neighbors(self.node)
        return [await self.protocol.call_get_providers(n, key) for n in neighbors]

    async def set_digest(self, dkey, value):
        """Set the given SHA1 digest key (bytes) to the given value in the
        network."""
        node = create_kad_peerinfo(dkey)

        nearest = self.protocol.router.find_neighbors(node)
        if not nearest:
            log.warning("There are no known neighbors to set key %s", dkey.hex())
            return False

        spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
        nodes = await spider.find()
        log.info("setting '%s' on %s", dkey.hex(), list(map(str, nodes)))

        # if this node is close too, then store here as well
        biggest = max([n.distance_to(node) for n in nodes])
        if self.node.distance_to(node) < biggest:
            self.storage[dkey] = value
        results = [self.protocol.call_store(n, dkey, value) for n in nodes]
        # return true only if at least one store call succeeded
        return any(await asyncio.gather(*results))

    def save_state(self, fname):
        """Save the state of this node (the alpha/ksize/id/immediate neighbors)
        to a cache file with the given fname."""
        log.info("Saving state to %s", fname)
        data = {
            "ksize": self.ksize,
            "alpha": self.alpha,
            "id": self.node.peer_id_bytes,
            "neighbors": self.bootstrappable_neighbors(),
        }
        if not data["neighbors"]:
            log.warning("No known neighbors, so not writing to cache.")
            return
        with open(fname, "wb") as file:
            pickle.dump(data, file)

    @classmethod
    def load_state(cls, fname):
        """Load the state of this node (the alpha/ksize/id/immediate neighbors)
        from a cache file with the given fname."""
        log.info("Loading state from %s", fname)
        with open(fname, "rb") as file:
            data = pickle.load(file)
        svr = KademliaServer(data["ksize"], data["alpha"], data["id"])
        if data["neighbors"]:
            svr.bootstrap(data["neighbors"])
        return svr

    def save_state_regularly(self, fname, frequency=600):
        """
        Save the state of node with a given regularity to the given filename.

        Args:
            fname: File name to save regularly to
            frequency: Frequency in seconds that the state should be saved.
                       By default, 10 minutes.
        """
        self.save_state(fname)
        loop = asyncio.get_event_loop()
        self.save_state_loop = loop.call_later(
            frequency, self.save_state_regularly, fname, frequency
        )


def check_dht_value_type(value):
    """Checks to see if the type of the value is a valid type for placing in
    the dht."""
    typeset = [int, float, bool, str, bytes]
    return type(value) in typeset
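A minimal usage sketch of the server above (not part of the original file); the port and bootstrap address are made-up values:

    server = KademliaServer()
    await server.listen(8468)
    await server.bootstrap([("1.2.3.4", 8468)])   # (ip, port) of a known node
    await server.set("my-key", "my-value")
    value = await server.get("my-key")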
@ -1,188 +0,0 @@
import asyncio
import logging
import random

from rpcudp.protocol import RPCProtocol

from .kad_peerinfo import create_kad_peerinfo
from .routing import RoutingTable

log = logging.getLogger(__name__)


class KademliaProtocol(RPCProtocol):
    """
    There are four main RPCs in the Kademlia protocol: PING, STORE, FIND_NODE,
    FIND_VALUE.

    - PING probes if a node is still online
    - STORE instructs a node to store (key, value)
    - FIND_NODE takes a 160-bit ID and gets back
      (ip, udp_port, node_id) for k closest nodes to target
    - FIND_VALUE behaves like FIND_NODE unless a value is stored.
    """

    def __init__(self, source_node, storage, ksize):
        RPCProtocol.__init__(self)
        self.router = RoutingTable(self, ksize, source_node)
        self.storage = storage
        self.source_node = source_node

    def get_refresh_ids(self):
        """Get ids to search for to keep old buckets up to date."""
        ids = []
        for bucket in self.router.lonely_buckets():
            rid = random.randint(*bucket.range).to_bytes(20, byteorder="big")
            ids.append(rid)
        return ids

    def rpc_stun(self, sender):
        return sender

    def rpc_ping(self, sender, nodeid):
        source = create_kad_peerinfo(nodeid, sender[0], sender[1])

        self.welcome_if_new(source)
        return self.source_node.peer_id_bytes

    def rpc_store(self, sender, nodeid, key, value):
        source = create_kad_peerinfo(nodeid, sender[0], sender[1])

        self.welcome_if_new(source)
        log.debug(
            "got a store request from %s, storing '%s'='%s'", sender, key.hex(), value
        )
        self.storage[key] = value
        return True

    def rpc_find_node(self, sender, nodeid, key):
        log.info("finding neighbors of %i in local table", int(nodeid.hex(), 16))
        source = create_kad_peerinfo(nodeid, sender[0], sender[1])

        self.welcome_if_new(source)
        node = create_kad_peerinfo(key)
        neighbors = self.router.find_neighbors(node, exclude=source)
        return list(map(tuple, neighbors))

    def rpc_find_value(self, sender, nodeid, key):
        source = create_kad_peerinfo(nodeid, sender[0], sender[1])

        self.welcome_if_new(source)
        value = self.storage.get(key, None)
        if value is None:
            return self.rpc_find_node(sender, nodeid, key)
        return {"value": value}

    def rpc_add_provider(self, sender, nodeid, key, provider_id):
        """On receiving an add_provider call, the recipient should validate
        that the received provider_id matches the sender's nodeid; if it does,
        the recipient must store a record in its datastore. We store a map of
        content_id to peer_id (non-xor)."""
        if nodeid == provider_id:
            log.info(
                "adding provider %s for key %s in local table", provider_id, str(key)
            )
            self.storage[key] = provider_id
            return True
        return False

    def rpc_get_providers(self, sender, key):
        """On receiving a get_providers call, the recipient should look up the
        key in its data store and respond with any records plus a list of
        closer peers from its routing table."""
        providers = []
        record = self.storage.get(key, None)

        if record:
            providers.append(record)

        keynode = create_kad_peerinfo(key)
        neighbors = self.router.find_neighbors(keynode)
        for neighbor in neighbors:
            if neighbor.peer_id_bytes != record:
                providers.append(neighbor.peer_id_bytes)

        return providers

    async def call_find_node(self, node_to_ask, node_to_find):
        address = (node_to_ask.ip, node_to_ask.port)
        result = await self.find_node(
            address, self.source_node.peer_id_bytes, node_to_find.peer_id_bytes
        )
        return self.handle_call_response(result, node_to_ask)

    async def call_find_value(self, node_to_ask, node_to_find):
        address = (node_to_ask.ip, node_to_ask.port)
        result = await self.find_value(
            address, self.source_node.peer_id_bytes, node_to_find.peer_id_bytes
        )
        return self.handle_call_response(result, node_to_ask)

    async def call_ping(self, node_to_ask):
        address = (node_to_ask.ip, node_to_ask.port)
        result = await self.ping(address, self.source_node.peer_id_bytes)
        return self.handle_call_response(result, node_to_ask)

    async def call_store(self, node_to_ask, key, value):
        address = (node_to_ask.ip, node_to_ask.port)
        result = await self.store(address, self.source_node.peer_id_bytes, key, value)
        return self.handle_call_response(result, node_to_ask)

    async def call_add_provider(self, node_to_ask, key, provider_id):
        address = (node_to_ask.ip, node_to_ask.port)
        result = await self.add_provider(
            address, self.source_node.peer_id_bytes, key, provider_id
        )

        return self.handle_call_response(result, node_to_ask)

    async def call_get_providers(self, node_to_ask, key):
        address = (node_to_ask.ip, node_to_ask.port)
        result = await self.get_providers(address, key)
        return self.handle_call_response(result, node_to_ask)

    def welcome_if_new(self, node):
        """
        Given a new node, send it all the keys/values it should be storing,
        then add it to the routing table.

        @param node: A new node that just joined (or that we just found out
        about).

        Process:
        For each key in storage, get k closest nodes. If newnode is closer
        than the furthest in that list, and the node for this server
        is closer than the closest in that list, then store the key/value
        on the new node (per section 2.5 of the paper)
        """
        if not self.router.is_new_node(node):
            return

        log.info("never seen %s before, adding to router", node)
        for key, value in self.storage:
            keynode = create_kad_peerinfo(key)
            neighbors = self.router.find_neighbors(keynode)
            if neighbors:
                last = neighbors[-1].distance_to(keynode)
                new_node_close = node.distance_to(keynode) < last
                first = neighbors[0].distance_to(keynode)
                this_closest = self.source_node.distance_to(keynode) < first
            if not neighbors or (new_node_close and this_closest):
                asyncio.ensure_future(self.call_store(node, key, value))
        self.router.add_contact(node)

    def handle_call_response(self, result, node):
        """
        If we get a response, add the node to the routing table.

        If we get no response, make sure it's removed from the routing
        table.
        """
        if not result[0]:
            log.warning("no response from %s, removing from router", node)
            self.router.remove_contact(node)
            return result

        log.info("got successful response from %s", node)
        self.welcome_if_new(node)
        return result
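A sketch of the calling convention relied on above (not part of the original file): rpcudp pairs each local rpc_<name> handler with a remote caller of the same name, so a round trip between two nodes looks roughly like this, with a made-up address:

    result = await protocol.ping(("1.2.3.4", 8468), protocol.source_node.peer_id_bytes)
    ok, remote_id = result   # rpcudp returns (request was answered, response payload)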
@ -1,184 +0,0 @@
import asyncio
from collections import OrderedDict
import heapq
import operator
import time

from .utils import OrderedSet, bytes_to_bit_string, shared_prefix


class KBucket:
    """Each node keeps a list of (ip, udp_port, node_id) triples for nodes of
    distance between 2^i and 2^(i+1). This list is a k-bucket. Each k-bucket
    implements a last-seen eviction policy, except that live nodes are never
    removed."""

    def __init__(self, rangeLower, rangeUpper, ksize):
        self.range = (rangeLower, rangeUpper)
        self.nodes = OrderedDict()
        self.replacement_nodes = OrderedSet()
        self.touch_last_updated()
        self.ksize = ksize

    def touch_last_updated(self):
        self.last_updated = time.monotonic()

    def get_nodes(self):
        return list(self.nodes.values())

    def split(self):
        midpoint = (self.range[0] + self.range[1]) / 2
        one = KBucket(self.range[0], midpoint, self.ksize)
        two = KBucket(midpoint + 1, self.range[1], self.ksize)
        for node in self.nodes.values():
            bucket = one if node.xor_id <= midpoint else two
            bucket.nodes[node.peer_id_bytes] = node
        return (one, two)

    def remove_node(self, node):
        if node.peer_id_bytes not in self.nodes:
            return

        # delete node, and see if we can add a replacement
        del self.nodes[node.peer_id_bytes]
        if self.replacement_nodes:
            newnode = self.replacement_nodes.pop()
            self.nodes[newnode.peer_id_bytes] = newnode

    def has_in_range(self, node):
        return self.range[0] <= node.xor_id <= self.range[1]

    def is_new_node(self, node):
        return node.peer_id_bytes not in self.nodes

    def add_node(self, node):
        """
        Add a C{Node} to the C{KBucket}. Return True if successful, False if
        the bucket is full.

        If the bucket is full, keep track of the node in a replacement list,
        per section 4.1 of the paper.
        """
        if node.peer_id_bytes in self.nodes:
            del self.nodes[node.peer_id_bytes]
            self.nodes[node.peer_id_bytes] = node
        elif len(self) < self.ksize:
            self.nodes[node.peer_id_bytes] = node
        else:
            self.replacement_nodes.push(node)
            return False
        return True

    def depth(self):
        vals = self.nodes.values()
        sprefix = shared_prefix([bytes_to_bit_string(n.peer_id_bytes) for n in vals])
        return len(sprefix)

    def head(self):
        return list(self.nodes.values())[0]

    def __getitem__(self, node_id):
        return self.nodes.get(node_id, None)

    def __len__(self):
        return len(self.nodes)


class TableTraverser:
    def __init__(self, table, startNode):
        index = table.get_bucket_for(startNode)
        table.buckets[index].touch_last_updated()
        self.current_nodes = table.buckets[index].get_nodes()
        self.left_buckets = table.buckets[:index]
        self.right_buckets = table.buckets[(index + 1) :]
        self.left = True

    def __iter__(self):
        return self

    def __next__(self):
        """Pop an item from the left subtree, then right, then left, etc."""
        if self.current_nodes:
            return self.current_nodes.pop()

        if self.left and self.left_buckets:
            self.current_nodes = self.left_buckets.pop().get_nodes()
            self.left = False
            return next(self)

        if self.right_buckets:
            self.current_nodes = self.right_buckets.pop(0).get_nodes()
            self.left = True
            return next(self)

        raise StopIteration


class RoutingTable:
    def __init__(self, protocol, ksize, node):
        """
        @param node: The node that represents this server. It won't
        be added to the routing table, but will be needed later to
        determine which buckets to split or not.
        """
        self.node = node
        self.protocol = protocol
        self.ksize = ksize
        self.flush()

    def flush(self):
        self.buckets = [KBucket(0, 2 ** 160, self.ksize)]

    def split_bucket(self, index):
        one, two = self.buckets[index].split()
        self.buckets[index] = one
        self.buckets.insert(index + 1, two)

    def lonely_buckets(self):
        """Get all of the buckets that haven't been updated in over an hour."""
        hrago = time.monotonic() - 3600
        return [b for b in self.buckets if b.last_updated < hrago]

    def remove_contact(self, node):
        index = self.get_bucket_for(node)
        self.buckets[index].remove_node(node)

    def is_new_node(self, node):
        index = self.get_bucket_for(node)
        return self.buckets[index].is_new_node(node)

    def add_contact(self, node):
        index = self.get_bucket_for(node)
        bucket = self.buckets[index]

        # this will succeed unless the bucket is full
        if bucket.add_node(node):
            return

        # Per section 4.2 of paper, split if the bucket has the node
        # in its range or if the depth is not congruent to 0 mod 5
        if bucket.has_in_range(self.node) or bucket.depth() % 5 != 0:
            self.split_bucket(index)
            self.add_contact(node)
        else:
            asyncio.ensure_future(self.protocol.call_ping(bucket.head()))

    def get_bucket_for(self, node):
        """Get the index of the bucket that the given node would fall into."""
        for index, bucket in enumerate(self.buckets):
            if node.xor_id < bucket.range[1]:
                return index
        # we should never be here, but make linter happy
        return None

    def find_neighbors(self, node, k=None, exclude=None):
        k = k or self.ksize
        nodes = []
        for neighbor in TableTraverser(self, node):
            notexcluded = exclude is None or not neighbor.same_home_as(exclude)
            if neighbor.peer_id_bytes != node.peer_id_bytes and notexcluded:
                heapq.heappush(nodes, (node.distance_to(neighbor), neighbor))
            if len(nodes) == k:
                break

        return list(map(operator.itemgetter(1), heapq.nsmallest(k, nodes)))
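A rough sketch (not part of the original file) of the split rule in add_contact: the table starts with a single bucket covering the whole 160-bit space, and split_bucket halves its range. Here my_node stands in for any local KadPeerInfo, and no protocol is needed while the bucket stays empty:

    table = RoutingTable(protocol=None, ksize=20, node=my_node)
    assert table.buckets[0].range == (0, 2 ** 160)
    table.split_bucket(0)
    assert len(table.buckets) == 2   # the two buckets now cover each half of the range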
@ -1,78 +0,0 @@
// Record represents a dht record that contains a value
// for a key value pair
message Record {
  // The key that references this record
  bytes key = 1;

  // The actual value this record is storing
  bytes value = 2;

  // Note: These fields were removed from the Record message
  // hash of the authors public key
  //optional string author = 3;
  // A PKI signature for the key+value+author
  //optional bytes signature = 4;

  // Time the record was received, set by receiver
  string timeReceived = 5;
};

message Message {
  enum MessageType {
    PUT_VALUE = 0;
    GET_VALUE = 1;
    ADD_PROVIDER = 2;
    GET_PROVIDERS = 3;
    FIND_NODE = 4;
    PING = 5;
  }

  enum ConnectionType {
    // sender does not have a connection to peer, and no extra information (default)
    NOT_CONNECTED = 0;

    // sender has a live connection to peer
    CONNECTED = 1;

    // sender recently connected to peer
    CAN_CONNECT = 2;

    // sender recently tried to connect to peer repeatedly but failed to connect
    // ("try" here is loose, but this should signal "made strong effort, failed")
    CANNOT_CONNECT = 3;
  }

  message Peer {
    // ID of a given peer.
    bytes id = 1;

    // multiaddrs for a given peer
    repeated bytes addrs = 2;

    // used to signal the sender's connection capabilities to the peer
    ConnectionType connection = 3;
  }

  // defines what type of message it is.
  MessageType type = 1;

  // defines what coral cluster level this query/response belongs to.
  // in case we want to implement coral's cluster rings in the future.
  int32 clusterLevelRaw = 10; // NOT USED

  // Used to specify the key associated with this message.
  // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
  bytes key = 2;

  // Used to return a value
  // PUT_VALUE, GET_VALUE
  Record record = 3;

  // Used to return peers closer to a key in a query
  // GET_VALUE, GET_PROVIDERS, FIND_NODE
  repeated Peer closerPeers = 8;

  // Used to return Providers
  // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
  repeated Peer providerPeers = 9;
}
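A sketch (not part of the original file) of driving this schema from Python, assuming bindings are generated with protoc into a module named kademlia_pb2; the module path and key below are illustrative, not part of the repository:

    from libp2p.kademlia.pb import kademlia_pb2 as pb   # hypothetical module path

    msg = pb.Message()
    msg.type = pb.Message.FIND_NODE
    msg.key = b"target-peer-id"
    wire = msg.SerializeToString()   # bytes ready to hand to the transport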
@ -1,93 +0,0 @@
from abc import ABC, abstractmethod
from collections import OrderedDict
from itertools import takewhile
import operator
import time


class IStorage(ABC):
    """
    Local storage for this node.

    IStorage implementations of get must return the same type as put in
    by set.
    """

    @abstractmethod
    def __setitem__(self, key, value):
        """Set a key to the given value."""

    @abstractmethod
    def __getitem__(self, key):
        """
        Get the given key.

        If item doesn't exist, raises C{KeyError}.
        """

    @abstractmethod
    def get(self, key, default=None):
        """
        Get given key.

        If not found, return default.
        """

    @abstractmethod
    def iter_older_than(self, seconds_old):
        """Return an iterator over (key, value) tuples for items older than
        the given seconds_old."""

    @abstractmethod
    def __iter__(self):
        """Get the iterator for this storage; should yield tuples of (key,
        value)."""


class ForgetfulStorage(IStorage):
    def __init__(self, ttl=604800):
        """By default, max age is a week."""
        self.data = OrderedDict()
        self.ttl = ttl

    def __setitem__(self, key, value):
        if key in self.data:
            del self.data[key]
        self.data[key] = (time.monotonic(), value)
        self.cull()

    def cull(self):
        for _, _ in self.iter_older_than(self.ttl):
            self.data.popitem(last=False)

    def get(self, key, default=None):
        self.cull()
        if key in self.data:
            return self[key]
        return default

    def __getitem__(self, key):
        self.cull()
        return self.data[key][1]

    def __repr__(self):
        self.cull()
        return repr(self.data)

    def iter_older_than(self, seconds_old):
        min_birthday = time.monotonic() - seconds_old
        zipped = self._triple_iter()
        matches = takewhile(lambda r: min_birthday >= r[1], zipped)
        return list(map(operator.itemgetter(0, 2), matches))

    def _triple_iter(self):
        ikeys = self.data.keys()
        ibirthday = map(operator.itemgetter(0), self.data.values())
        ivalues = map(operator.itemgetter(1), self.data.values())
        return zip(ikeys, ibirthday, ivalues)

    def __iter__(self):
        self.cull()
        ikeys = self.data.keys()
        ivalues = map(operator.itemgetter(1), self.data.values())
        return zip(ikeys, ivalues)
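A sketch (not part of the original file) of the eviction behavior above, using an artificially small ttl:

    import time

    store = ForgetfulStorage(ttl=1)
    store[b"k"] = b"v"
    assert store.get(b"k") == b"v"
    time.sleep(1.1)                   # entry is now older than the ttl
    assert store.get(b"k") is None    # get() culls expired entries first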
@ -1,56 +0,0 @@
"""General catchall for functions that don't make sense as methods."""
import asyncio
import hashlib
import operator


async def gather_dict(dic):
    cors = list(dic.values())
    results = await asyncio.gather(*cors)
    return dict(zip(dic.keys(), results))


def digest(string):
    if not isinstance(string, bytes):
        string = str(string).encode("utf8")
    return hashlib.sha1(string).digest()


class OrderedSet(list):
    """
    Acts like a list in all ways, except in the behavior of the
    :meth:`push` method.
    """

    def push(self, thing):
        """
        1. If the item exists in the list, it's removed
        2. The item is pushed to the end of the list
        """
        if thing in self:
            self.remove(thing)
        self.append(thing)


def shared_prefix(args):
    """
    Find the shared prefix between the strings.

    For instance:

        sharedPrefix(['blahblah', 'blahwhat'])

    returns 'blah'.
    """
    i = 0
    while i < min(map(len, args)):
        if len(set(map(operator.itemgetter(i), args))) != 1:
            break
        i += 1
    return args[0][:i]


def bytes_to_bit_string(bites):
    bits = [bin(bite)[2:].rjust(8, "0") for bite in bites]
    return "".join(bits)
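For instance (not part of the original file), the helpers above behave as follows:

    assert shared_prefix(["blahblah", "blahwhat"]) == "blah"
    assert bytes_to_bit_string(b"\x01") == "00000001"
    assert len(digest("anything")) == 20   # SHA-1 digests are 20 bytes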
@ -21,6 +21,7 @@ from libp2p.transport.transport_interface import ITransport
 from libp2p.transport.upgrader import TransportUpgrader
 from libp2p.typing import StreamHandlerFn

+from ..exceptions import MultiError
 from .connection.raw_connection import RawConnection
 from .connection.swarm_connection import SwarmConn
 from .exceptions import SwarmException
@ -95,21 +96,51 @@ class Swarm(INetwork, Service):
         try:
             # Get peer info from peer store
             addrs = self.peerstore.addrs(peer_id)
-        except PeerStoreError:
-            raise SwarmException(f"No known addresses to peer {peer_id}")
+        except PeerStoreError as error:
+            raise SwarmException(f"No known addresses to peer {peer_id}") from error

         if not addrs:
             raise SwarmException(f"No known addresses to peer {peer_id}")

-        multiaddr = addrs[0]
+        exceptions: List[SwarmException] = []

+        # Try all known addresses
+        for multiaddr in addrs:
+            try:
+                return await self.dial_addr(multiaddr, peer_id)
+            except SwarmException as e:
+                exceptions.append(e)
+                logger.debug(
+                    "encountered swarm exception when trying to connect to %s, "
+                    "trying next address...",
+                    multiaddr,
+                    exc_info=e,
+                )
+
+        # Tried all addresses, raising exception.
+        raise SwarmException(
+            f"unable to connect to {peer_id}, no addresses established a successful connection "
+            "(with exceptions)"
+        ) from MultiError(exceptions)
+
+    async def dial_addr(self, addr: Multiaddr, peer_id: ID) -> INetConn:
+        """
+        dial_addr tries to create a connection to peer_id with addr.
+
+        :param addr: the address we want to connect with
+        :param peer_id: the peer we want to connect to
+        :raises SwarmException: raised when an error occurs
+        :return: network connection
+        """

         # Dial peer (connection to peer does not yet exist)
         # Transport dials peer (gets back a raw conn)
         try:
-            raw_conn = await self.transport.dial(multiaddr)
+            raw_conn = await self.transport.dial(addr)
         except OpenConnectionError as error:
             logger.debug("fail to dial peer %s over base transport", peer_id)
             raise SwarmException(
-                "fail to open connection to peer %s", peer_id
+                f"fail to open connection to peer {peer_id}"
             ) from error

         logger.debug("dialed peer %s over base transport", peer_id)
@ -146,7 +177,6 @@ class Swarm(INetwork, Service):
     async def new_stream(self, peer_id: ID) -> INetStream:
         """
         :param peer_id: peer_id of destination
-        :param protocol_id: protocol id
         :raises SwarmException: raised when an error occurs
         :return: net stream instance
         """
@ -164,13 +194,15 @@ class Swarm(INetwork, Service):
         :return: true if at least one success

         For each multiaddr
-            Check if a listener for multiaddr exists already
-            If listener already exists, continue
-            Otherwise:
-                Capture multiaddr in conn handler
-                Have conn handler delegate to stream handler
-                Call listener listen with the multiaddr
-                Map multiaddr to listener
+
+        - Check if a listener for multiaddr exists already
+        - If listener already exists, continue
+        - Otherwise:
+
+          - Capture multiaddr in conn handler
+          - Have conn handler delegate to stream handler
+          - Call listener listen with the multiaddr
+          - Map multiaddr to listener
         """
         for maddr in multiaddrs:
             if str(maddr) in self.listeners:
@ -251,7 +283,7 @@ class Swarm(INetwork, Service):
         # TODO: Should be changed to close multiple connections,
         # if we have several connections per peer in the future.
         connection = self.connections[peer_id]
-        # NOTE: `connection.close` will perform `del self.connections[peer_id]`
+        # NOTE: `connection.close` will delete `peer_id` from `self.connections`
         # and `notify_disconnected` for us.
         await connection.close()
@ -7,9 +7,6 @@ from .id import ID


 class IAddrBook(ABC):
-    def __init__(self) -> None:
-        pass
-
     @abstractmethod
     def add_addr(self, peer_id: ID, addr: Multiaddr, ttl: int) -> None:
         """
@ -44,7 +44,7 @@ class ID:
     @property
     def xor_id(self) -> int:
         if not self._xor_id:
-            self._xor_id = int(digest(self._bytes).hex(), 16)
+            self._xor_id = int(sha256_digest(self._bytes).hex(), 16)
         return self._xor_id

     def to_bytes(self) -> bytes:
@ -89,7 +89,7 @@ class ID:
         return cls(mh_digest.encode())


-def digest(data: Union[str, bytes]) -> bytes:
+def sha256_digest(data: Union[str, bytes]) -> bytes:
     if isinstance(data, str):
         data = data.encode("utf8")
-    return hashlib.sha1(data).digest()
+    return hashlib.sha256(data).digest()
@ -2,46 +2,107 @@ from typing import Any, Dict, List, Sequence

 from multiaddr import Multiaddr

+from libp2p.crypto.keys import PrivateKey, PublicKey
+
 from .peerdata_interface import IPeerData


 class PeerData(IPeerData):

+    pubkey: PublicKey
+    privkey: PrivateKey
     metadata: Dict[Any, Any]
     protocols: List[str]
     addrs: List[Multiaddr]

     def __init__(self) -> None:
+        self.pubkey = None
+        self.privkey = None
         self.metadata = {}
         self.protocols = []
         self.addrs = []

     def get_protocols(self) -> List[str]:
+        """
+        :return: all protocols associated with given peer
+        """
         return self.protocols

     def add_protocols(self, protocols: Sequence[str]) -> None:
+        """
+        :param protocols: protocols to add
+        """
         self.protocols.extend(list(protocols))

     def set_protocols(self, protocols: Sequence[str]) -> None:
+        """
+        :param protocols: protocols to set
+        """
         self.protocols = list(protocols)

     def add_addrs(self, addrs: Sequence[Multiaddr]) -> None:
+        """
+        :param addrs: multiaddresses to add
+        """
         self.addrs.extend(addrs)

     def get_addrs(self) -> List[Multiaddr]:
+        """
+        :return: all multiaddresses
+        """
         return self.addrs

     def clear_addrs(self) -> None:
+        """Clear all addresses."""
         self.addrs = []

     def put_metadata(self, key: str, val: Any) -> None:
+        """
+        :param key: key in KV pair
+        :param val: val to associate with key
+        """
         self.metadata[key] = val

     def get_metadata(self, key: str) -> Any:
+        """
+        :param key: key in KV pair
+        :return: val for key
+        :raise PeerDataError: key not found
+        """
         if key in self.metadata:
             return self.metadata[key]
         raise PeerDataError("key not found")

+    def add_pubkey(self, pubkey: PublicKey) -> None:
+        """
+        :param pubkey:
+        """
+        self.pubkey = pubkey
+
+    def get_pubkey(self) -> PublicKey:
+        """
+        :return: public key of the peer
+        :raise PeerDataError: if public key not found
+        """
+        if self.pubkey is None:
+            raise PeerDataError("public key not found")
+        return self.pubkey
+
+    def add_privkey(self, privkey: PrivateKey) -> None:
+        """
+        :param privkey:
+        """
+        self.privkey = privkey
+
+    def get_privkey(self) -> PrivateKey:
+        """
+        :return: private key of the peer
+        :raise PeerDataError: if private key not found
+        """
+        if self.privkey is None:
+            raise PeerDataError("private key not found")
+        return self.privkey
+

 class PeerDataError(KeyError):
     """Raised when a key is not found in peer metadata."""
@ -3,6 +3,8 @@ from typing import Any, List, Sequence

 from multiaddr import Multiaddr

+from libp2p.crypto.keys import PrivateKey, PublicKey
+
 from .peermetadata_interface import IPeerMetadata


@ -22,7 +24,7 @@ class IPeerData(ABC):
     @abstractmethod
     def set_protocols(self, protocols: Sequence[str]) -> None:
         """
-        :param protocols: protocols to add
+        :param protocols: protocols to set
         """

     @abstractmethod
@ -46,7 +48,6 @@ class IPeerData(ABC):
         """
         :param key: key in KV pair
         :param val: val to associate with key
-        :raise Exception: unsuccesful put
         """

     @abstractmethod
@ -54,5 +55,31 @@ class IPeerData(ABC):
         """
         :param key: key in KV pair
         :return: val for key
-        :raise Exception: key not found
+        :raise PeerDataError: key not found
+        """
+
+    @abstractmethod
+    def add_pubkey(self, pubkey: PublicKey) -> None:
+        """
+        :param pubkey:
+        """
+
+    @abstractmethod
+    def get_pubkey(self) -> PublicKey:
+        """
+        :return: public key of the peer
+        :raise PeerDataError: if public key not found
+        """
+
+    @abstractmethod
+    def add_privkey(self, privkey: PrivateKey) -> None:
+        """
+        :param privkey:
+        """
+
+    @abstractmethod
+    def get_privkey(self) -> PrivateKey:
+        """
+        :return: private key of the peer
+        :raise PeerDataError: if private key not found
         """
@ -5,9 +5,6 @@ from .id import ID


 class IPeerMetadata(ABC):
-    def __init__(self) -> None:
-        pass
-
     @abstractmethod
     def get(self, peer_id: ID, key: str) -> Any:
         """
|||||||
@ -1,7 +1,10 @@
|
|||||||
from typing import Any, Dict, List, Optional, Sequence
|
from collections import defaultdict
|
||||||
|
from typing import Any, Dict, List, Sequence
|
||||||
|
|
||||||
from multiaddr import Multiaddr
|
from multiaddr import Multiaddr
|
||||||
|
|
||||||
|
from libp2p.crypto.keys import KeyPair, PrivateKey, PublicKey
|
||||||
|
|
||||||
from .id import ID
|
from .id import ID
|
||||||
from .peerdata import PeerData, PeerDataError
|
from .peerdata import PeerData, PeerDataError
|
||||||
from .peerinfo import PeerInfo
|
from .peerinfo import PeerInfo
|
||||||
@ -10,90 +13,185 @@ from .peerstore_interface import IPeerStore
|
|||||||
|
|
||||||
class PeerStore(IPeerStore):
|
class PeerStore(IPeerStore):
|
||||||
|
|
||||||
peer_map: Dict[ID, PeerData]
|
peer_data_map: Dict[ID, PeerData]
|
||||||
|
|
||||||
def __init__(self) -> None:
|
def __init__(self) -> None:
|
||||||
IPeerStore.__init__(self)
|
self.peer_data_map = defaultdict(PeerData)
|
||||||
self.peer_map = {}
|
|
||||||
|
|
||||||
def __create_or_get_peer(self, peer_id: ID) -> PeerData:
|
def peer_info(self, peer_id: ID) -> PeerInfo:
|
||||||
"""
|
"""
|
||||||
Returns the peer data for peer_id or creates a new peer data (and
|
:param peer_id: peer ID to get info for
|
||||||
stores it in peer_map) if peer data for peer_id does not yet exist.
|
:return: peer info object
|
||||||
|
|
||||||
:param peer_id: peer ID
|
|
||||||
:return: peer data
|
|
||||||
"""
|
"""
|
||||||
if peer_id in self.peer_map:
|
if peer_id in self.peer_data_map:
|
||||||
return self.peer_map[peer_id]
|
peer_data = self.peer_data_map[peer_id]
|
||||||
data = PeerData()
|
return PeerInfo(peer_id, peer_data.get_addrs())
|
||||||
self.peer_map[peer_id] = data
|
raise PeerStoreError("peer ID not found")
|
||||||
return self.peer_map[peer_id]
|
|
||||||
|
|
||||||
def peer_info(self, peer_id: ID) -> Optional[PeerInfo]:
|
|
||||||
if peer_id in self.peer_map:
|
|
||||||
peer_data = self.peer_map[peer_id]
|
|
||||||
return PeerInfo(peer_id, peer_data.addrs)
|
|
||||||
return None
|
|
||||||
|
|
||||||
def get_protocols(self, peer_id: ID) -> List[str]:
|
def get_protocols(self, peer_id: ID) -> List[str]:
|
||||||
if peer_id in self.peer_map:
|
"""
|
||||||
return self.peer_map[peer_id].get_protocols()
|
:param peer_id: peer ID to get protocols for
|
||||||
|
:return: protocols (as list of strings)
|
||||||
|
:raise PeerStoreError: if peer ID not found
|
||||||
|
"""
|
||||||
|
if peer_id in self.peer_data_map:
|
||||||
|
return self.peer_data_map[peer_id].get_protocols()
|
||||||
raise PeerStoreError("peer ID not found")
|
raise PeerStoreError("peer ID not found")
|
||||||
|
|
||||||
def add_protocols(self, peer_id: ID, protocols: Sequence[str]) -> None:
|
def add_protocols(self, peer_id: ID, protocols: Sequence[str]) -> None:
|
||||||
peer = self.__create_or_get_peer(peer_id)
|
"""
|
||||||
peer.add_protocols(list(protocols))
|
:param peer_id: peer ID to add protocols for
|
||||||
|
:param protocols: protocols to add
|
||||||
|
"""
|
||||||
|
peer_data = self.peer_data_map[peer_id]
|
||||||
|
peer_data.add_protocols(list(protocols))
|
||||||
|
|
||||||
def set_protocols(self, peer_id: ID, protocols: Sequence[str]) -> None:
|
def set_protocols(self, peer_id: ID, protocols: Sequence[str]) -> None:
|
||||||
peer = self.__create_or_get_peer(peer_id)
|
"""
|
||||||
peer.set_protocols(list(protocols))
|
:param peer_id: peer ID to set protocols for
|
||||||
|
:param protocols: protocols to set
|
||||||
|
"""
|
||||||
|
peer_data = self.peer_data_map[peer_id]
|
||||||
|
peer_data.set_protocols(list(protocols))
|
||||||
|
|
||||||
def peer_ids(self) -> List[ID]:
|
def peer_ids(self) -> List[ID]:
|
||||||
return list(self.peer_map.keys())
|
"""
|
||||||
|
:return: all of the peer IDs stored in peer store
|
||||||
|
"""
|
||||||
|
return list(self.peer_data_map.keys())
|
||||||
|
|
||||||
def get(self, peer_id: ID, key: str) -> Any:
|
def get(self, peer_id: ID, key: str) -> Any:
|
||||||
if peer_id in self.peer_map:
|
"""
|
||||||
|
:param peer_id: peer ID to get peer data for
|
||||||
|
:param key: the key to search value for
|
||||||
|
:return: value corresponding to the key
|
||||||
|
:raise PeerStoreError: if peer ID or value not found
|
||||||
|
"""
|
||||||
|
if peer_id in self.peer_data_map:
|
||||||
try:
|
try:
|
||||||
val = self.peer_map[peer_id].get_metadata(key)
|
val = self.peer_data_map[peer_id].get_metadata(key)
|
||||||
except PeerDataError as error:
|
except PeerDataError as error:
|
||||||
raise PeerStoreError(error)
|
raise PeerStoreError(error)
|
||||||
return val
|
return val
|
||||||
raise PeerStoreError("peer ID not found")
|
        raise PeerStoreError("peer ID not found")

    def put(self, peer_id: ID, key: str, val: Any) -> None:
-        # <<?>>
-        # This can output an error, not sure what the possible errors are
-        peer = self.__create_or_get_peer(peer_id)
-        peer.put_metadata(key, val)
+        """
+        :param peer_id: peer ID to put peer data for
+        :param key:
+        :param val:
+        """
+        peer_data = self.peer_data_map[peer_id]
+        peer_data.put_metadata(key, val)

    def add_addr(self, peer_id: ID, addr: Multiaddr, ttl: int) -> None:
+        """
+        :param peer_id: peer ID to add address for
+        :param addr:
+        :param ttl: time-to-live for this record
+        """
        self.add_addrs(peer_id, [addr], ttl)

    def add_addrs(self, peer_id: ID, addrs: Sequence[Multiaddr], ttl: int) -> None:
+        """
+        :param peer_id: peer ID to add address for
+        :param addrs:
+        :param ttl: time-to-live for this record
+        """
        # Ignore ttl for now
-        peer = self.__create_or_get_peer(peer_id)
-        peer.add_addrs(list(addrs))
+        peer_data = self.peer_data_map[peer_id]
+        peer_data.add_addrs(list(addrs))

    def addrs(self, peer_id: ID) -> List[Multiaddr]:
-        if peer_id in self.peer_map:
-            return self.peer_map[peer_id].get_addrs()
+        """
+        :param peer_id: peer ID to get addrs for
+        :return: list of addrs
+        :raise PeerStoreError: if peer ID not found
+        """
+        if peer_id in self.peer_data_map:
+            return self.peer_data_map[peer_id].get_addrs()
        raise PeerStoreError("peer ID not found")

    def clear_addrs(self, peer_id: ID) -> None:
+        """
+        :param peer_id: peer ID to clear addrs for
+        """
        # Only clear addresses if the peer is in peer map
-        if peer_id in self.peer_map:
-            self.peer_map[peer_id].clear_addrs()
+        if peer_id in self.peer_data_map:
+            self.peer_data_map[peer_id].clear_addrs()

    def peers_with_addrs(self) -> List[ID]:
+        """
+        :return: all of the peer IDs which have addrs stored in the peer store
+        """
        # Add all peers with at least 1 addr to output
        output: List[ID] = []

-        for peer_id in self.peer_map:
-            if len(self.peer_map[peer_id].get_addrs()) >= 1:
+        for peer_id in self.peer_data_map:
+            if len(self.peer_data_map[peer_id].get_addrs()) >= 1:
                output.append(peer_id)
        return output

+    def add_pubkey(self, peer_id: ID, pubkey: PublicKey) -> None:
+        """
+        :param peer_id: peer ID to add public key for
+        :param pubkey:
+        :raise PeerStoreError: if peer ID and pubkey do not match
+        """
+        peer_data = self.peer_data_map[peer_id]
+        if ID.from_pubkey(pubkey) != peer_id:
+            raise PeerStoreError("peer ID and pubkey does not match")
+        peer_data.add_pubkey(pubkey)
+
+    def pubkey(self, peer_id: ID) -> PublicKey:
+        """
+        :param peer_id: peer ID to get public key for
+        :return: public key of the peer
+        :raise PeerStoreError: if peer ID or peer pubkey not found
+        """
+        if peer_id in self.peer_data_map:
+            peer_data = self.peer_data_map[peer_id]
+            try:
+                pubkey = peer_data.get_pubkey()
+            except PeerDataError:
+                raise PeerStoreError("peer pubkey not found")
+            return pubkey
+        raise PeerStoreError("peer ID not found")
+
+    def add_privkey(self, peer_id: ID, privkey: PrivateKey) -> None:
+        """
+        :param peer_id: peer ID to add private key for
+        :param privkey:
+        :raise PeerStoreError: if peer ID and privkey do not match
+        """
+        peer_data = self.peer_data_map[peer_id]
+        if ID.from_pubkey(privkey.get_public_key()) != peer_id:
+            raise PeerStoreError("peer ID and privkey does not match")
+        peer_data.add_privkey(privkey)
+
+    def privkey(self, peer_id: ID) -> PrivateKey:
+        """
+        :param peer_id: peer ID to get private key for
+        :return: private key of the peer
+        :raise PeerStoreError: if peer ID or peer privkey not found
+        """
+        if peer_id in self.peer_data_map:
+            peer_data = self.peer_data_map[peer_id]
+            try:
+                privkey = peer_data.get_privkey()
+            except PeerDataError:
+                raise PeerStoreError("peer privkey not found")
+            return privkey
+        raise PeerStoreError("peer ID not found")
+
+    def add_key_pair(self, peer_id: ID, key_pair: KeyPair) -> None:
+        """
+        :param peer_id: peer ID to add the key pair for
+        :param key_pair:
+        """
+        self.add_pubkey(peer_id, key_pair.public_key)
+        self.add_privkey(peer_id, key_pair.private_key)
+

class PeerStoreError(KeyError):
    """Raised when peer ID is not found in peer store."""
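The key-handling methods added above all enforce the same invariant: a key may only be stored under the peer ID that is derived from its public half. A minimal sketch of that invariant in use; the `create_new_key_pair` helper, the module paths, and the defaultdict-backed `peer_data_map` are assumptions based on py-libp2p at the time of this merge:

from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.peer.id import ID
from libp2p.peer.peerstore import PeerStore, PeerStoreError

key_pair = create_new_key_pair()               # fresh secp256k1 key pair
peer_id = ID.from_pubkey(key_pair.public_key)  # the ID is derived from the pubkey

store = PeerStore()
store.add_key_pair(peer_id, key_pair)          # accepted: ID matches the pubkey
assert store.pubkey(peer_id).serialize() == key_pair.public_key.serialize()

other_id = ID.from_pubkey(create_new_key_pair().public_key)
try:
    store.add_pubkey(other_id, key_pair.public_key)  # ID does not match this key
except PeerStoreError:
    pass  # rejected, exactly as the `ID.from_pubkey(pubkey) != peer_id` check intends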
@@ -1,5 +1,9 @@
 from abc import abstractmethod
-from typing import List, Sequence
+from typing import Any, List, Sequence

+from multiaddr import Multiaddr
+
+from libp2p.crypto.keys import KeyPair, PrivateKey, PublicKey
+
 from .addrbook_interface import IAddrBook
 from .id import ID

@@ -8,10 +12,6 @@ from .peermetadata_interface import IPeerMetadata


 class IPeerStore(IAddrBook, IPeerMetadata):
-    def __init__(self) -> None:
-        IPeerMetadata.__init__(self)
-        IAddrBook.__init__(self)
-
     @abstractmethod
     def peer_info(self, peer_id: ID) -> PeerInfo:
         """

@@ -23,8 +23,8 @@ class IPeerStore(IAddrBook, IPeerMetadata):
     def get_protocols(self, peer_id: ID) -> List[str]:
         """
         :param peer_id: peer ID to get protocols for
-        :return: protocols (as strings)
-        :raise Exception: peer ID not found exception
+        :return: protocols (as list of strings)
+        :raise PeerStoreError: if peer ID not found
         """

     @abstractmethod
@@ -32,7 +32,6 @@ class IPeerStore(IAddrBook, IPeerMetadata):
         """
         :param peer_id: peer ID to add protocols for
         :param protocols: protocols to add
-        :raise Exception: peer ID not found
         """

     @abstractmethod
@@ -40,7 +39,6 @@ class IPeerStore(IAddrBook, IPeerMetadata):
         """
         :param peer_id: peer ID to set protocols for
         :param protocols: protocols to set
-        :raise Exception: peer ID not found
         """

     @abstractmethod
@@ -48,3 +46,95 @@ class IPeerStore(IAddrBook, IPeerMetadata):
         """
         :return: all of the peer IDs stored in peer store
         """
+
+    @abstractmethod
+    def get(self, peer_id: ID, key: str) -> Any:
+        """
+        :param peer_id: peer ID to get peer data for
+        :param key: the key to search value for
+        :return: value corresponding to the key
+        :raise PeerStoreError: if peer ID or value not found
+        """
+
+    @abstractmethod
+    def put(self, peer_id: ID, key: str, val: Any) -> None:
+        """
+        :param peer_id: peer ID to put peer data for
+        :param key:
+        :param val:
+        """
+
+    @abstractmethod
+    def add_addr(self, peer_id: ID, addr: Multiaddr, ttl: int) -> None:
+        """
+        :param peer_id: peer ID to add address for
+        :param addr:
+        :param ttl: time-to-live for this record
+        """
+
+    @abstractmethod
+    def add_addrs(self, peer_id: ID, addrs: Sequence[Multiaddr], ttl: int) -> None:
+        """
+        :param peer_id: peer ID to add address for
+        :param addrs:
+        :param ttl: time-to-live for this record
+        """
+
+    @abstractmethod
+    def addrs(self, peer_id: ID) -> List[Multiaddr]:
+        """
+        :param peer_id: peer ID to get addrs for
+        :return: list of addrs
+        """
+
+    @abstractmethod
+    def clear_addrs(self, peer_id: ID) -> None:
+        """
+        :param peer_id: peer ID to clear addrs for
+        """
+
+    @abstractmethod
+    def peers_with_addrs(self) -> List[ID]:
+        """
+        :return: all of the peer IDs which have addrs stored in the peer store
+        """
+
+    @abstractmethod
+    def add_pubkey(self, peer_id: ID, pubkey: PublicKey) -> None:
+        """
+        :param peer_id: peer ID to add public key for
+        :param pubkey:
+        :raise PeerStoreError: if peer ID already has pubkey set
+        """
+
+    @abstractmethod
+    def pubkey(self, peer_id: ID) -> PublicKey:
+        """
+        :param peer_id: peer ID to get public key for
+        :return: public key of the peer
+        :raise PeerStoreError: if peer ID not found
+        """
+
+    @abstractmethod
+    def add_privkey(self, peer_id: ID, privkey: PrivateKey) -> None:
+        """
+        :param peer_id: peer ID to add private key for
+        :param privkey:
+        :raise PeerStoreError: if peer ID already has privkey set
+        """
+
+    @abstractmethod
+    def privkey(self, peer_id: ID) -> PrivateKey:
+        """
+        :param peer_id: peer ID to get private key for
+        :return: private key of the peer
+        :raise PeerStoreError: if peer ID not found
+        """
+
+    @abstractmethod
+    def add_key_pair(self, peer_id: ID, key_pair: KeyPair) -> None:
+        """
+        :param peer_id: peer ID to add the key pair for
+        :param key_pair:
+        :raise PeerStoreError: if peer ID already has pubkey or privkey set
+        """
@@ -81,16 +81,20 @@ class FloodSub(IPubsubRouter):
         :param pubsub_msg: pubsub message in protobuf.
         """

-        peers_gen = self._get_peers_to_send(
-            pubsub_msg.topicIDs,
-            msg_forwarder=msg_forwarder,
-            origin=ID(pubsub_msg.from_id),
+        peers_gen = set(
+            self._get_peers_to_send(
+                pubsub_msg.topicIDs,
+                msg_forwarder=msg_forwarder,
+                origin=ID(pubsub_msg.from_id),
+            )
         )
         rpc_msg = rpc_pb2.RPC(publish=[pubsub_msg])

         logger.debug("publishing message %s", pubsub_msg)

         for peer_id in peers_gen:
+            if peer_id not in self.pubsub.peers:
+                continue
             stream = self.pubsub.peers[peer_id]
             # FIXME: We should add a `WriteMsg` similar to write delimited messages.
             # Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107
@@ -98,6 +102,7 @@ class FloodSub(IPubsubRouter):
                 await stream.write(encode_varint_prefixed(rpc_msg.SerializeToString()))
             except StreamClosed:
                 logger.debug("Fail to publish message to %s: stream closed", peer_id)
+                self.pubsub._handle_dead_peer(peer_id)

     async def join(self, topic: str) -> None:
         """
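The FIXME in the hunk above points at how pubsub RPCs are framed on the wire: each serialized protobuf is prefixed with its length as an unsigned varint before being written to the stream. A self-contained sketch of that framing, a stand-in written for illustration rather than the library's own `encode_varint_prefixed`:

def encode_uvarint(n: int) -> bytes:
    """Encode an unsigned integer as a protobuf-style varint."""
    out = bytearray()
    while True:
        byte = n & 0x7F
        n >>= 7
        out.append(byte | (0x80 if n else 0x00))
        if not n:
            return bytes(out)

def encode_varint_prefixed(payload: bytes) -> bytes:
    """Frame a message by prefixing it with its varint-encoded length."""
    return encode_uvarint(len(payload)) + payload

assert encode_varint_prefixed(b"abc") == b"\x03abc"
assert encode_uvarint(300) == b"\xac\x02"  # 300 needs two varint bytes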
@@ -1,7 +1,8 @@
 from ast import literal_eval
+from collections import defaultdict
 import logging
 import random
-from typing import Any, Dict, Iterable, List, Sequence, Set
+from typing import Any, DefaultDict, Dict, Iterable, List, Sequence, Set, Tuple

 from async_service import Service
 import trio
@@ -33,18 +34,18 @@ class GossipSub(IPubsubRouter, Service):

     time_to_live: int

-    mesh: Dict[str, List[ID]]
-    fanout: Dict[str, List[ID]]
+    mesh: Dict[str, Set[ID]]
+    fanout: Dict[str, Set[ID]]

-    peers_to_protocol: Dict[ID, str]
+    # The protocol the peer supports
+    peer_protocol: Dict[ID, TProtocol]

-    time_since_last_publish: Dict[str, int]
-
-    peers_gossipsub: List[ID]
-    peers_floodsub: List[ID]
+    # TODO: Add `time_since_last_publish`
+    # Create topic --> time since last publish map.

     mcache: MessageCache

+    heartbeat_initial_delay: float
     heartbeat_interval: int

     def __init__(
@@ -56,6 +57,7 @@ class GossipSub(IPubsubRouter, Service):
         time_to_live: int,
         gossip_window: int = 3,
         gossip_history: int = 5,
+        heartbeat_initial_delay: float = 0.1,
         heartbeat_interval: int = 120,
     ) -> None:
         self.protocols = list(protocols)
@@ -74,18 +76,13 @@ class GossipSub(IPubsubRouter, Service):
         self.fanout = {}

         # Create peer --> protocol mapping
-        self.peers_to_protocol = {}
-
-        # Create topic --> time since last publish map
-        self.time_since_last_publish = {}
-
-        self.peers_gossipsub = []
-        self.peers_floodsub = []
+        self.peer_protocol = {}

         # Create message cache
         self.mcache = MessageCache(gossip_window, gossip_history)

         # Create heartbeat timer
+        self.heartbeat_initial_delay = heartbeat_initial_delay
         self.heartbeat_interval = heartbeat_interval

     async def run(self) -> None:
@@ -122,18 +119,13 @@ class GossipSub(IPubsubRouter, Service):
         """
         logger.debug("adding peer %s with protocol %s", peer_id, protocol_id)

-        if protocol_id == PROTOCOL_ID:
-            self.peers_gossipsub.append(peer_id)
-        elif protocol_id == floodsub.PROTOCOL_ID:
-            self.peers_floodsub.append(peer_id)
-        else:
+        if protocol_id not in (PROTOCOL_ID, floodsub.PROTOCOL_ID):
             # We should never enter here. Because the `protocol_id` is registered by your pubsub
             # instance in multistream-select, but it is not the protocol that gossipsub supports.
             # In this case, probably we registered gossipsub to a wrong `protocol_id`
             # in multistream-select, or wrong versions.
-            # TODO: Better handling
-            raise Exception(f"protocol is not supported: protocol_id={protocol_id}")
-        self.peers_to_protocol[peer_id] = protocol_id
+            raise ValueError(f"Protocol={protocol_id} is not supported.")
+        self.peer_protocol[peer_id] = protocol_id

     def remove_peer(self, peer_id: ID) -> None:
         """
@@ -143,13 +135,12 @@ class GossipSub(IPubsubRouter, Service):
         """
         logger.debug("removing peer %s", peer_id)

-        if peer_id in self.peers_gossipsub:
-            self.peers_gossipsub.remove(peer_id)
-        elif peer_id in self.peers_floodsub:
-            self.peers_floodsub.remove(peer_id)
+        for topic in self.mesh:
+            self.mesh[topic].discard(peer_id)
+        for topic in self.fanout:
+            self.fanout[topic].discard(peer_id)

-        if peer_id in self.peers_to_protocol:
-            del self.peers_to_protocol[peer_id]
+        self.peer_protocol.pop(peer_id, None)

     async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None:
         """
@@ -189,6 +180,8 @@ class GossipSub(IPubsubRouter, Service):
         logger.debug("publishing message %s", pubsub_msg)

         for peer_id in peers_gen:
+            if peer_id not in self.pubsub.peers:
+                continue
             stream = self.pubsub.peers[peer_id]
             # FIXME: We should add a `WriteMsg` similar to write delimited messages.
             # Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107
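The `add_peer`/`remove_peer` rewrite above collapses the two parallel membership lists (`peers_gossipsub`, `peers_floodsub`) into one `peer_protocol` map, and routing code now filters by protocol on demand. A toy sketch of the pattern; the peer names are hypothetical, and the protocol strings are the usual libp2p values shown for illustration:

from typing import Dict, Set

PROTOCOL_GOSSIPSUB = "/meshsub/1.0.0"
PROTOCOL_FLOODSUB = "/floodsub/1.0.0"

peer_protocol: Dict[str, str] = {
    "peer-a": PROTOCOL_GOSSIPSUB,
    "peer-b": PROTOCOL_FLOODSUB,
    "peer-c": PROTOCOL_GOSSIPSUB,
}

def peers_supporting(protocol: str) -> Set[str]:
    # One dict lookup per peer replaces keeping two parallel lists in sync.
    return {peer for peer, proto in peer_protocol.items() if proto == protocol}

assert peers_supporting(PROTOCOL_GOSSIPSUB) == {"peer-a", "peer-c"}
assert peers_supporting(PROTOCOL_FLOODSUB) == {"peer-b"}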
@@ -215,36 +208,41 @@ class GossipSub(IPubsubRouter, Service):
                continue

            # floodsub peers
-            for peer_id in self.pubsub.peer_topics[topic]:
-                # FIXME: `gossipsub.peers_floodsub` can be changed to `gossipsub.peers` in go.
-                # This will improve the efficiency when searching for a peer's protocol id.
-                if peer_id in self.peers_floodsub:
-                    send_to.add(peer_id)
+            floodsub_peers: Set[ID] = set(
+                peer_id
+                for peer_id in self.pubsub.peer_topics[topic]
+                if self.peer_protocol[peer_id] == floodsub.PROTOCOL_ID
+            )
+            send_to.update(floodsub_peers)

            # gossipsub peers
-            in_topic_gossipsub_peers: List[ID] = None
-            # TODO: Do we need to check `topic in self.pubsub.my_topics`?
+            gossipsub_peers: Set[ID] = set()
            if topic in self.mesh:
-                in_topic_gossipsub_peers = self.mesh[topic]
+                gossipsub_peers = self.mesh[topic]
            else:
-                # TODO(robzajac): Is topic DEFINITELY supposed to be in fanout if we are not
-                # subscribed?
-                # I assume there could be short periods between heartbeats where topic may not
-                # be but we should check that this path gets hit appropriately
-
-                if (topic not in self.fanout) or (len(self.fanout[topic]) == 0):
-                    # If no peers in fanout, choose some peers from gossipsub peers in topic.
-                    self.fanout[topic] = self._get_in_topic_gossipsub_peers_from_minus(
-                        topic, self.degree, []
-                    )
-                in_topic_gossipsub_peers = self.fanout[topic]
-            for peer_id in in_topic_gossipsub_peers:
-                send_to.add(peer_id)
+                # When we publish to a topic that we have not subscribed to, we randomly pick
+                # `self.degree` number of peers who have subscribed to the topic and add them
+                # as our `fanout` peers.
+                topic_in_fanout: bool = topic in self.fanout
+                fanout_peers: Set[ID] = self.fanout[topic] if topic_in_fanout else set()
+                fanout_size = len(fanout_peers)
+                if not topic_in_fanout or (
+                    topic_in_fanout and fanout_size < self.degree
+                ):
+                    if topic in self.pubsub.peer_topics:
+                        # Combine fanout peers with selected peers
+                        fanout_peers.update(
+                            self._get_in_topic_gossipsub_peers_from_minus(
+                                topic, self.degree - fanout_size, fanout_peers
+                            )
+                        )
+                self.fanout[topic] = fanout_peers
+                gossipsub_peers = fanout_peers
+            send_to.update(gossipsub_peers)
        # Excludes `msg_forwarder` and `origin`
        yield from send_to.difference([msg_forwarder, origin])

    async def join(self, topic: str) -> None:
-        # Note: the comments here are the near-exact algorithm description from the spec
        """
        Join notifies the router that we want to receive and forward messages
        in a topic. It is invoked after the subscription announcement.
@@ -256,10 +254,10 @@ class GossipSub(IPubsubRouter, Service):
        if topic in self.mesh:
            return
        # Create mesh[topic] if it does not yet exist
-        self.mesh[topic] = []
+        self.mesh[topic] = set()

        topic_in_fanout: bool = topic in self.fanout
-        fanout_peers: List[ID] = self.fanout[topic] if topic_in_fanout else []
+        fanout_peers: Set[ID] = self.fanout[topic] if topic_in_fanout else set()
        fanout_size = len(fanout_peers)
        if not topic_in_fanout or (topic_in_fanout and fanout_size < self.degree):
            # There are less than D peers (let this number be x)
@@ -270,16 +268,14 @@ class GossipSub(IPubsubRouter, Service):
                topic, self.degree - fanout_size, fanout_peers
            )
            # Combine fanout peers with selected peers
-            fanout_peers += selected_peers
+            fanout_peers.update(selected_peers)

        # Add fanout peers to mesh and notify them with a GRAFT(topic) control message.
        for peer in fanout_peers:
-            if peer not in self.mesh[topic]:
-                self.mesh[topic].append(peer)
-                await self.emit_graft(topic, peer)
+            self.mesh[topic].add(peer)
+            await self.emit_graft(topic, peer)

-        if topic_in_fanout:
-            del self.fanout[topic]
+        self.fanout.pop(topic, None)

    async def leave(self, topic: str) -> None:
        # Note: the comments here are the near-exact algorithm description from the spec
@@ -298,7 +294,75 @@ class GossipSub(IPubsubRouter, Service):
            await self.emit_prune(topic, peer)

        # Forget mesh[topic]
-        del self.mesh[topic]
+        self.mesh.pop(topic, None)
+
+    async def _emit_control_msgs(
+        self,
+        peers_to_graft: Dict[ID, List[str]],
+        peers_to_prune: Dict[ID, List[str]],
+        peers_to_gossip: Dict[ID, Dict[str, List[str]]],
+    ) -> None:
+        graft_msgs: List[rpc_pb2.ControlGraft] = []
+        prune_msgs: List[rpc_pb2.ControlPrune] = []
+        ihave_msgs: List[rpc_pb2.ControlIHave] = []
+        # Starting with GRAFT messages
+        for peer, topics in peers_to_graft.items():
+            for topic in topics:
+                graft_msg: rpc_pb2.ControlGraft = rpc_pb2.ControlGraft(topicID=topic)
+                graft_msgs.append(graft_msg)
+
+            # If there are also PRUNE messages to send to this peer
+            if peer in peers_to_prune:
+                for topic in peers_to_prune[peer]:
+                    prune_msg: rpc_pb2.ControlPrune = rpc_pb2.ControlPrune(
+                        topicID=topic
+                    )
+                    prune_msgs.append(prune_msg)
+                del peers_to_prune[peer]
+
+            # If there are also IHAVE messages to send to this peer
+            if peer in peers_to_gossip:
+                for topic in peers_to_gossip[peer]:
+                    ihave_msg: rpc_pb2.ControlIHave = rpc_pb2.ControlIHave(
+                        messageIDs=peers_to_gossip[peer][topic], topicID=topic
+                    )
+                    ihave_msgs.append(ihave_msg)
+                del peers_to_gossip[peer]
+
+            control_msg = self.pack_control_msgs(ihave_msgs, graft_msgs, prune_msgs)
+            await self.emit_control_message(control_msg, peer)
+
+        # Next with PRUNE messages
+        for peer, topics in peers_to_prune.items():
+            prune_msgs = []
+            for topic in topics:
+                prune_msg = rpc_pb2.ControlPrune(topicID=topic)
+                prune_msgs.append(prune_msg)
+
+            # If there are also IHAVE messages to send to this peer
+            if peer in peers_to_gossip:
+                ihave_msgs = []
+                for topic in peers_to_gossip[peer]:
+                    ihave_msg = rpc_pb2.ControlIHave(
+                        messageIDs=peers_to_gossip[peer][topic], topicID=topic
+                    )
+                    ihave_msgs.append(ihave_msg)
+                del peers_to_gossip[peer]
+
+            control_msg = self.pack_control_msgs(ihave_msgs, None, prune_msgs)
+            await self.emit_control_message(control_msg, peer)
+
+        # Finally IHAVE messages
+        for peer in peers_to_gossip:
+            ihave_msgs = []
+            for topic in peers_to_gossip[peer]:
+                ihave_msg = rpc_pb2.ControlIHave(
+                    messageIDs=peers_to_gossip[peer][topic], topicID=topic
+                )
+                ihave_msgs.append(ihave_msg)
+
+            control_msg = self.pack_control_msgs(ihave_msgs, None, None)
+            await self.emit_control_message(control_msg, peer)

    # Heartbeat
    async def heartbeat(self) -> None:
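`_emit_control_msgs`, added above, exists so that the GRAFT, PRUNE and IHAVE entries destined for one peer travel in a single control message instead of one RPC each. A stand-in sketch of that packing step, with plain dataclasses in place of the `rpc_pb2` protobuf types:

from dataclasses import dataclass, field
from typing import Dict, List

@dataclass
class ControlMessage:  # stand-in for rpc_pb2.ControlMessage
    graft: List[str] = field(default_factory=list)             # topics to graft
    prune: List[str] = field(default_factory=list)             # topics to prune
    ihave: Dict[str, List[str]] = field(default_factory=dict)  # topic -> msg ids

def pack_for_peer(
    peer: str,
    to_graft: Dict[str, List[str]],
    to_prune: Dict[str, List[str]],
    to_gossip: Dict[str, Dict[str, List[str]]],
) -> ControlMessage:
    # Pull everything queued for `peer` into one message, emptying the queues,
    # mirroring how the method above deletes handled entries as it goes.
    return ControlMessage(
        graft=to_graft.pop(peer, []),
        prune=to_prune.pop(peer, []),
        ihave=to_gossip.pop(peer, {}),
    )

msg = pack_for_peer(
    "peer-a",
    {"peer-a": ["topic-1"]},
    {"peer-a": ["topic-2"]},
    {"peer-a": {"topic-1": ["msg-1", "msg-2"]}},
)
assert msg.graft == ["topic-1"] and msg.prune == ["topic-2"]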
@@ -308,16 +372,29 @@ class GossipSub(IPubsubRouter, Service):
        Note: the heartbeats are called with awaits because each heartbeat depends on the
        state changes in the preceding heartbeat
        """
+        # Start after a delay. Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/01b9825fbee1848751d90a8469e3f5f43bac8466/gossipsub.go#L410  # noqa: E501
+        await trio.sleep(self.heartbeat_initial_delay)
        while True:
+            # Maintain mesh and keep track of which peers to send GRAFT or PRUNE to
+            peers_to_graft, peers_to_prune = self.mesh_heartbeat()
+            # Maintain fanout
+            self.fanout_heartbeat()
+            # Get the peers to send IHAVE to
+            peers_to_gossip = self.gossip_heartbeat()
+            # Pack GRAFT, PRUNE and IHAVE for the same peer into one control message and send it
+            await self._emit_control_msgs(
+                peers_to_graft, peers_to_prune, peers_to_gossip
+            )

-            await self.mesh_heartbeat()
-            await self.fanout_heartbeat()
-            await self.gossip_heartbeat()
+            self.mcache.shift()

            await trio.sleep(self.heartbeat_interval)

-    async def mesh_heartbeat(self) -> None:
-        # Note: the comments here are the exact pseudocode from the spec
+    def mesh_heartbeat(
+        self
+    ) -> Tuple[DefaultDict[ID, List[str]], DefaultDict[ID, List[str]]]:
+        peers_to_graft: DefaultDict[ID, List[str]] = defaultdict(list)
+        peers_to_prune: DefaultDict[ID, List[str]] = defaultdict(list)
        for topic in self.mesh:
            # Skip if no peers have subscribed to the topic
            if topic not in self.pubsub.peer_topics:
@@ -330,41 +407,43 @@ class GossipSub(IPubsubRouter, Service):
                    topic, self.degree - num_mesh_peers_in_topic, self.mesh[topic]
                )

-                fanout_peers_not_in_mesh: List[ID] = [
-                    peer for peer in selected_peers if peer not in self.mesh[topic]
-                ]
-                for peer in fanout_peers_not_in_mesh:
+                for peer in selected_peers:
                    # Add peer to mesh[topic]
-                    self.mesh[topic].append(peer)
+                    self.mesh[topic].add(peer)

                    # Emit GRAFT(topic) control message to peer
-                    await self.emit_graft(topic, peer)
+                    peers_to_graft[peer].append(topic)

            if num_mesh_peers_in_topic > self.degree_high:
                # Select |mesh[topic]| - D peers from mesh[topic]
                selected_peers = self.select_from_minus(
-                    num_mesh_peers_in_topic - self.degree, self.mesh[topic], []
+                    num_mesh_peers_in_topic - self.degree, self.mesh[topic], set()
                )
                for peer in selected_peers:
                    # Remove peer from mesh[topic]
-                    self.mesh[topic].remove(peer)
+                    self.mesh[topic].discard(peer)

                    # Emit PRUNE(topic) control message to peer
-                    await self.emit_prune(topic, peer)
+                    peers_to_prune[peer].append(topic)
+        return peers_to_graft, peers_to_prune

-    async def fanout_heartbeat(self) -> None:
+    def fanout_heartbeat(self) -> None:
        # Note: the comments here are the exact pseudocode from the spec
        for topic in self.fanout:
-            # If time since last published > ttl
-            # TODO: there's no way time_since_last_publish gets set anywhere yet
-            if (
-                topic in self.time_since_last_publish
-                and self.time_since_last_publish[topic] > self.time_to_live
-            ):
+            # Delete topic entry if it's not in `pubsub.peer_topics`
+            # or (TODO) if it's time-since-last-published > ttl
+            if topic not in self.pubsub.peer_topics:
                # Remove topic from fanout
                del self.fanout[topic]
-                del self.time_since_last_publish[topic]
            else:
+                # Check if fanout peers are still in the topic and remove the ones that are not
+                # ref: https://github.com/libp2p/go-libp2p-pubsub/blob/01b9825fbee1848751d90a8469e3f5f43bac8466/gossipsub.go#L498-L504  # noqa: E501
+                in_topic_fanout_peers = [
+                    peer
+                    for peer in self.fanout[topic]
+                    if peer in self.pubsub.peer_topics[topic]
+                ]
+                self.fanout[topic] = set(in_topic_fanout_peers)
                num_fanout_peers_in_topic = len(self.fanout[topic])

                # If |fanout[topic]| < D
@@ -376,53 +455,43 @@ class GossipSub(IPubsubRouter, Service):
                        self.fanout[topic],
                    )
                    # Add the peers to fanout[topic]
-                    self.fanout[topic].extend(selected_peers)
+                    self.fanout[topic].update(selected_peers)

-    async def gossip_heartbeat(self) -> None:
+    def gossip_heartbeat(self) -> DefaultDict[ID, Dict[str, List[str]]]:
+        peers_to_gossip: DefaultDict[ID, Dict[str, List[str]]] = defaultdict(dict)
        for topic in self.mesh:
            msg_ids = self.mcache.window(topic)
            if msg_ids:
-                # TODO: Make more efficient, possibly using a generator?
                # Get all pubsub peers in a topic and only add them if they are gossipsub peers too
                if topic in self.pubsub.peer_topics:
                    # Select D peers from peers.gossipsub[topic]
                    peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
-                        topic, self.degree, []
+                        topic, self.degree, self.mesh[topic]
                    )

+                    msg_id_strs = [str(msg_id) for msg_id in msg_ids]
                    for peer in peers_to_emit_ihave_to:
-                        # TODO: this line is a monster, can hopefully be simplified
-                        if (
-                            topic not in self.mesh or (peer not in self.mesh[topic])
-                        ) and (
-                            topic not in self.fanout or (peer not in self.fanout[topic])
-                        ):
-                            msg_id_strs = [str(msg_id) for msg_id in msg_ids]
-                            await self.emit_ihave(topic, msg_id_strs, peer)
+                        peers_to_gossip[peer][topic] = msg_id_strs

        # TODO: Refactor and Dedup. This section is roughly the same as the above.
        # Do the same for fanout, for all topics not already hit in mesh
        for topic in self.fanout:
-            if topic not in self.mesh:
-                msg_ids = self.mcache.window(topic)
-                if msg_ids:
-                    # TODO: Make more efficient, possibly using a generator?
-                    # Get all pubsub peers in topic and only add if they are gossipsub peers also
-                    if topic in self.pubsub.peer_topics:
-                        # Select D peers from peers.gossipsub[topic]
-                        peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
-                            topic, self.degree, []
-                        )
-                        for peer in peers_to_emit_ihave_to:
-                            if peer not in self.fanout[topic]:
-                                msg_id_strs = [str(msg) for msg in msg_ids]
-                                await self.emit_ihave(topic, msg_id_strs, peer)
+            msg_ids = self.mcache.window(topic)
+            if msg_ids:
+                # Get all pubsub peers in topic and only add if they are gossipsub peers also
+                if topic in self.pubsub.peer_topics:
+                    # Select D peers from peers.gossipsub[topic]
+                    peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
+                        topic, self.degree, self.fanout[topic]
+                    )
+                    msg_id_strs = [str(msg) for msg in msg_ids]
+                    for peer in peers_to_emit_ihave_to:
+                        peers_to_gossip[peer][topic] = msg_id_strs
+        return peers_to_gossip

-        self.mcache.shift()

    @staticmethod
    def select_from_minus(
-        num_to_select: int, pool: Sequence[Any], minus: Sequence[Any]
+        num_to_select: int, pool: Iterable[Any], minus: Iterable[Any]
    ) -> List[Any]:
        """
        Select at most num_to_select elements from the set (pool - minus) randomly.
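`select_from_minus`, whose signature is loosened above from `Sequence` to `Iterable`, picks at most `num_to_select` elements at random from `pool - minus`. A standalone sketch consistent with the docstring and with the `>=` boundary fix in the next hunk:

import random
from typing import Any, Iterable, List

def select_from_minus(
    num_to_select: int, pool: Iterable[Any], minus: Iterable[Any]
) -> List[Any]:
    # Build the candidate pool: elements of `pool` that are not in `minus`.
    minus_set = set(minus)
    selection_pool = [x for x in pool if x not in minus_set]
    # If we cannot fill the quota, return every candidate we have.
    if num_to_select >= len(selection_pool):
        return selection_pool
    return random.sample(selection_pool, num_to_select)

picked = select_from_minus(2, range(10), {0, 1, 2})
assert len(picked) == 2 and all(p >= 3 for p in picked)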
@@ -441,7 +510,7 @@ class GossipSub(IPubsubRouter, Service):

        # If num_to_select > size(selection_pool), then return selection_pool (which has the most
        # possible elements s.t. the number of elements is less than num_to_select)
-        if num_to_select > len(selection_pool):
+        if num_to_select >= len(selection_pool):
            return selection_pool

        # Random selection
@@ -450,16 +519,14 @@ class GossipSub(IPubsubRouter, Service):
        return selection

    def _get_in_topic_gossipsub_peers_from_minus(
-        self, topic: str, num_to_select: int, minus: Sequence[ID]
+        self, topic: str, num_to_select: int, minus: Iterable[ID]
    ) -> List[ID]:
-        gossipsub_peers_in_topic = [
+        gossipsub_peers_in_topic = set(
            peer_id
            for peer_id in self.pubsub.peer_topics[topic]
-            if peer_id in self.peers_gossipsub
-        ]
-        return self.select_from_minus(
-            num_to_select, gossipsub_peers_in_topic, list(minus)
+            if self.peer_protocol[peer_id] == PROTOCOL_ID
        )
+        return self.select_from_minus(num_to_select, gossipsub_peers_in_topic, minus)

    # RPC handlers

@@ -517,6 +584,12 @@ class GossipSub(IPubsubRouter, Service):
        rpc_msg: bytes = packet.SerializeToString()

        # 3) Get the stream to this peer
+        if sender_peer_id not in self.pubsub.peers:
+            logger.debug(
+                "Fail to responed to iwant request from %s: peer record not exist",
+                sender_peer_id,
+            )
+            return
        peer_stream = self.pubsub.peers[sender_peer_id]

        # 4) And write the packet to the stream
@@ -537,7 +610,7 @@ class GossipSub(IPubsubRouter, Service):
        # Add peer to mesh for topic
        if topic in self.mesh:
            if sender_peer_id not in self.mesh[topic]:
-                self.mesh[topic].append(sender_peer_id)
+                self.mesh[topic].add(sender_peer_id)
        else:
            # Respond with PRUNE if not subscribed to the topic
            await self.emit_prune(topic, sender_peer_id)
@@ -547,12 +620,27 @@ class GossipSub(IPubsubRouter, Service):
    ) -> None:
        topic: str = prune_msg.topicID

-        # Remove peer from mesh for topic, if peer is in topic
-        if topic in self.mesh and sender_peer_id in self.mesh[topic]:
-            self.mesh[topic].remove(sender_peer_id)
+        # Remove peer from mesh for topic
+        if topic in self.mesh:
+            self.mesh[topic].discard(sender_peer_id)

    # RPC emitters

+    def pack_control_msgs(
+        self,
+        ihave_msgs: List[rpc_pb2.ControlIHave],
+        graft_msgs: List[rpc_pb2.ControlGraft],
+        prune_msgs: List[rpc_pb2.ControlPrune],
+    ) -> rpc_pb2.ControlMessage:
+        control_msg: rpc_pb2.ControlMessage = rpc_pb2.ControlMessage()
+        if ihave_msgs:
+            control_msg.ihave.extend(ihave_msgs)
+        if graft_msgs:
+            control_msg.graft.extend(graft_msgs)
+        if prune_msgs:
+            control_msg.prune.extend(prune_msgs)
+        return control_msg
+
    async def emit_ihave(self, topic: str, msg_ids: Any, to_peer: ID) -> None:
        """Emit ihave message, sent to to_peer, for topic and msg_ids."""

@@ -608,6 +696,11 @@ class GossipSub(IPubsubRouter, Service):
        rpc_msg: bytes = packet.SerializeToString()

        # Get stream for peer from pubsub
+        if to_peer not in self.pubsub.peers:
+            logger.debug(
+                "Fail to emit control message to %s: peer record not exist", to_peer
+            )
+            return
        peer_stream = self.pubsub.peers[to_peer]

        # Write rpc to stream
@@ -96,8 +96,7 @@ class MessageCache:
        last_entries: List[CacheEntry] = self.history[len(self.history) - 1]

        for entry in last_entries:
-            if entry.mid in self.msgs:
-                del self.msgs[entry.mid]
+            self.msgs.pop(entry.mid)

        i: int = len(self.history) - 2
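The `shift` fix above can drop the oldest slot's entries unconditionally because every message ID in `history` is, by construction, also a key of `msgs`. A compact sketch of the sliding-window cache this maintains; it is simplified (the library's `MessageCache` entries also carry topics), with hypothetical names throughout:

from typing import Dict, List

class SlidingMessageCache:
    """History is a list of slots; slot 0 holds the newest message ids."""

    def __init__(self, window: int, history: int) -> None:
        self.window = window  # slots visible to gossip (IHAVE)
        self.history: List[List[str]] = [[] for _ in range(history)]
        self.msgs: Dict[str, bytes] = {}  # mid -> message payload

    def put(self, mid: str, payload: bytes) -> None:
        self.msgs[mid] = payload
        self.history[0].append(mid)

    def window_ids(self) -> List[str]:
        # Only the most recent `window` slots are advertised.
        return [mid for slot in self.history[: self.window] for mid in slot]

    def shift(self) -> None:
        # Drop the oldest slot's messages, then rotate a fresh slot in front.
        for mid in self.history[-1]:
            self.msgs.pop(mid)
        self.history = [[]] + self.history[:-1]

cache = SlidingMessageCache(window=2, history=3)
cache.put("m1", b"hello")
assert cache.window_ids() == ["m1"]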
@@ -9,6 +9,7 @@ from typing import (
    KeysView,
    List,
    NamedTuple,
+    Set,
    Tuple,
    Union,
    cast,
@@ -19,6 +20,7 @@ import base58
 from lru import LRU
 import trio

+from libp2p.crypto.keys import PrivateKey
 from libp2p.exceptions import ParseError, ValidationError
 from libp2p.host.host_interface import IHost
 from libp2p.io.exceptions import IncompleteReadError
@@ -33,7 +35,7 @@ from .abc import IPubsub, ISubscriptionAPI
 from .pb import rpc_pb2
 from .pubsub_notifee import PubsubNotifee
 from .subscription import TrioSubscriptionAPI
-from .validators import signature_validator
+from .validators import PUBSUB_SIGNING_PREFIX, signature_validator

 if TYPE_CHECKING:
    from .abc import IPubsubRouter  # noqa: F401
@@ -73,16 +75,23 @@ class Pubsub(IPubsub, Service):
    subscribed_topics_send: Dict[str, "trio.MemorySendChannel[rpc_pb2.Message]"]
    subscribed_topics_receive: Dict[str, "TrioSubscriptionAPI"]

-    peer_topics: Dict[str, List[ID]]
+    peer_topics: Dict[str, Set[ID]]
    peers: Dict[ID, INetStream]

    topic_validators: Dict[str, TopicValidator]

-    # TODO: Be sure it is increased atomically everytime.
    counter: int  # uint64

+    # Indicate if we should enforce signature verification
+    strict_signing: bool
+    sign_key: PrivateKey
+
    def __init__(
-        self, host: IHost, router: "IPubsubRouter", cache_size: int = None
+        self,
+        host: IHost,
+        router: "IPubsubRouter",
+        cache_size: int = None,
+        strict_signing: bool = True,
    ) -> None:
        """
        Construct a new Pubsub object, which is responsible for handling all
@@ -126,6 +135,12 @@ class Pubsub(IPubsub, Service):
        else:
            self.cache_size = cache_size

+        self.strict_signing = strict_signing
+        if strict_signing:
+            self.sign_key = self.host.get_private_key()
+        else:
+            self.sign_key = None
+
        self.seen_messages = LRU(self.cache_size)

        # Map of topics we are subscribed to blocking queues
@@ -142,7 +157,7 @@ class Pubsub(IPubsub, Service):
        # Map of topic to topic validator
        self.topic_validators = {}

-        self.counter = time.time_ns()
+        self.counter = int(time.time())

    async def run(self) -> None:
        self.manager.run_daemon_task(self.handle_peer_queue)
@@ -239,8 +254,7 @@ class Pubsub(IPubsub, Service):

        :param topic: the topic to remove validator from
        """
-        if topic in self.topic_validators:
-            del self.topic_validators[topic]
+        self.topic_validators.pop(topic, None)

    def get_msg_validators(self, msg: rpc_pb2.Message) -> Tuple[TopicValidator, ...]:
        """
@@ -282,24 +296,22 @@ class Pubsub(IPubsub, Service):
            logger.debug("fail to add new peer %s, error %s", peer_id, error)
            return

-        self.peers[peer_id] = stream
-
        # Send hello packet
        hello = self.get_hello_packet()
        try:
            await stream.write(encode_varint_prefixed(hello.SerializeToString()))
        except StreamClosed:
            logger.debug("Fail to add new peer %s: stream closed", peer_id)
-            del self.peers[peer_id]
            return
        # TODO: Check if the peer in black list.
        try:
            self.router.add_peer(peer_id, stream.get_protocol())
        except Exception as error:
            logger.debug("fail to add new peer %s, error %s", peer_id, error)
-            del self.peers[peer_id]
            return

+        self.peers[peer_id] = stream
+
        logger.debug("added new peer %s", peer_id)

    def _handle_dead_peer(self, peer_id: ID) -> None:
@@ -309,19 +321,16 @@ class Pubsub(IPubsub, Service):

        for topic in self.peer_topics:
            if peer_id in self.peer_topics[topic]:
-                self.peer_topics[topic].remove(peer_id)
+                self.peer_topics[topic].discard(peer_id)

        self.router.remove_peer(peer_id)

        logger.debug("removed dead peer %s", peer_id)

    async def handle_peer_queue(self) -> None:
-        """
-        Continuously read from peer channel and each time a new peer is found,
-        open a stream to the peer using a supported pubsub protocol
-        TODO: Handle failure for when the peer does not support any of the
-        pubsub protocols we support
-        """
+        """Continuously read from the peer queue and, each time a new peer is
+        found, open a stream to it using one of the pubsub protocols we
+        support."""
        async with self.peer_receive_channel:
            while self.manager.is_running:
                peer_id: ID = await self.peer_receive_channel.receive()
@@ -351,14 +360,14 @@ class Pubsub(IPubsub, Service):
        """
        if sub_message.subscribe:
            if sub_message.topicid not in self.peer_topics:
-                self.peer_topics[sub_message.topicid] = [origin_id]
+                self.peer_topics[sub_message.topicid] = set([origin_id])
            elif origin_id not in self.peer_topics[sub_message.topicid]:
                # Add peer to topic
-                self.peer_topics[sub_message.topicid].append(origin_id)
+                self.peer_topics[sub_message.topicid].add(origin_id)
        else:
            if sub_message.topicid in self.peer_topics:
                if origin_id in self.peer_topics[sub_message.topicid]:
-                    self.peer_topics[sub_message.topicid].remove(origin_id)
+                    self.peer_topics[sub_message.topicid].discard(origin_id)

    # FIXME(mhchia): Change the function name?
    async def handle_talk(self, publish_message: rpc_pb2.Message) -> None:
@@ -476,7 +485,13 @@ class Pubsub(IPubsub, Service):
            seqno=self._next_seqno(),
        )

-        # TODO: Sign with our signing key
+        if self.strict_signing:
+            priv_key = self.sign_key
+            signature = priv_key.sign(
+                PUBSUB_SIGNING_PREFIX.encode() + msg.SerializeToString()
+            )
+            msg.key = self.host.get_public_key().serialize()
+            msg.signature = signature

        await self.push_msg(self.my_id, msg)

@@ -536,18 +551,17 @@ class Pubsub(IPubsub, Service):

        # TODO: Check if the `from` is in the blacklist. If yes, reject.

-        # TODO: Check if signing is required and if so signature should be attached.
-
        # If the message is processed before, return(i.e., don't further process the message).
        if self._is_msg_seen(msg):
            return

-        # TODO: - Validate the message. If failed, reject it.
-        # Validate the signature of the message
-        # FIXME: `signature_validator` is currently a stub.
-        if not signature_validator(msg.key, msg.SerializeToString()):
-            logger.debug("Signature validation failed for msg: %s", msg)
-            return
+        # Check if signing is required and, if so, validate the signature
+        if self.strict_signing:
+            # Validate the signature of the message
+            if not signature_validator(msg):
+                logger.debug("Signature validation failed for msg: %s", msg)
+                return

        # Validate the message with registered topic validators.
        # If the validation failed, return(i.e., don't further process the message).
        try:
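With `strict_signing` enabled above, the signature covers a fixed prefix plus the message serialized without its `key` and `signature` fields, and the validator must rebuild exactly those bytes. A runnable stand-in that mimics the payload construction: HMAC stands in for the host key's asymmetric signature, and a plain function stands in for protobuf serialization:

import hashlib
import hmac

PUBSUB_SIGNING_PREFIX = "libp2p-pubsub:"

def serialize_without_key_sig(data: bytes, topic: str, from_id: bytes, seqno: bytes) -> bytes:
    # Stand-in for rpc_pb2.Message(...).SerializeToString() on the stripped message.
    return b"|".join([data, topic.encode(), from_id, seqno])

def sign_payload(secret: bytes, data: bytes, topic: str, from_id: bytes, seqno: bytes) -> bytes:
    payload = PUBSUB_SIGNING_PREFIX.encode() + serialize_without_key_sig(data, topic, from_id, seqno)
    return hmac.new(secret, payload, hashlib.sha256).digest()

def verify_payload(secret: bytes, signature: bytes, *fields) -> bool:
    # The verifier rebuilds the identical prefix + stripped-message bytes.
    return hmac.compare_digest(signature, sign_payload(secret, *fields))

sig = sign_payload(b"k", b"msg", "topic-1", b"peer", b"\x00\x01")
assert verify_payload(b"k", sig, b"msg", "topic-1", b"peer", b"\x00\x01")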
@@ -1,10 +1,41 @@
-# FIXME: Replace the type of `pubkey` with a custom type `Pubkey`
-def signature_validator(pubkey: bytes, msg: bytes) -> bool:
+import logging
+
+from libp2p.crypto.serialization import deserialize_public_key
+from libp2p.peer.id import ID
+
+from .pb import rpc_pb2
+
+logger = logging.getLogger("libp2p.pubsub")
+
+PUBSUB_SIGNING_PREFIX = "libp2p-pubsub:"
+
+
+def signature_validator(msg: rpc_pb2.Message) -> bool:
    """
    Verify the message against the given public key.

    :param pubkey: the public key which signs the message.
    :param msg: the message signed.
    """
-    # TODO: Implement the signature validation
-    return True
+    # Check if signature is attached
+    if msg.signature == b"":
+        logger.debug("Reject because no signature attached for msg: %s", msg)
+        return False
+
+    # Validate if message sender matches message signer,
+    # i.e., check if `msg.key` matches `msg.from_id`
+    msg_pubkey = deserialize_public_key(msg.key)
+    if ID.from_pubkey(msg_pubkey) != msg.from_id:
+        logger.debug(
+            "Reject because signing key does not match sender ID for msg: %s", msg
+        )
+        return False
+    # First, construct the original payload that's signed by 'msg.key'
+    msg_without_key_sig = rpc_pb2.Message(
+        data=msg.data, topicIDs=msg.topicIDs, from_id=msg.from_id, seqno=msg.seqno
+    )
+    payload = PUBSUB_SIGNING_PREFIX.encode() + msg_without_key_sig.SerializeToString()
+    try:
+        return msg_pubkey.verify(payload, msg.signature)
+    except Exception:
+        return False
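The accept path of `signature_validator` boils down to `pubkey.verify(prefix + stripped_message, signature)`. The same roundtrip with a real asymmetric key, using the widely available `cryptography` package's Ed25519 primitives as a stand-in for libp2p's own key types:

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

PUBSUB_SIGNING_PREFIX = b"libp2p-pubsub:"

private_key = Ed25519PrivateKey.generate()
public_key = private_key.public_key()

# Placeholder bytes; in the validator this is the message re-serialized
# without its key and signature fields.
serialized_msg = b"<message minus key/signature>"
payload = PUBSUB_SIGNING_PREFIX + serialized_msg
signature = private_key.sign(payload)

try:
    public_key.verify(signature, payload)  # raises InvalidSignature on failure
    valid = True
except InvalidSignature:
    valid = False
assert valid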
@@ -1,21 +0,0 @@
-from typing import Iterable
-
-from libp2p.peer.peerinfo import PeerInfo
-from libp2p.routing.interfaces import IContentRouting
-
-
-class KadmeliaContentRouter(IContentRouting):
-    def provide(self, cid: bytes, announce: bool = True) -> None:
-        """
-        Provide adds the given cid to the content routing system.
-
-        If announce is True, it also announces it, otherwise it is just
-        kept in the local accounting of which objects are being
-        provided.
-        """
-        # the DHT finds the closest peers to `key` using the `FIND_NODE` RPC
-        # then sends a `ADD_PROVIDER` RPC with its own `PeerInfo` to each of these peers.
-
-    def find_provider_iter(self, cid: bytes, count: int) -> Iterable[PeerInfo]:
-        """Search for peers who are able to provide a given key returns an
-        iterator of peer.PeerInfo."""
@@ -1,43 +0,0 @@
-import json
-
-import multiaddr
-
-from libp2p.kademlia.network import KademliaServer
-from libp2p.peer.id import ID
-from libp2p.peer.peerinfo import PeerInfo
-from libp2p.routing.interfaces import IPeerRouting
-
-
-class KadmeliaPeerRouter(IPeerRouting):
-    server: KademliaServer
-
-    def __init__(self, dht_server: KademliaServer) -> None:
-        self.server = dht_server
-
-    async def find_peer(self, peer_id: ID) -> PeerInfo:
-        """
-        Find a specific peer.
-
-        :param peer_id: peer to search for
-        :return: PeerInfo of specified peer
-        """
-        # switching peer_id to xor_id used by kademlia as node_id
-        xor_id = peer_id.xor_id
-        # ignore type for kad
-        value = await self.server.get(xor_id)  # type: ignore
-        return (
-            peer_info_from_str(value) if value else None
-        )  # TODO: should raise error if None?
-
-
-def peer_info_to_str(peer_info: PeerInfo) -> str:
-    return json.dumps(
-        [peer_info.peer_id.to_string(), list(map(lambda a: str(a), peer_info.addrs))]
-    )
-
-
-def peer_info_from_str(string: str) -> PeerInfo:
-    peer_id, raw_addrs = json.loads(string)
-    return PeerInfo(
-        ID.from_base58(peer_id), list(map(lambda a: multiaddr.Multiaddr(a), raw_addrs))
-    )
@ -50,8 +50,7 @@ class SecurityMultistream(ABC):
|
|||||||
:param transport: the corresponding transportation to the ``protocol``.
|
:param transport: the corresponding transportation to the ``protocol``.
|
||||||
"""
|
"""
|
||||||
# If protocol is already added before, remove it and add it again.
|
# If protocol is already added before, remove it and add it again.
|
||||||
if protocol in self.transports:
|
self.transports.pop(protocol, None)
|
||||||
del self.transports[protocol]
|
|
||||||
self.transports[protocol] = transport
|
self.transports[protocol] = transport
|
||||||
# Note: None is added as the handler for the given protocol since
|
# Note: None is added as the handler for the given protocol since
|
||||||
# we only care about selecting the protocol, not any handler function
|
# we only care about selecting the protocol, not any handler function
|
||||||
|
|||||||
@ -292,8 +292,7 @@ class Mplex(IMuxedConn, Service):
|
|||||||
# the entry of this stream, to avoid others from accessing it.
|
# the entry of this stream, to avoid others from accessing it.
|
||||||
if is_local_closed:
|
if is_local_closed:
|
||||||
async with self.streams_lock:
|
async with self.streams_lock:
|
||||||
if stream_id in self.streams:
|
self.streams.pop(stream_id, None)
|
||||||
del self.streams[stream_id]
|
|
||||||
|
|
||||||
async def _handle_reset(self, stream_id: StreamID) -> None:
|
async def _handle_reset(self, stream_id: StreamID) -> None:
|
||||||
async with self.streams_lock:
|
async with self.streams_lock:
|
||||||
@ -311,9 +310,8 @@ class Mplex(IMuxedConn, Service):
|
|||||||
if not stream.event_local_closed.is_set():
|
if not stream.event_local_closed.is_set():
|
||||||
stream.event_local_closed.set()
|
stream.event_local_closed.set()
|
||||||
async with self.streams_lock:
|
async with self.streams_lock:
|
||||||
if stream_id in self.streams:
|
self.streams.pop(stream_id, None)
|
||||||
del self.streams[stream_id]
|
self.streams_msg_channels.pop(stream_id, None)
|
||||||
del self.streams_msg_channels[stream_id]
|
|
||||||
|
|
||||||
async def _cleanup(self) -> None:
|
async def _cleanup(self) -> None:
|
||||||
if not self.event_shutting_down.is_set():
|
if not self.event_shutting_down.is_set():
|
||||||
|
|||||||
@@ -170,8 +170,7 @@ class MplexStream(IMuxedStream):
         if _is_remote_closed:
             # Both sides are closed, we can safely remove the buffer from the dict.
             async with self.muxed_conn.streams_lock:
-                if self.stream_id in self.muxed_conn.streams:
-                    del self.muxed_conn.streams[self.stream_id]
+                self.muxed_conn.streams.pop(self.stream_id, None)

     async def reset(self) -> None:
         """closes both ends of the stream tells this remote side to hang up."""
@@ -199,11 +198,8 @@ class MplexStream(IMuxedStream):
         await self.incoming_data_channel.aclose()

         async with self.muxed_conn.streams_lock:
-            if (
-                self.muxed_conn.streams is not None
-                and self.stream_id in self.muxed_conn.streams
-            ):
-                del self.muxed_conn.streams[self.stream_id]
+            if self.muxed_conn.streams is not None:
+                self.muxed_conn.streams.pop(self.stream_id, None)

     # TODO deadline not in use
     def set_deadline(self, ttl: int) -> bool:
@@ -44,8 +44,7 @@ class MuxerMultistream:
         :param transport: the corresponding transportation to the ``protocol``.
         """
         # If protocol is already added before, remove it and add it again.
-        if protocol in self.transports:
-            del self.transports[protocol]
+        self.transports.pop(protocol, None)
         self.transports[protocol] = transport
         self.multiselect.add_handler(protocol, None)
@@ -24,6 +24,7 @@ class GossipsubParams(NamedTuple):
     time_to_live: int = 30
     gossip_window: int = 3
     gossip_history: int = 5
+    heartbeat_initial_delay: float = 0.1
     heartbeat_interval: float = 0.5
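The new heartbeat_initial_delay field gives a router a grace period before its first heartbeat. A sketch of how such a delay typically fits a heartbeat loop (illustrative only; not the library's actual heartbeat code):

    import trio

    async def heartbeat_loop(initial_delay: float, interval: float) -> None:
        # wait once up front so peers have time to connect before the first beat
        await trio.sleep(initial_delay)
        while True:
            ...  # mesh maintenance and gossip emission would happen here
            await trio.sleep(interval)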
@@ -1,14 +1,18 @@
-from contextlib import AsyncExitStack, asynccontextmanager
-from typing import Any, AsyncIterator, Dict, Sequence, Tuple, cast
+from contextlib import AsyncExitStack
+from typing import Any, AsyncIterator, Dict, List, Sequence, Tuple, cast

+# NOTE: import ``asynccontextmanager`` from ``contextlib`` when support for python 3.6 is dropped.
+from async_generator import asynccontextmanager
 from async_service import background_trio_service
 import factory
+from multiaddr import Multiaddr
 import trio

 from libp2p import generate_new_rsa_identity, generate_peer_id_from
 from libp2p.crypto.keys import KeyPair
 from libp2p.host.basic_host import BasicHost
 from libp2p.host.host_interface import IHost
+from libp2p.host.routed_host import RoutedHost
 from libp2p.io.abc import ReadWriteCloser
 from libp2p.network.connection.raw_connection import RawConnection
 from libp2p.network.connection.raw_connection_interface import IRawConnection
@@ -16,11 +20,13 @@ from libp2p.network.connection.swarm_connection import SwarmConn
 from libp2p.network.stream.net_stream_interface import INetStream
 from libp2p.network.swarm import Swarm
 from libp2p.peer.id import ID
+from libp2p.peer.peerinfo import PeerInfo
 from libp2p.peer.peerstore import PeerStore
 from libp2p.pubsub.abc import IPubsubRouter
 from libp2p.pubsub.floodsub import FloodSub
 from libp2p.pubsub.gossipsub import GossipSub
 from libp2p.pubsub.pubsub import Pubsub
+from libp2p.routing.interfaces import IPeerRouting
 from libp2p.security.base_transport import BaseSecureTransport
 from libp2p.security.insecure.transport import PLAINTEXT_PROTOCOL_ID, InsecureTransport
 import libp2p.security.secio.transport as secio
@@ -45,6 +51,12 @@ class IDFactory(factory.Factory):
     )


+def initialize_peerstore_with_our_keypair(self_id: ID, key_pair: KeyPair) -> PeerStore:
+    peer_store = PeerStore()
+    peer_store.add_key_pair(self_id, key_pair)
+    return peer_store
+
+
 def security_transport_factory(
     is_secure: bool, key_pair: KeyPair
 ) -> Dict[TProtocol, BaseSecureTransport]:
@@ -60,10 +72,12 @@ async def raw_conn_factory(
 ) -> AsyncIterator[Tuple[IRawConnection, IRawConnection]]:
     conn_0 = None
     conn_1 = None
+    event = trio.Event()

     async def tcp_stream_handler(stream: ReadWriteCloser) -> None:
         nonlocal conn_1
         conn_1 = RawConnection(stream, initiator=False)
+        event.set()
         await trio.sleep_forever()

     tcp_transport = TCP()
@@ -71,6 +85,7 @@ async def raw_conn_factory(
     await listener.listen(LISTEN_MADDR, nursery)
     listening_maddr = listener.get_addrs()[0]
     conn_0 = await tcp_transport.dial(listening_maddr)
+    await event.wait()
     yield conn_0, conn_1
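The two additions to raw_conn_factory (event.set() in the handler, await event.wait() after dialing) form a standard trio rendezvous: the factory must not yield until the listening side has actually wrapped the inbound stream, otherwise conn_1 could still be None. The pattern in isolation, as a runnable sketch:

    import trio

    async def demo() -> None:
        event = trio.Event()
        result = None

        async def producer() -> None:
            nonlocal result
            result = "connection established"  # stands in for RawConnection(...)
            event.set()  # signal that the shared state is ready

        async with trio.open_nursery() as nursery:
            nursery.start_soon(producer)
            await event.wait()  # block until producer has run
            assert result is not None

    trio.run(demo)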
@@ -84,7 +99,9 @@ class SwarmFactory(factory.Factory):
     muxer_opt = {MPLEX_PROTOCOL_ID: Mplex}

     peer_id = factory.LazyAttribute(lambda o: generate_peer_id_from(o.key_pair))
-    peerstore = factory.LazyFunction(PeerStore)
+    peerstore = factory.LazyAttribute(
+        lambda o: initialize_peerstore_with_our_keypair(o.peer_id, o.key_pair)
+    )
     upgrader = factory.LazyAttribute(
         lambda o: TransportUpgrader(
             security_transport_factory(o.is_secure, o.key_pair), o.muxer_opt
@@ -133,31 +150,59 @@ class HostFactory(factory.Factory):
         is_secure = False
         key_pair = factory.LazyFunction(generate_new_rsa_identity)

-    public_key = factory.LazyAttribute(lambda o: o.key_pair.public_key)
-    network = factory.LazyAttribute(
-        lambda o: SwarmFactory(is_secure=o.is_secure, key_pair=o.key_pair)
-    )
+    network = factory.LazyAttribute(lambda o: SwarmFactory(is_secure=o.is_secure))

     @classmethod
     @asynccontextmanager
     async def create_batch_and_listen(
         cls, is_secure: bool, number: int
     ) -> AsyncIterator[Tuple[BasicHost, ...]]:
-        key_pairs = [generate_new_rsa_identity() for _ in range(number)]
-        async with AsyncExitStack() as stack:
-            swarms = [
-                await stack.enter_async_context(
-                    SwarmFactory.create_and_listen(is_secure, key_pair)
-                )
-                for key_pair in key_pairs
-            ]
-            hosts = tuple(
-                BasicHost(key_pair.public_key, swarm)
-                for key_pair, swarm in zip(key_pairs, swarms)
-            )
+        async with SwarmFactory.create_batch_and_listen(is_secure, number) as swarms:
+            hosts = tuple(BasicHost(swarm) for swarm in swarms)
             yield hosts


+class DummyRouter(IPeerRouting):
+    _routing_table: Dict[ID, PeerInfo]
+
+    def __init__(self) -> None:
+        self._routing_table = dict()
+
+    def _add_peer(self, peer_id: ID, addrs: List[Multiaddr]) -> None:
+        self._routing_table[peer_id] = PeerInfo(peer_id, addrs)
+
+    async def find_peer(self, peer_id: ID) -> PeerInfo:
+        await trio.hazmat.checkpoint()
+        return self._routing_table.get(peer_id, None)
+
+
+class RoutedHostFactory(factory.Factory):
+    class Meta:
+        model = RoutedHost
+
+    class Params:
+        is_secure = False
+
+    network = factory.LazyAttribute(
+        lambda o: HostFactory(is_secure=o.is_secure).get_network()
+    )
+    router = factory.LazyFunction(DummyRouter)
+
+    @classmethod
+    @asynccontextmanager
+    async def create_batch_and_listen(
+        cls, is_secure: bool, number: int
+    ) -> AsyncIterator[Tuple[RoutedHost, ...]]:
+        routing_table = DummyRouter()
+        async with HostFactory.create_batch_and_listen(is_secure, number) as hosts:
+            for host in hosts:
+                routing_table._add_peer(host.get_id(), host.get_addrs())
+            routed_hosts = tuple(
+                RoutedHost(host.get_network(), routing_table) for host in hosts
+            )
+            yield routed_hosts
+
+
 class FloodsubFactory(factory.Factory):
     class Meta:
         model = FloodSub
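A short usage sketch for the new RoutedHostFactory; it mirrors tests/host/test_routed_host.py further down in this diff. Connecting with an empty address list forces the host to resolve the peer through its router:

    import trio
    from libp2p.peer.peerinfo import PeerInfo
    from libp2p.tools.factories import RoutedHostFactory

    async def main() -> None:
        async with RoutedHostFactory.create_batch_and_listen(False, 2) as hosts:
            # no addrs are given, so the DummyRouter must resolve the peer id
            await hosts[0].connect(PeerInfo(hosts[1].get_id(), []))

    trio.run(main)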
@@ -176,6 +221,7 @@ class GossipsubFactory(factory.Factory):
     time_to_live = GOSSIPSUB_PARAMS.time_to_live
     gossip_window = GOSSIPSUB_PARAMS.gossip_window
     gossip_history = GOSSIPSUB_PARAMS.gossip_history
+    heartbeat_initial_delay = GOSSIPSUB_PARAMS.heartbeat_initial_delay
     heartbeat_interval = GOSSIPSUB_PARAMS.heartbeat_interval
@@ -186,13 +232,19 @@ class PubsubFactory(factory.Factory):
     host = factory.SubFactory(HostFactory)
     router = None
     cache_size = None
+    strict_signing = False

     @classmethod
     @asynccontextmanager
     async def create_and_start(
-        cls, host: IHost, router: IPubsubRouter, cache_size: int
+        cls, host: IHost, router: IPubsubRouter, cache_size: int, strict_signing: bool
     ) -> AsyncIterator[Pubsub]:
-        pubsub = PubsubFactory(host=host, router=router, cache_size=cache_size)
+        pubsub = PubsubFactory(
+            host=host,
+            router=router,
+            cache_size=cache_size,
+            strict_signing=strict_signing,
+        )
         async with background_trio_service(pubsub):
             yield pubsub
@@ -204,13 +256,14 @@ class PubsubFactory(factory.Factory):
         routers: Sequence[IPubsubRouter],
         is_secure: bool = False,
         cache_size: int = None,
+        strict_signing: bool = False,
     ) -> AsyncIterator[Tuple[Pubsub, ...]]:
         async with HostFactory.create_batch_and_listen(is_secure, number) as hosts:
             # Pubsubs should exit before hosts
             async with AsyncExitStack() as stack:
                 pubsubs = [
                     await stack.enter_async_context(
-                        cls.create_and_start(host, router, cache_size)
+                        cls.create_and_start(host, router, cache_size, strict_signing)
                     )
                     for host, router in zip(hosts, routers)
                 ]
@@ -223,6 +276,7 @@ class PubsubFactory(factory.Factory):
         number: int,
         is_secure: bool = False,
         cache_size: int = None,
+        strict_signing: bool = False,
         protocols: Sequence[TProtocol] = None,
     ) -> AsyncIterator[Tuple[Pubsub, ...]]:
         if protocols is not None:
@@ -230,7 +284,7 @@ class PubsubFactory(factory.Factory):
         else:
             floodsubs = FloodsubFactory.create_batch(number)
         async with cls._create_batch_with_router(
-            number, floodsubs, is_secure, cache_size
+            number, floodsubs, is_secure, cache_size, strict_signing
         ) as pubsubs:
             yield pubsubs

@@ -242,6 +296,7 @@ class PubsubFactory(factory.Factory):
         *,
         is_secure: bool = False,
         cache_size: int = None,
+        strict_signing: bool = False,
         protocols: Sequence[TProtocol] = None,
         degree: int = GOSSIPSUB_PARAMS.degree,
         degree_low: int = GOSSIPSUB_PARAMS.degree_low,
@@ -250,6 +305,7 @@ class PubsubFactory(factory.Factory):
         gossip_window: int = GOSSIPSUB_PARAMS.gossip_window,
         gossip_history: int = GOSSIPSUB_PARAMS.gossip_history,
         heartbeat_interval: float = GOSSIPSUB_PARAMS.heartbeat_interval,
+        heartbeat_initial_delay: float = GOSSIPSUB_PARAMS.heartbeat_initial_delay,
     ) -> AsyncIterator[Tuple[Pubsub, ...]]:
         if protocols is not None:
             gossipsubs = GossipsubFactory.create_batch(
@@ -274,7 +330,7 @@ class PubsubFactory(factory.Factory):
         )

         async with cls._create_batch_with_router(
-            number, gossipsubs, is_secure, cache_size
+            number, gossipsubs, is_secure, cache_size, strict_signing
         ) as pubsubs:
             async with AsyncExitStack() as stack:
                 for router in gossipsubs:
@@ -153,31 +153,34 @@ floodsub_protocol_pytest_params = [

 async def perform_test_from_obj(obj, pubsub_factory) -> None:
     """
-    Perform pubsub tests from a test obj.
-    test obj are composed as follows:
-
-    {
-        "supported_protocols": ["supported/protocol/1.0.0",...],
-        "adj_list": {
-            "node1": ["neighbor1_of_node1", "neighbor2_of_node1", ...],
-            "node2": ["neighbor1_of_node2", "neighbor2_of_node2", ...],
-            ...
-        },
-        "topic_map": {
-            "topic1": ["node1_subscribed_to_topic1", "node2_subscribed_to_topic1", ...]
-        },
-        "messages": [
-            {
-                "topics": ["topic1_for_message", "topic2_for_message", ...],
-                "data": b"some contents of the message (newlines are not supported)",
-                "node_id": "message sender node id"
-            },
-            ...
-        ]
-    }
-    NOTE: In adj_list, for any neighbors A and B, only list B as a neighbor of A
-    or B as a neighbor of A once. Do NOT list both A: ["B"] and B:["A"] as the behavior
-    is undefined (even if it may work)
+    Perform pubsub tests from a test object, which is composed as follows:
+
+    .. code-block:: python
+
+        {
+            "supported_protocols": ["supported/protocol/1.0.0",...],
+            "adj_list": {
+                "node1": ["neighbor1_of_node1", "neighbor2_of_node1", ...],
+                "node2": ["neighbor1_of_node2", "neighbor2_of_node2", ...],
+                ...
+            },
+            "topic_map": {
+                "topic1": ["node1_subscribed_to_topic1", "node2_subscribed_to_topic1", ...]
+            },
+            "messages": [
+                {
+                    "topics": ["topic1_for_message", "topic2_for_message", ...],
+                    "data": b"some contents of the message (newlines are not supported)",
+                    "node_id": "message sender node id"
+                },
+                ...
+            ]
+        }
+
+    .. note::
+        In adj_list, for any neighbors A and B, only list B as a neighbor of A
+        or B as a neighbor of A once. Do NOT list both A: ["B"] and B:["A"] as the behavior
+        is undefined (even if it may work)
     """

     # Step 1) Create graph
@ -39,6 +39,3 @@ def create_echo_stream_handler(
|
|||||||
await stream.write(resp.encode())
|
await stream.write(resp.encode())
|
||||||
|
|
||||||
return echo_stream_handler
|
return echo_stream_handler
|
||||||
|
|
||||||
|
|
||||||
# TODO: Service `external_api`
|
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ from trio_typing import TaskStatus
 from libp2p.io.trio import TrioTCPStream
 from libp2p.network.connection.raw_connection import RawConnection
 from libp2p.network.connection.raw_connection_interface import IRawConnection
+from libp2p.transport.exceptions import OpenConnectionError
 from libp2p.transport.listener_interface import IListener
 from libp2p.transport.transport_interface import ITransport
 from libp2p.transport.typing import THandler
@@ -80,7 +81,10 @@ class TCP(ITransport):
         self.host = maddr.value_for_protocol("ip4")
         self.port = int(maddr.value_for_protocol("tcp"))

-        stream = await trio.open_tcp_stream(self.host, self.port)
+        try:
+            stream = await trio.open_tcp_stream(self.host, self.port)
+        except OSError as error:
+            raise OpenConnectionError from error
         read_write_closer = TrioTCPStream(stream)

         return RawConnection(read_write_closer, True)
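With this change a failed dial surfaces as the library's own OpenConnectionError rather than a raw OSError. Caller-side handling would look roughly like this (a sketch; tcp and maddr are assumed to be a TCP transport instance and a multiaddr):

    from libp2p.transport.exceptions import OpenConnectionError

    async def dial_or_none(tcp, maddr):
        try:
            return await tcp.dial(maddr)
        except OpenConnectionError:
            # refused connections and unreachable hosts now end up here
            return None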
15  mypy.ini
@@ -1,16 +1,17 @@
 [mypy]
-warn_unused_ignores = True
-ignore_missing_imports = True
-strict_optional = False
 check_untyped_defs = True
 disallow_incomplete_defs = True
 disallow_untyped_defs = True
 disallow_any_generics = True
 disallow_untyped_calls = True
+disallow_untyped_decorators = True
+disallow_subclassing_any = False
+ignore_missing_imports = True
+strict_optional = False
+warn_unused_ignores = True
+strict_equality = True
 warn_redundant_casts = True
+warn_return_any = False
 warn_unused_configs = True
 warn_unreachable = True
-strict_equality = True
-
-[mypy-libp2p.kademlia.*]
-ignore_errors = True
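Of the newly enabled flags, disallow_untyped_decorators is the one most likely to require code changes: it rejects a typed function wrapped by an untyped decorator. An illustrative pair (not from this codebase):

    from typing import Callable, TypeVar

    F = TypeVar("F", bound=Callable[..., object])

    def typed_decorator(func: F) -> F:  # accepted under the new config
        return func

    def untyped_decorator(func):  # applying this to a typed function now errors
        return func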
1  newsfragments/387.bugfix.rst  (new file)
@@ -0,0 +1 @@
+Store peer ids in ``set`` instead of ``list`` and check if peer id exists in ``dict`` before accessing to prevent ``KeyError``.
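The defensive pattern this fragment describes, in miniature (a sketch, not code from this diff):

    peer_ids = set()        # a set instead of a list: no duplicates, O(1) membership
    peer_ids.add("peer-a")
    peer_ids.add("peer-a")  # adding the same peer twice is harmless

    streams = {}
    peer = "peer-b"
    if peer in streams:     # check before access instead of risking a KeyError
        streams[peer].close()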
27  newsfragments/README.md  (new file)
@@ -0,0 +1,27 @@
+This directory collects "newsfragments": short files that each contain
+a snippet of ReST-formatted text that will be added to the next
+release notes. This should be a description of aspects of the change
+(if any) that are relevant to users. (This contrasts with the
+commit message and PR description, which are a description of the change as
+relevant to people working on the code itself.)
+
+Each file should be named like `<ISSUE>.<TYPE>.rst`, where
+`<ISSUE>` is an issue number, and `<TYPE>` is one of:
+
+* `feature`
+* `bugfix`
+* `performance`
+* `doc`
+* `internal`
+* `removal`
+* `misc`
+
+So for example: `123.feature.rst`, `456.bugfix.rst`
+
+If the PR fixes an issue, use that number here. If there is no issue,
+then open up the PR first and use the PR number for the newsfragment.
+
+Note that the `towncrier` tool will automatically
+reflow your text, so don't try to do any fancy formatting. Run
+`towncrier --draft` to get a preview of what the release notes entry
+will look like in the final release notes.
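For example, the `387.bugfix.rst` fragment added just above follows this convention, pairing issue number 387 with the `bugfix` type; `towncrier --draft` previews how it will render.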
43  newsfragments/validate_files.py  (new executable file)
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+# Towncrier silently ignores files that do not match the expected ending.
+# We use this script to ensure we catch these as errors in CI.
+
+import os
+import pathlib
+import sys
+
+ALLOWED_EXTENSIONS = {
+    '.bugfix.rst',
+    '.doc.rst',
+    '.feature.rst',
+    '.internal.rst',
+    '.misc.rst',
+    '.performance.rst',
+    '.removal.rst',
+}
+
+ALLOWED_FILES = {
+    'validate_files.py',
+    'README.md',
+}
+
+THIS_DIR = pathlib.Path(__file__).parent
+
+num_args = len(sys.argv) - 1
+assert num_args in {0, 1}
+if num_args == 1:
+    assert sys.argv[1] in ('is-empty', )
+
+for fragment_file in THIS_DIR.iterdir():
+
+    if fragment_file.name in ALLOWED_FILES:
+        continue
+    elif num_args == 0:
+        full_extension = "".join(fragment_file.suffixes)
+        if full_extension not in ALLOWED_EXTENSIONS:
+            raise Exception(f"Unexpected file: {fragment_file}")
+    elif sys.argv[1] == 'is-empty':
+        raise Exception(f"Unexpected file: {fragment_file}")
+    else:
+        raise RuntimeError(f"Strange: arguments {sys.argv} were validated, but not found")
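The script runs in two modes: with no argument it checks that every fragment uses one of the allowed extensions, and with `is-empty` (useful right after a release has consumed the fragments) it asserts the directory contains nothing beyond `README.md` and the script itself.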
@@ -1,3 +1,47 @@
+[tool.towncrier]
+# Read https://github.com/libp2p/py-libp2p/newsfragments/README.md for instructions
+package = "libp2p"
+filename = "docs/release_notes.rst"
+directory = "newsfragments"
+underlines = ["-", "~", "^"]
+title_format = "libp2p v{version} ({project_date})"
+issue_format = "`#{issue} <https://github.com/libp2p/py-libp2p/issues/{issue}>`__"
+
+[[tool.towncrier.type]]
+directory = "feature"
+name = "Features"
+showcontent = true
+
+[[tool.towncrier.type]]
+directory = "bugfix"
+name = "Bugfixes"
+showcontent = true
+
+[[tool.towncrier.type]]
+directory = "performance"
+name = "Performance improvements"
+showcontent = true
+
+[[tool.towncrier.type]]
+directory = "doc"
+name = "Improved Documentation"
+showcontent = true
+
+[[tool.towncrier.type]]
+directory = "removal"
+name = "Deprecations and Removals"
+showcontent = true
+
+[[tool.towncrier.type]]
+directory = "internal"
+name = "Internal Changes - for py-libp2p Contributors"
+showcontent = true
+
+[[tool.towncrier.type]]
+directory = "misc"
+name = "Miscellaneous changes"
+showcontent = false
+
 [tool.black]
 target_version = ['py37']
 include = '\.pyi?$'
@@ -1,6 +1,9 @@
 [pytest]
-addopts= --showlocals --durations 50 --maxfail 10
+addopts= -v --showlocals --durations 50 --maxfail 10
 python_paths= .
 xfail_strict=true
 log_format = %(levelname)8s %(asctime)s %(filename)20s %(message)s
 log_date_format = %m-%d %H:%M:%S
+
+[pytest-watch]
+runner= pytest --failed-first --maxfail=1 --no-success-flaky-report
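The new `[pytest-watch]` section configures the watcher supplied by the `pytest-watch` dev dependency (its `ptw` command) to rerun previously failed tests first and stop at the first failure.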
1  requirements-docs.txt  (new file)
@@ -0,0 +1 @@
+.[doc]
52  scripts/release/test_package.py  (new file)
@@ -0,0 +1,52 @@
+from pathlib import Path
+import subprocess
+from tempfile import TemporaryDirectory
+import venv
+
+
+def create_venv(parent_path):
+    venv_path = parent_path / 'package-smoke-test'
+    venv.create(venv_path, with_pip=True)
+    subprocess.run([venv_path / 'bin' / 'pip', 'install', '-U', 'pip', 'setuptools'], check=True)
+    return venv_path
+
+
+def find_wheel(project_path):
+    wheels = list(project_path.glob('dist/*.whl'))
+
+    if len(wheels) != 1:
+        raise Exception(
+            f"Expected one wheel. Instead found: {wheels} in project {project_path.absolute()}"
+        )
+
+    return wheels[0]
+
+
+def install_wheel(venv_path, wheel_path, extras=()):
+    if extras:
+        extra_suffix = f"[{','.join(extras)}]"
+    else:
+        extra_suffix = ""
+
+    subprocess.run(
+        [
+            venv_path / 'bin' / 'pip',
+            'install',
+            f"{wheel_path}{extra_suffix}"
+        ],
+        check=True,
+    )
+
+
+def test_install_local_wheel():
+    with TemporaryDirectory() as tmpdir:
+        venv_path = create_venv(Path(tmpdir))
+        wheel_path = find_wheel(Path('.'))
+        install_wheel(venv_path, wheel_path)
+        print("Installed", wheel_path.absolute(), "to", venv_path)
+        print(f"Activate with `source {venv_path}/bin/activate`")
+        input("Press enter when the test has completed. The directory will be deleted.")
+
+
+if __name__ == '__main__':
+    test_install_local_wheel()
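Note that the helper builds nothing itself: it expects exactly one wheel to already exist under `dist/` (produced beforehand by a wheel build step), installs it into a throwaway virtualenv, and then pauses so the release manager can inspect the installation before the temporary directory is deleted.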
114  setup.py
@@ -1,79 +1,119 @@
-import setuptools
-
-py_classifiers = [f"Programming Language :: Python :: {version}" for version in ["3.7"]]
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import os
+
+from setuptools import find_packages, setup

 extras_require = {
     "test": [
-        "factory-boy>=2.12.0,<3.0.0",
         "pytest>=4.6.3,<5.0.0",
         "pytest-xdist>=1.30.0",
         "pytest-trio>=0.5.2",
+        "factory-boy>=2.12.0,<3.0.0",
     ],
     "lint": [
-        "mypy>=0.701,<1.0",
+        "flake8==3.7.9",  # flake8 is not semver: it has added new warnings at minor releases
+        "isort==4.3.21",
+        "mypy==0.740",  # mypy is not semver: it has added new warnings at minor releases
         "mypy-protobuf==1.15",
         "black==19.3b0",
-        "isort==4.3.21",
-        "flake8>=3.7.7,<4.0.0",
-        "flake8-bugbear",
+        "flake8-bugbear>=19.8.0,<20",
+        "docformatter>=1.3.1,<2",
+    ],
+    "doc": [
+        "Sphinx>=2.2.1,<3",
+        "sphinx_rtd_theme>=0.4.3,<=1",
+        "towncrier>=19.2.0, <20",
     ],
     "dev": [
         "bumpversion>=0.5.3,<1",
-        "docformatter",
+        "pytest-watch>=4.1.0,<5",
+        "wheel",
+        "twine",
+        "ipython",
         "setuptools>=36.2.0",
         "tox>=3.13.2,<4.0.0",
-        "twine",
-        "wheel",
     ],
 }

 extras_require["dev"] = (
-    extras_require["test"] + extras_require["lint"] + extras_require["dev"]
+    extras_require["dev"]
+    + extras_require["test"]
+    + extras_require["lint"]
+    + extras_require["doc"]
 )

+fastecdsa = [
+    # No official fastecdsa==1.7.4,1.7.5 wheels for Windows, using a pypi package that includes
+    # the original library, but also windows-built wheels (32+64-bit) on those versions.
+    # Fixme: Remove section when fastecdsa has released a windows-compatible wheel
+    # (specifically: both win32 and win_amd64 targets)
+    # See the following issues for more information;
+    # https://github.com/libp2p/py-libp2p/issues/363
+    # https://github.com/AntonKueltz/fastecdsa/issues/11
+    "fastecdsa-any==1.7.5;sys_platform=='win32'",
+    # Wheels are provided for these platforms, or compiling one is minimally frustrating in a
+    # default python installation.
+    "fastecdsa==1.7.5;sys_platform!='win32'",
+]
+
+
 with open("./README.md") as readme:
     long_description = readme.read()

-setuptools.setup(
+
+install_requires = [
+    "pycryptodome>=3.9.2,<4.0.0",
+    "base58>=1.0.3,<2.0.0",
+    "pymultihash>=0.8.2",
+    "multiaddr>=0.0.8,<0.1.0",
+    "rpcudp>=3.0.0,<4.0.0",
+    "lru-dict>=1.1.6",
+    "protobuf>=3.10.0,<4.0.0",
+    "coincurve>=10.0.0,<11.0.0",
+    "pynacl==1.3.0",
+    "dataclasses>=0.7, <1;python_version<'3.7'",
+    "async_generator==1.10",
+    "trio>=0.13.0",
+    "async-service>=0.1.0a2,<0.2.0",
+]
+
+
+# NOTE: Some dependencies break RTD builds. We can not install system dependencies on the
+# RTD system so we have to exclude these dependencies when we are in an RTD environment.
+readthedocs_is_building = os.environ.get("READTHEDOCS", False)
+if not readthedocs_is_building:
+    install_requires.extend(fastecdsa)
+
+
+setup(
     name="libp2p",
+    # *IMPORTANT*: Don't manually change the version here. Use `make bump`, as described in readme
+    version="0.1.4",
     description="libp2p implementation written in python",
-    version="0.1.2",
     long_description=long_description,
     long_description_content_type="text/markdown",
     maintainer="The Ethereum Foundation",
     maintainer_email="snakecharmers@ethereum.org",
-    url="https://github.com/ethereum/py-libp2p",
+    url="https://github.com/libp2p/py-libp2p",
+    include_package_data=True,
+    install_requires=install_requires,
+    python_requires=">=3.6,<4",
+    extras_require=extras_require,
+    py_modules=["libp2p"],
     license="MIT/APACHE2.0",
-    platforms=["unix", "linux", "osx"],
+    zip_safe=False,
+    keywords="libp2p p2p",
+    packages=find_packages(exclude=["tests", "tests.*"]),
     classifiers=[
         "Development Status :: 4 - Beta",
         "Intended Audience :: Developers",
         "License :: OSI Approved :: MIT License",
         "License :: OSI Approved :: Apache Software License",
         "Natural Language :: English",
-    ]
-    + py_classifiers,
-    python_requires=">=3.7,<4",
-    install_requires=[
-        "pycryptodome>=3.9.2,<4.0.0",
-        "base58>=1.0.3,<2.0.0",
-        "pymultihash>=0.8.2",
-        "multiaddr>=0.0.8,<0.1.0",
-        "rpcudp>=3.0.0,<4.0.0",
-        "lru-dict>=1.1.6",
-        "protobuf>=3.10.0,<4.0.0",
-        "coincurve>=10.0.0,<11.0.0",
-        "fastecdsa==1.7.4",
-        "pynacl==1.3.0",
-        "trio-asyncio>=0.10.0",
-        "trio>=0.13.0",
-        "async-service>=0.1.0a2,<0.2.0",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
     ],
-    extras_require=extras_require,
-    packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
-    zip_safe=False,
-    keywords="libp2p p2p",
 )
0  tests/core/conftest.py  (new file)

2  tests/core/test_import.py  (new file)
@@ -0,0 +1,2 @@
+def test_import():
+    import libp2p  # noqa: F401
@@ -7,7 +7,7 @@ from libp2p.host.defaults import get_default_protocols
 def test_default_protocols():
     key_pair = create_new_key_pair()
     swarm = initialize_default_swarm(key_pair)
-    host = BasicHost(key_pair.public_key, swarm)
+    host = BasicHost(swarm)

     mux = host.get_mux()
     handlers = mux.handlers
26  tests/host/test_routed_host.py  (new file)
@@ -0,0 +1,26 @@
+import pytest
+
+from libp2p.host.exceptions import ConnectionFailure
+from libp2p.peer.peerinfo import PeerInfo
+from libp2p.tools.factories import HostFactory, RoutedHostFactory
+
+
+@pytest.mark.trio
+async def test_host_routing_success():
+    async with RoutedHostFactory.create_batch_and_listen(False, 2) as hosts:
+        # forces to use routing as no addrs are provided
+        await hosts[0].connect(PeerInfo(hosts[1].get_id(), []))
+        await hosts[1].connect(PeerInfo(hosts[0].get_id(), []))
+
+
+@pytest.mark.trio
+async def test_host_routing_fail():
+    is_secure = False
+    async with RoutedHostFactory.create_batch_and_listen(
+        is_secure, 2
+    ) as routed_hosts, HostFactory.create_batch_and_listen(is_secure, 1) as basic_hosts:
+        # routing fails because host_c does not use routing
+        with pytest.raises(ConnectionFailure):
+            await routed_hosts[0].connect(PeerInfo(basic_hosts[0].get_id(), []))
+        with pytest.raises(ConnectionFailure):
+            await routed_hosts[1].connect(PeerInfo(basic_hosts[0].get_id(), []))
@ -1,79 +0,0 @@
|
|||||||
import pytest
|
|
||||||
|
|
||||||
from libp2p.kademlia.network import KademliaServer
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_example():
|
|
||||||
node_a = KademliaServer()
|
|
||||||
await node_a.listen()
|
|
||||||
|
|
||||||
node_b = KademliaServer()
|
|
||||||
await node_b.listen()
|
|
||||||
|
|
||||||
# Bootstrap the node by connecting to other known nodes, in this case
|
|
||||||
# replace 123.123.123.123 with the IP of another node and optionally
|
|
||||||
# give as many ip/port combos as you can for other nodes.
|
|
||||||
await node_b.bootstrap([node_a.address])
|
|
||||||
|
|
||||||
# set a value for the key "my-key" on the network
|
|
||||||
value = "my-value"
|
|
||||||
key = "my-key"
|
|
||||||
await node_b.set(key, value)
|
|
||||||
|
|
||||||
# get the value associated with "my-key" from the network
|
|
||||||
assert await node_b.get(key) == value
|
|
||||||
assert await node_a.get(key) == value
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("nodes_nr", [(2 ** i) for i in range(2, 5)])
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_multiple_nodes_bootstrap_set_get(nodes_nr):
|
|
||||||
|
|
||||||
node_bootstrap = KademliaServer()
|
|
||||||
await node_bootstrap.listen(3000 + nodes_nr * 2)
|
|
||||||
|
|
||||||
nodes = []
|
|
||||||
for i in range(nodes_nr):
|
|
||||||
node = KademliaServer()
|
|
||||||
addrs = [("127.0.0.1", 3000 + nodes_nr * 2)]
|
|
||||||
await node.listen(3001 + i + nodes_nr * 2)
|
|
||||||
await node.bootstrap(addrs)
|
|
||||||
nodes.append(node)
|
|
||||||
|
|
||||||
for i, node in enumerate(nodes):
|
|
||||||
# set a value for the key "my-key" on the network
|
|
||||||
value = "my awesome value %d" % i
|
|
||||||
key = "set from %d" % i
|
|
||||||
await node.set(key, value)
|
|
||||||
|
|
||||||
for i in range(nodes_nr):
|
|
||||||
for node in nodes:
|
|
||||||
value = "my awesome value %d" % i
|
|
||||||
key = "set from %d" % i
|
|
||||||
assert await node.get(key) == value
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("nodes_nr", [(2 ** i) for i in range(2, 5)])
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_multiple_nodes_set_bootstrap_get(nodes_nr):
|
|
||||||
node_bootstrap = KademliaServer()
|
|
||||||
await node_bootstrap.listen(2000 + nodes_nr * 2)
|
|
||||||
|
|
||||||
nodes = []
|
|
||||||
for i in range(nodes_nr):
|
|
||||||
node = KademliaServer()
|
|
||||||
addrs = [("127.0.0.1", 2000 + nodes_nr * 2)]
|
|
||||||
await node.listen(2001 + i + nodes_nr * 2)
|
|
||||||
await node.bootstrap(addrs)
|
|
||||||
|
|
||||||
value = "my awesome value %d" % i
|
|
||||||
key = "set from %d" % i
|
|
||||||
await node.set(key, value)
|
|
||||||
nodes.append(node)
|
|
||||||
|
|
||||||
for i in range(nodes_nr):
|
|
||||||
for node in nodes:
|
|
||||||
value = "my awesome value %d" % i
|
|
||||||
key = "set from %d" % i
|
|
||||||
assert await node.get(key) == value
|
|
||||||
Some files were not shown because too many files have changed in this diff.