Mirror of https://github.com/varun-r-mallya/py-libp2p.git, synced 2025-12-31 20:36:24 +00:00.

Compare commits: master...80b58a2ae0 (476 commits)
**.github/workflows/tox.yml** (vendored, 6 changes)

```diff
@@ -16,10 +16,10 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python: ['3.9', '3.10', '3.11', '3.12', '3.13']
+        python: ["3.10", "3.11", "3.12", "3.13"]
         toxenv: [core, interop, lint, wheel, demos]
         include:
-          - python: '3.10'
+          - python: "3.10"
             toxenv: docs
       fail-fast: false
     steps:
@@ -46,7 +46,7 @@ jobs:
     runs-on: windows-latest
     strategy:
       matrix:
-        python-version: ['3.11', '3.12', '3.13']
+        python-version: ["3.11", "3.12", "3.13"]
         toxenv: [core, wheel]
       fail-fast: false
     steps:
```
**.gitignore** (vendored, 10 changes)

```diff
@@ -146,6 +146,9 @@ instance/
 # PyBuilder
 target/
 
+# PyRight Config
+pyrightconfig.json
+
 # Jupyter Notebook
 .ipynb_checkpoints
 
@@ -171,3 +174,10 @@ env.bak/
 
 # mkdocs documentation
 /site
+
+#lockfiles
+uv.lock
+poetry.lock
+
+# Sphinx documentation build
+_build/
```
**.pre-commit-config.yaml** (modified)

```diff
@@ -1,59 +1,49 @@
 exclude: '.project-template|docs/conf.py|.*pb2\..*'
 repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v5.0.0
-  hooks:
-  - id: check-yaml
-  - id: check-toml
-  - id: end-of-file-fixer
-  - id: trailing-whitespace
-- repo: https://github.com/asottile/pyupgrade
-  rev: v3.15.0
-  hooks:
-  - id: pyupgrade
-    args: [--py39-plus]
-- repo: https://github.com/psf/black
-  rev: 23.9.1
-  hooks:
-  - id: black
-- repo: https://github.com/PyCQA/flake8
-  rev: 6.1.0
-  hooks:
-  - id: flake8
-    additional_dependencies:
-    - flake8-bugbear==23.9.16
-    exclude: setup.py
-- repo: https://github.com/PyCQA/autoflake
-  rev: v2.2.1
-  hooks:
-  - id: autoflake
-- repo: https://github.com/pycqa/isort
-  rev: 5.12.0
-  hooks:
-  - id: isort
-- repo: https://github.com/pycqa/pydocstyle
-  rev: 6.3.0
-  hooks:
-  - id: pydocstyle
-    additional_dependencies:
-    - tomli # required until >= python311
-- repo: https://github.com/executablebooks/mdformat
-  rev: 0.7.22
-  hooks:
-  - id: mdformat
-    additional_dependencies:
-    - mdformat-gfm
-- repo: local
-  hooks:
-  - id: mypy-local
-    name: run mypy with all dev dependencies present
-    entry: python -m mypy -p libp2p
-    language: system
-    always_run: true
-    pass_filenames: false
-- repo: local
-  hooks:
-  - id: check-rst-files
-    name: Check for .rst files in the top-level directory
-    entry: python -c "import glob, sys; rst_files = glob.glob('*.rst'); sys.exit(1) if rst_files else sys.exit(0)"
-    language: system
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: check-yaml
+      - id: check-toml
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v3.20.0
+    hooks:
+      - id: pyupgrade
+        args: [--py310-plus]
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.11.10
+    hooks:
+      - id: ruff
+        args: [--fix, --exit-non-zero-on-fix]
+      - id: ruff-format
+  - repo: https://github.com/executablebooks/mdformat
+    rev: 0.7.22
+    hooks:
+      - id: mdformat
+        additional_dependencies:
+          - mdformat-gfm
+  - repo: local
+    hooks:
+      - id: mypy-local
+        name: run mypy with all dev dependencies present
+        entry: mypy -p libp2p
+        language: system
+        always_run: true
+        pass_filenames: false
+  - repo: local
+    hooks:
+      - id: pyrefly-local
+        name: run pyrefly typecheck locally
+        entry: pyrefly check
+        language: system
+        always_run: true
+        pass_filenames: false
+
+  - repo: local
+    hooks:
+      - id: check-rst-files
+        name: Check for .rst files in the top-level directory
+        entry: python -c "import glob, sys; rst_files = glob.glob('*.rst'); sys.exit(1) if rst_files else sys.exit(0)"
+        language: system
```
**.project-template/fill_template_vars.py** (deleted, 71 lines)

```python
#!/usr/bin/env python3

import os
import sys
import re
from pathlib import Path


def _find_files(project_root):
    path_exclude_pattern = r"\.git($|\/)|venv|_build"
    file_exclude_pattern = r"fill_template_vars\.py|\.swp$"
    filepaths = []
    for dir_path, _dir_names, file_names in os.walk(project_root):
        if not re.search(path_exclude_pattern, dir_path):
            for file in file_names:
                if not re.search(file_exclude_pattern, file):
                    filepaths.append(str(Path(dir_path, file)))

    return filepaths


def _replace(pattern, replacement, project_root):
    print(f"Replacing values: {pattern}")
    for file in _find_files(project_root):
        try:
            with open(file) as f:
                content = f.read()
            content = re.sub(pattern, replacement, content)
            with open(file, "w") as f:
                f.write(content)
        except UnicodeDecodeError:
            pass


def main():
    project_root = Path(os.path.realpath(sys.argv[0])).parent.parent

    module_name = input("What is your python module name? ")

    pypi_input = input(f"What is your pypi package name? (default: {module_name}) ")
    pypi_name = pypi_input or module_name

    repo_input = input(f"What is your github project name? (default: {pypi_name}) ")
    repo_name = repo_input or pypi_name

    rtd_input = input(
        f"What is your readthedocs.org project name? (default: {pypi_name}) "
    )
    rtd_name = rtd_input or pypi_name

    project_input = input(
        f"What is your project name (ex: at the top of the README)? (default: {repo_name}) "
    )
    project_name = project_input or repo_name

    short_description = input("What is a one-liner describing the project? ")

    _replace("<MODULE_NAME>", module_name, project_root)
    _replace("<PYPI_NAME>", pypi_name, project_root)
    _replace("<REPO_NAME>", repo_name, project_root)
    _replace("<RTD_NAME>", rtd_name, project_root)
    _replace("<PROJECT_NAME>", project_name, project_root)
    _replace("<SHORT_DESCRIPTION>", short_description, project_root)

    os.makedirs(project_root / module_name, exist_ok=True)
    Path(project_root / module_name / "__init__.py").touch()
    Path(project_root / module_name / "py.typed").touch()


if __name__ == "__main__":
    main()
```
**.project-template/refill_template_vars.py** (deleted, 39 lines)

```python
#!/usr/bin/env python3

import os
import sys
from pathlib import Path
import subprocess


def main():
    template_dir = Path(os.path.dirname(sys.argv[0]))
    template_vars_file = template_dir / "template_vars.txt"
    fill_template_vars_script = template_dir / "fill_template_vars.py"

    with open(template_vars_file, "r") as input_file:
        content_lines = input_file.readlines()

    process = subprocess.Popen(
        [sys.executable, str(fill_template_vars_script)],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )

    for line in content_lines:
        process.stdin.write(line)
        process.stdin.flush()

    stdout, stderr = process.communicate()

    if process.returncode != 0:
        print(f"Error occurred: {stderr}")
        sys.exit(1)

    print(stdout)


if __name__ == "__main__":
    main()
```
**.project-template/template_vars.txt** (deleted, 6 lines)

```text
libp2p
libp2p
py-libp2p
py-libp2p
py-libp2p
The Python implementation of the libp2p networking stack
```
**Makefile** (27 changes)

```diff
@@ -7,12 +7,14 @@ help:
 	@echo "clean-pyc - remove Python file artifacts"
 	@echo "clean - run clean-build and clean-pyc"
 	@echo "dist - build package and cat contents of the dist directory"
+	@echo "fix - fix formatting & linting issues with ruff"
 	@echo "lint - fix linting issues with pre-commit"
 	@echo "test - run tests quickly with the default Python"
 	@echo "docs - generate docs and open in browser (linux-docs for version on linux)"
 	@echo "package-test - build package and install it in a venv for manual testing"
 	@echo "notes - consume towncrier newsfragments and update release notes in docs - requires bump to be set"
 	@echo "release - package and upload a release (does not run notes target) - requires bump to be set"
+	@echo "pr - run clean, fix, lint, typecheck, and test i.e basically everything you need to do before creating a PR"
 
 clean-build:
 	rm -fr build/
@@ -37,8 +39,16 @@ lint:
 	&& pre-commit run --all-files --show-diff-on-failure \
 	)
 
+fix:
+	python -m ruff check --fix
+
+typecheck:
+	pre-commit run mypy-local --all-files && pre-commit run pyrefly-local --all-files
+
 test:
-	python -m pytest tests
+	python -m pytest tests -n auto
+
+pr: clean fix lint typecheck test
 
 # protobufs management
 
@@ -48,13 +58,19 @@ PB = libp2p/crypto/pb/crypto.proto \
 	libp2p/security/secio/pb/spipe.proto \
 	libp2p/security/noise/pb/noise.proto \
 	libp2p/identity/identify/pb/identify.proto \
-	libp2p/host/autonat/pb/autonat.proto
+	libp2p/host/autonat/pb/autonat.proto \
+	libp2p/relay/circuit_v2/pb/circuit.proto \
+	libp2p/relay/circuit_v2/pb/dcutr.proto \
+	libp2p/kad_dht/pb/kademlia.proto
 
 PY = $(PB:.proto=_pb2.py)
 PYI = $(PB:.proto=_pb2.pyi)
 
 ## Set default to `protobufs`, otherwise `format` is called when typing only `make`
 all: protobufs
 
 .PHONY: protobufs clean-proto
 
 protobufs: $(PY)
 
 %_pb2.py: %.proto
@@ -63,6 +79,11 @@ protobufs: $(PY)
 clean-proto:
 	rm -f $(PY) $(PYI)
 
+# Force protobuf regeneration by making them always out of date
+$(PY): FORCE
+
+FORCE:
+
 # docs commands
 
 docs: check-docs
@@ -80,7 +101,7 @@ validate-newsfragments:
 check-docs: build-docs validate-newsfragments
 
 build-docs:
-	sphinx-apidoc -o docs/ . setup.py "*conftest*" tests/
+	sphinx-apidoc -o docs/ . "*conftest*" tests/
 	$(MAKE) -C docs clean
 	$(MAKE) -C docs html
 	$(MAKE) -C docs doctest
```
**README.md** (52 changes)

```diff
@@ -12,13 +12,13 @@
 [](https://github.com/libp2p/py-libp2p/actions/workflows/tox.yml)
 [](http://py-libp2p.readthedocs.io/en/latest/?badge=latest)
 
-> ⚠️ **Warning:** py-libp2p is an experimental and work-in-progress repo under development. We do not yet recommend using py-libp2p in production environments.
+> py-libp2p has moved beyond its experimental roots and is steadily progressing toward production readiness. The core features are stable, and we’re focused on refining performance, expanding protocol support, and ensuring smooth interop with other libp2p implementations. We welcome contributions and real-world usage feedback to help us reach full production maturity.
 
 Read more in the [documentation on ReadTheDocs](https://py-libp2p.readthedocs.io/). [View the release notes](https://py-libp2p.readthedocs.io/en/latest/release_notes.html).
 
 ## Maintainers
 
-Currently maintained by [@pacrob](https://github.com/pacrob), [@seetadev](https://github.com/seetadev) and [@dhuseby](https://github.com/dhuseby), looking for assistance!
+Currently maintained by [@pacrob](https://github.com/pacrob), [@seetadev](https://github.com/seetadev) and [@dhuseby](https://github.com/dhuseby). Please reach out to us for collaboration or active feedback. If you have questions, feel free to open a new [discussion](https://github.com/libp2p/py-libp2p/discussions). We are also available on the libp2p Discord — join us at #py-libp2p [sub-channel](https://discord.gg/d92MEugb).
 
 ## Feature Breakdown
 
@@ -34,19 +34,19 @@ ______________________________________________________________________
 | -------------------------------------- | :--------: | :---------------------------------------------------------------------------------: |
 | **`libp2p-tcp`** | ✅ | [source](https://github.com/libp2p/py-libp2p/blob/main/libp2p/transport/tcp/tcp.py) |
 | **`libp2p-quic`** | 🌱 | |
-| **`libp2p-websocket`** | ❌ | |
-| **`libp2p-webrtc-browser-to-server`** | ❌ | |
-| **`libp2p-webrtc-private-to-private`** | ❌ | |
+| **`libp2p-websocket`** | 🌱 | |
+| **`libp2p-webrtc-browser-to-server`** | 🌱 | |
+| **`libp2p-webrtc-private-to-private`** | 🌱 | |
 
 ______________________________________________________________________
 
 ### NAT Traversal
 
-| **NAT Traversal** | **Status** |
-| ----------------------------- | :--------: |
-| **`libp2p-circuit-relay-v2`** | ❌ |
-| **`libp2p-autonat`** | ❌ |
-| **`libp2p-hole-punching`** | ❌ |
+| **NAT Traversal** | **Status** | **Source** |
+| ----------------------------- | :--------: | :-----------------------------------------------------------------------------: |
+| **`libp2p-circuit-relay-v2`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/relay/circuit_v2) |
+| **`libp2p-autonat`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/host/autonat) |
+| **`libp2p-hole-punching`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/relay/circuit_v2) |
 
 ______________________________________________________________________
 
@@ -54,27 +54,27 @@ ______________________________________________________________________
 
 | **Secure Communication** | **Status** | **Source** |
 | ------------------------ | :--------: | :---------------------------------------------------------------------------: |
-| **`libp2p-noise`** | 🌱 | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/security/noise) |
-| **`libp2p-tls`** | ❌ | |
+| **`libp2p-noise`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/security/noise) |
+| **`libp2p-tls`** | 🌱 | |
 
 ______________________________________________________________________
 
 ### Discovery
 
-| **Discovery** | **Status** |
-| -------------------- | :--------: |
-| **`bootstrap`** | ❌ |
-| **`random-walk`** | ❌ |
-| **`mdns-discovery`** | ❌ |
-| **`rendezvous`** | ❌ |
+| **Discovery** | **Status** | **Source** |
+| -------------------- | :--------: | :--------------------------------------------------------------------------------: |
+| **`bootstrap`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/discovery/bootstrap) |
+| **`random-walk`** | 🌱 | |
+| **`mdns-discovery`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/discovery/mdns) |
+| **`rendezvous`** | 🌱 | |
 
 ______________________________________________________________________
 
 ### Peer Routing
 
-| **Peer Routing** | **Status** |
-| -------------------- | :--------: |
-| **`libp2p-kad-dht`** | ❌ |
+| **Peer Routing** | **Status** | **Source** |
+| -------------------- | :--------: | :--------------------------------------------------------------------: |
+| **`libp2p-kad-dht`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/kad_dht) |
 
 ______________________________________________________________________
 
@@ -89,10 +89,10 @@ ______________________________________________________________________
 
 ### Stream Muxers
 
-| **Stream Muxers** | **Status** | **Status** |
-| ------------------ | :--------: | :----------------------------------------------------------------------------------------: |
-| **`libp2p-yamux`** | 🌱 | |
-| **`libp2p-mplex`** | 🛠️ | [source](https://github.com/libp2p/py-libp2p/blob/main/libp2p/stream_muxer/mplex/mplex.py) |
+| **Stream Muxers** | **Status** | **Source** |
+| ------------------ | :--------: | :-------------------------------------------------------------------------------: |
+| **`libp2p-yamux`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/stream_muxer/yamux) |
+| **`libp2p-mplex`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/stream_muxer/mplex) |
 
 ______________________________________________________________________
 
@@ -100,7 +100,7 @@ ______________________________________________________________________
 
 | **Storage** | **Status** |
 | ------------------- | :--------: |
-| **`libp2p-record`** | ❌ |
+| **`libp2p-record`** | 🌱 |
 
 ______________________________________________________________________
```
**docs/conf.py** (30 changes)

```diff
@@ -15,14 +15,24 @@
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 # sys.path.insert(0, os.path.abspath('.'))
 
+import doctest
 import os
+import sys
+from unittest.mock import MagicMock
 
-DIR = os.path.dirname(__file__)
-with open(os.path.join(DIR, "../setup.py"), "r") as f:
-    for line in f:
-        if "version=" in line:
-            setup_version = line.split('"')[1]
-            break
+try:
+    import tomllib
+except ModuleNotFoundError:
+    # For Python < 3.11
+    import tomli as tomllib  # type: ignore (In case of >3.11 Pyrefly doesnt find tomli , which is right but a false flag)
+
+# Path to pyproject.toml (assuming conf.py is in a 'docs' subdirectory)
+pyproject_path = os.path.join(os.path.dirname(__file__), "..", "pyproject.toml")
+
+with open(pyproject_path, "rb") as f:
+    pyproject_data = tomllib.load(f)
+
+setup_version = pyproject_data["project"]["version"]
 
 # -- General configuration ------------------------------------------------
@@ -302,7 +312,6 @@ intersphinx_mapping = {
 
 # -- Doctest configuration ----------------------------------------
 
-import doctest
 
 doctest_default_flags = (
     0
@@ -317,10 +326,9 @@ doctest_default_flags = (
 # Mock out dependencies that are unbuildable on readthedocs, as recommended here:
 # https://docs.readthedocs.io/en/rel/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
 
-import sys
-from unittest.mock import MagicMock
-
-# Add new modules to mock here (it should be the same list as those excluded in setup.py)
+# Add new modules to mock here (it should be the same list
+# as those excluded in pyproject.toml)
 MOCK_MODULES = [
     "fastecdsa",
     "fastecdsa.encoding",
@@ -338,4 +346,4 @@ todo_include_todos = True
 
 # Allow duplicate object descriptions
 nitpicky = False
-nitpick_ignore = [("py:class", "type")]
\ No newline at end of file
+nitpick_ignore = [("py:class", "type")]
```
**docs/examples.circuit_relay.rst** (new file, 499 lines)

Circuit Relay v2 Example
========================

This example demonstrates how to use Circuit Relay v2 in py-libp2p. It includes three components:

1. A relay node that provides relay services
2. A destination node that accepts relayed connections
3. A source node that connects to the destination through the relay

Prerequisites
-------------

First, ensure you have py-libp2p installed:

.. code-block:: console

    $ python -m pip install libp2p
    Collecting libp2p
    ...
    Successfully installed libp2p-x.x.x

Relay Node
----------

Create a file named ``relay_node.py`` with the following content:

.. code-block:: python

    import trio
    import logging
    import multiaddr
    import traceback

    from libp2p import new_host
    from libp2p.relay.circuit_v2.protocol import CircuitV2Protocol
    from libp2p.relay.circuit_v2.transport import CircuitV2Transport
    from libp2p.relay.circuit_v2.config import RelayConfig
    from libp2p.tools.async_service import background_trio_service

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("relay_node")

    async def run_relay():
        listen_addr = multiaddr.Multiaddr("/ip4/0.0.0.0/tcp/9000")
        host = new_host()

        config = RelayConfig(
            enable_hop=True,  # Act as a relay
            enable_stop=True,  # Accept relayed connections
            enable_client=False,  # Don't use other relays
            max_circuit_duration=3600,  # 1 hour
            max_circuit_bytes=1024 * 1024 * 10,  # 10MB
        )

        # Initialize the relay protocol with allow_hop=True to act as a relay
        protocol = CircuitV2Protocol(host, limits=config.limits, allow_hop=True)
        print(f"Created relay protocol with hop enabled: {protocol.allow_hop}")

        # Start the protocol service
        async with host.run(listen_addrs=[listen_addr]):
            peer_id = host.get_id()
            print("\n" + "="*50)
            print(f"Relay node started with ID: {peer_id}")
            print(f"Relay node multiaddr: /ip4/127.0.0.1/tcp/9000/p2p/{peer_id}")
            print("="*50 + "\n")
            print(f"Listening on: {host.get_addrs()}")

            try:
                async with background_trio_service(protocol):
                    print("Protocol service started")

                    transport = CircuitV2Transport(host, protocol, config)
                    print("Relay service started successfully")
                    print(f"Relay limits: {protocol.limits}")

                    while True:
                        await trio.sleep(10)
                        print("Relay node still running...")
                        print(f"Active connections: {len(host.get_network().connections)}")
            except Exception as e:
                print(f"Error in relay service: {e}")
                traceback.print_exc()

    if __name__ == "__main__":
        try:
            trio.run(run_relay)
        except Exception as e:
            print(f"Error running relay: {e}")
            traceback.print_exc()

Destination Node
----------------

Create a file named ``destination_node.py`` with the following content:

.. code-block:: python

    import trio
    import logging
    import multiaddr
    import traceback
    import sys

    from libp2p import new_host
    from libp2p.relay.circuit_v2.protocol import CircuitV2Protocol
    from libp2p.relay.circuit_v2.transport import CircuitV2Transport
    from libp2p.relay.circuit_v2.config import RelayConfig
    from libp2p.peer.peerinfo import info_from_p2p_addr
    from libp2p.tools.async_service import background_trio_service

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("destination_node")

    async def handle_echo_stream(stream):
        """Handle incoming stream by echoing received data."""
        try:
            print(f"New echo stream from: {stream.get_protocol()}")
            while True:
                data = await stream.read(1024)
                if not data:
                    print("Stream closed by remote")
                    break

                message = data.decode('utf-8')
                print(f"Received: {message}")

                response = f"Echo: {message}".encode('utf-8')
                await stream.write(response)
                print(f"Sent response: Echo: {message}")
        except Exception as e:
            print(f"Error handling stream: {e}")
            traceback.print_exc()
        finally:
            await stream.close()
            print("Stream closed")

    async def run_destination(relay_peer_id=None):
        """
        Run a simple destination node that accepts connections.
        This is a simplified version that doesn't use the relay functionality.
        """
        listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/9001")
        host = new_host()

        # Configure as a relay receiver (stop)
        config = RelayConfig(
            enable_stop=True,  # Accept relayed connections
            enable_client=True,  # Use relays for outbound connections
            max_circuit_duration=3600,  # 1 hour
            max_circuit_bytes=1024 * 1024 * 10,  # 10MB
        )

        # Initialize the relay protocol
        protocol = CircuitV2Protocol(host, limits=config.limits, allow_hop=False)

        async with host.run(listen_addrs=[listen_addr]):
            # Print host information
            dest_peer_id = host.get_id()
            print("\n" + "="*50)
            print(f"Destination node started with ID: {dest_peer_id}")
            print(f"Use this ID in the source node: {dest_peer_id}")
            print("="*50 + "\n")
            print(f"Listening on: {host.get_addrs()}")

            # Set stream handler for the echo protocol
            host.set_stream_handler("/echo/1.0.0", handle_echo_stream)
            print("Registered echo protocol handler")

            # Start the protocol service in the background
            async with background_trio_service(protocol):
                print("Protocol service started")

                # Create and register the transport
                transport = CircuitV2Transport(host, protocol, config)
                print("Transport created")

                # Create a listener for relayed connections
                listener = transport.create_listener(handle_echo_stream)
                print("Created relay listener")

                # Start listening for relayed connections
                async with trio.open_nursery() as nursery:
                    await listener.listen("/p2p-circuit", nursery)
                    print("Destination node ready to accept relayed connections")

                    if not relay_peer_id:
                        print("No relay peer ID provided. Please enter the relay's peer ID:")
                        print("Waiting for relay peer ID input...")
                        while True:
                            if sys.stdin.isatty():  # Only try to read from stdin if it's a terminal
                                try:
                                    relay_peer_id = input("Enter relay peer ID: ").strip()
                                    if relay_peer_id:
                                        break
                                except EOFError:
                                    await trio.sleep(5)
                            else:
                                print("No terminal detected. Waiting for relay peer ID as command line argument.")
                                await trio.sleep(10)
                                continue

                    # Connect to the relay node with the provided relay peer ID
                    relay_addr_str = f"/ip4/127.0.0.1/tcp/9000/p2p/{relay_peer_id}"
                    print(f"Connecting to relay at {relay_addr_str}")

                    try:
                        # Convert string address to multiaddr, then to peer info
                        relay_maddr = multiaddr.Multiaddr(relay_addr_str)
                        relay_peer_info = info_from_p2p_addr(relay_maddr)
                        await host.connect(relay_peer_info)
                        print("Connected to relay successfully")

                        # Add the relay to the transport's discovery
                        transport.discovery._add_relay(relay_peer_info.peer_id)
                        print(f"Added relay {relay_peer_info.peer_id} to discovery")

                        # Keep the node running
                        while True:
                            await trio.sleep(10)
                            print("Destination node still running...")
                    except Exception as e:
                        print(f"Failed to connect to relay: {e}")
                        traceback.print_exc()

    if __name__ == "__main__":
        print("Starting destination node...")
        relay_id = None
        if len(sys.argv) > 1:
            relay_id = sys.argv[1]
            print(f"Using provided relay ID: {relay_id}")
        trio.run(run_destination, relay_id)

Source Node
-----------

Create a file named ``source_node.py`` with the following content:

.. code-block:: python

    import trio
    import logging
    import multiaddr
    import traceback
    import sys

    from libp2p import new_host
    from libp2p.peer.peerinfo import PeerInfo
    from libp2p.peer.id import ID
    from libp2p.relay.circuit_v2.protocol import CircuitV2Protocol
    from libp2p.relay.circuit_v2.transport import CircuitV2Transport
    from libp2p.relay.circuit_v2.config import RelayConfig
    from libp2p.peer.peerinfo import info_from_p2p_addr
    from libp2p.tools.async_service import background_trio_service
    from libp2p.relay.circuit_v2.discovery import RelayInfo

    # Configure logging
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("source_node")

    async def run_source(relay_peer_id=None, destination_peer_id=None):
        # Create a libp2p host
        listen_addr = multiaddr.Multiaddr("/ip4/0.0.0.0/tcp/9002")
        host = new_host()

        # Configure as a relay client
        config = RelayConfig(
            enable_client=True,  # Use relays for outbound connections
            max_circuit_duration=3600,  # 1 hour
            max_circuit_bytes=1024 * 1024 * 10,  # 10MB
        )

        # Initialize the relay protocol
        protocol = CircuitV2Protocol(host, limits=config.limits, allow_hop=False)

        # Start the protocol service
        async with host.run(listen_addrs=[listen_addr]):
            # Print host information
            print(f"Source node started with ID: {host.get_id()}")
            print(f"Listening on: {host.get_addrs()}")

            # Start the protocol service in the background
            async with background_trio_service(protocol):
                print("Protocol service started")

                # Create and register the transport
                transport = CircuitV2Transport(host, protocol, config)

                # Get relay peer ID if not provided
                if not relay_peer_id:
                    print("No relay peer ID provided. Please enter the relay's peer ID:")
                    while True:
                        if sys.stdin.isatty():  # Only try to read from stdin if it's a terminal
                            try:
                                relay_peer_id = input("Enter relay peer ID: ").strip()
                                if relay_peer_id:
                                    break
                            except EOFError:
                                await trio.sleep(5)
                        else:
                            print("No terminal detected. Waiting for relay peer ID as command line argument.")
                            await trio.sleep(10)
                            continue

                # Connect to the relay node with the provided relay peer ID
                relay_addr_str = f"/ip4/127.0.0.1/tcp/9000/p2p/{relay_peer_id}"
                print(f"Connecting to relay at {relay_addr_str}")

                try:
                    # Convert string address to multiaddr, then to peer info
                    relay_maddr = multiaddr.Multiaddr(relay_addr_str)
                    relay_peer_info = info_from_p2p_addr(relay_maddr)
                    await host.connect(relay_peer_info)
                    print("Connected to relay successfully")

                    # Manually add the relay to the discovery service
                    relay_id = relay_peer_info.peer_id
                    now = trio.current_time()

                    # Create relay info and add it to discovery
                    relay_info = RelayInfo(
                        peer_id=relay_id,
                        discovered_at=now,
                        last_seen=now
                    )
                    transport.discovery._discovered_relays[relay_id] = relay_info
                    print(f"Added relay {relay_id} to discovery")

                    # Start relay discovery in the background
                    async with background_trio_service(transport.discovery):
                        print("Relay discovery started")

                        # Wait for relay discovery
                        await trio.sleep(5)
                        print("Relay discovery completed")

                        # Get destination peer ID if not provided
                        if not destination_peer_id:
                            print("No destination peer ID provided. Please enter the destination's peer ID:")
                            while True:
                                if sys.stdin.isatty():  # Only try to read from stdin if it's a terminal
                                    try:
                                        destination_peer_id = input("Enter destination peer ID: ").strip()
                                        if destination_peer_id:
                                            break
                                    except EOFError:
                                        await trio.sleep(5)
                                else:
                                    print("No terminal detected. Waiting for destination peer ID as command line argument.")
                                    await trio.sleep(10)
                                    continue

                        print(f"Attempting to connect to {destination_peer_id} via relay")

                        # Check if we have any discovered relays
                        discovered_relays = list(transport.discovery._discovered_relays.keys())
                        print(f"Discovered relays: {discovered_relays}")

                        try:
                            # Create a circuit relay multiaddr for the destination
                            dest_id = ID.from_base58(destination_peer_id)

                            # Create a circuit multiaddr that includes the relay
                            # Format: /ip4/127.0.0.1/tcp/9000/p2p/RELAY_ID/p2p-circuit/p2p/DEST_ID
                            circuit_addr = multiaddr.Multiaddr(f"{relay_addr_str}/p2p-circuit/p2p/{destination_peer_id}")
                            print(f"Created circuit address: {circuit_addr}")

                            # Dial using the circuit address
                            connection = await transport.dial(circuit_addr)
                            print("Connection established through relay!")

                            # Open a stream using the echo protocol
                            stream = await connection.new_stream("/echo/1.0.0")

                            # Send messages periodically
                            for i in range(5):
                                message = f"Hello from source, message {i+1}"
                                print(f"Sending: {message}")

                                await stream.write(message.encode('utf-8'))
                                response = await stream.read(1024)

                                print(f"Received: {response.decode('utf-8')}")
                                await trio.sleep(1)

                            # Close the stream
                            await stream.close()
                            print("Stream closed")
                        except Exception as e:
                            print(f"Error connecting through relay: {e}")
                            print("Detailed error:")
                            traceback.print_exc()

                        # Keep the node running for a while
                        await trio.sleep(30)
                        print("Source node shutting down")

                except Exception as e:
                    print(f"Error: {e}")
                    traceback.print_exc()

    if __name__ == "__main__":
        relay_id = None
        dest_id = None

        # Parse command line arguments if provided
        if len(sys.argv) > 1:
            relay_id = sys.argv[1]
            print(f"Using provided relay ID: {relay_id}")

        if len(sys.argv) > 2:
            dest_id = sys.argv[2]
            print(f"Using provided destination ID: {dest_id}")

        trio.run(run_source, relay_id, dest_id)

Running the Example
-------------------

1. First, start the relay node:

.. code-block:: console

    $ python relay_node.py
    Created relay protocol with hop enabled: True

    ==================================================
    Relay node started with ID: QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx
    Relay node multiaddr: /ip4/127.0.0.1/tcp/9000/p2p/QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx
    ==================================================

    Listening on: [<Multiaddr /ip4/0.0.0.0/tcp/9000/p2p/QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx>]
    Protocol service started
    Relay service started successfully
    Relay limits: RelayLimits(duration=3600, data=10485760, max_circuit_conns=8, max_reservations=4)

Note the relay node's peer ID (in this example: `QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx`). You'll need this for the other nodes.

2. Next, start the destination node:

.. code-block:: console

    $ python destination_node.py
    Starting destination node...

    ==================================================
    Destination node started with ID: QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s
    Use this ID in the source node: QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s
    ==================================================

    Listening on: [<Multiaddr /ip4/0.0.0.0/tcp/9001/p2p/QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s>]
    Registered echo protocol handler
    Protocol service started
    Transport created
    Created relay listener
    Destination node ready to accept relayed connections
    No relay peer ID provided. Please enter the relay's peer ID:
    Waiting for relay peer ID input...
    Enter relay peer ID: QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx
    Connecting to relay at /ip4/127.0.0.1/tcp/9000/p2p/QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx
    Connected to relay successfully
    Added relay QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx to discovery
    Destination node still running...

Note the destination node's peer ID (in this example: `QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s`). You'll need this for the source node.

3. Finally, start the source node:

.. code-block:: console

    $ python source_node.py
    Source node started with ID: QmPyM56cgmFoHTgvMgGfDWRdVRQznmxCDDDg2dJ8ygVXj3
    Listening on: [<Multiaddr /ip4/0.0.0.0/tcp/9002/p2p/QmPyM56cgmFoHTgvMgGfDWRdVRQznmxCDDDg2dJ8ygVXj3>]
    Protocol service started
    No relay peer ID provided. Please enter the relay's peer ID:
    Enter relay peer ID: QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx
    Connecting to relay at /ip4/127.0.0.1/tcp/9000/p2p/QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx
    Connected to relay successfully
    Added relay QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx to discovery
    Relay discovery started
    Relay discovery completed
    No destination peer ID provided. Please enter the destination's peer ID:
    Enter destination peer ID: QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s
    Attempting to connect to QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s via relay
    Discovered relays: [<libp2p.peer.id.ID (QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx)>]
    Created circuit address: /ip4/127.0.0.1/tcp/9000/p2p/QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx/p2p-circuit/p2p/QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s

At this point, the source node will establish a connection through the relay to the destination node and start sending messages.

4. Alternatively, you can provide the peer IDs as command-line arguments:

.. code-block:: console

    # For the destination node (provide relay ID)
    $ python destination_node.py QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx

    # For the source node (provide both relay and destination IDs)
    $ python source_node.py QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s

This example demonstrates how to use Circuit Relay v2 to establish connections between peers that cannot connect directly. The peer IDs are dynamically generated for each node, and the relay facilitates communication between the source and destination nodes.
**docs/examples.kademlia.rst** (new file, 124 lines)

Kademlia DHT Demo
=================

This example demonstrates a Kademlia Distributed Hash Table (DHT) implementation with both value storage/retrieval and content provider advertisement/discovery functionality.

.. code-block:: console

    $ python -m pip install libp2p
    Collecting libp2p
    ...
    Successfully installed libp2p-x.x.x
    $ cd examples/kademlia
    $ python kademlia.py --mode server
    2025-06-13 19:51:25,424 - kademlia-example - INFO - Running in server mode on port 0
    2025-06-13 19:51:25,426 - kademlia-example - INFO - Connected to bootstrap nodes: []
    2025-06-13 19:51:25,426 - kademlia-example - INFO - To connect to this node, use: --bootstrap /ip4/127.0.0.1/tcp/28910/p2p/16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef
    2025-06-13 19:51:25,426 - kademlia-example - INFO - Saved server address to log: /ip4/127.0.0.1/tcp/28910/p2p/16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef
    2025-06-13 19:51:25,427 - kademlia-example - INFO - DHT service started in SERVER mode
    2025-06-13 19:51:25,427 - kademlia-example - INFO - Stored value 'Hello message from Sumanjeet' with key: FVDjasarSFDoLPMdgnp1dHSbW2ZAfN8NU2zNbCQeczgP
    2025-06-13 19:51:25,427 - kademlia-example - INFO - Successfully advertised as server for content: 361f2ed1183bca491b8aec11f0b9e5c06724759b0f7480ae7fb4894901993bc8

Copy the line that starts with ``--bootstrap``, open a new terminal in the same folder and run the client:

.. code-block:: console

    $ python kademlia.py --mode client --bootstrap /ip4/127.0.0.1/tcp/28910/p2p/16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef
    2025-06-13 19:51:37,022 - kademlia-example - INFO - Running in client mode on port 0
    2025-06-13 19:51:37,026 - kademlia-example - INFO - Connected to bootstrap nodes: [<libp2p.peer.id.ID (16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef)>]
    2025-06-13 19:51:37,027 - kademlia-example - INFO - DHT service started in CLIENT mode
    2025-06-13 19:51:37,027 - kademlia-example - INFO - Looking up key: FVDjasarSFDoLPMdgnp1dHSbW2ZAfN8NU2zNbCQeczgP
    2025-06-13 19:51:37,031 - kademlia-example - INFO - Retrieved value: Hello message from Sumanjeet
    2025-06-13 19:51:37,031 - kademlia-example - INFO - Looking for servers of content: 361f2ed1183bca491b8aec11f0b9e5c06724759b0f7480ae7fb4894901993bc8
    2025-06-13 19:51:37,035 - kademlia-example - INFO - Found 1 servers for content: ['16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef']

Alternatively, if you run the server first, the client can automatically extract the bootstrap address from the server log file:

.. code-block:: console

    $ python kademlia.py --mode client
    2025-06-13 19:51:37,022 - kademlia-example - INFO - Running in client mode on port 0
    2025-06-13 19:51:37,026 - kademlia-example - INFO - Connected to bootstrap nodes: [<libp2p.peer.id.ID (16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef)>]
    2025-06-13 19:51:37,027 - kademlia-example - INFO - DHT service started in CLIENT mode
    2025-06-13 19:51:37,027 - kademlia-example - INFO - Looking up key: FVDjasarSFDoLPMdgnp1dHSbW2ZAfN8NU2zNbCQeczgP
    2025-06-13 19:51:37,031 - kademlia-example - INFO - Retrieved value: Hello message from Sumanjeet
    2025-06-13 19:51:37,031 - kademlia-example - INFO - Looking for servers of content: 361f2ed1183bca491b8aec11f0b9e5c06724759b0f7480ae7fb4894901993bc8
    2025-06-13 19:51:37,035 - kademlia-example - INFO - Found 1 servers for content: ['16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef']

The demo showcases key DHT operations:

- **Value Storage & Retrieval**: The server stores a value, and the client retrieves it
- **Content Provider Discovery**: The server advertises content, and the client finds providers
- **Peer Discovery**: Automatic bootstrap and peer routing using the Kademlia algorithm
- **Network Resilience**: Distributed storage across multiple nodes (when available)

Command Line Options
--------------------

The Kademlia demo supports several command line options for customization:

.. code-block:: console

    $ python kademlia.py --help
    usage: kademlia.py [-h] [--mode MODE] [--port PORT] [--bootstrap [BOOTSTRAP ...]] [--verbose]

    Kademlia DHT example with content server functionality

    options:
      -h, --help            show this help message and exit
      --mode MODE           Run as a server or client node (default: server)
      --port PORT           Port to listen on (0 for random) (default: 0)
      --bootstrap [BOOTSTRAP ...]
                            Multiaddrs of bootstrap nodes. Provide a space-separated list of addresses.
                            This is required for client mode.
      --verbose             Enable verbose logging

**Examples:**

Start server on a specific port:

.. code-block:: console

    $ python kademlia.py --mode server --port 8000

Start client with verbose logging:

.. code-block:: console

    $ python kademlia.py --mode client --verbose

Connect to multiple bootstrap nodes:

.. code-block:: console

    $ python kademlia.py --mode client --bootstrap /ip4/127.0.0.1/tcp/8000/p2p/... /ip4/127.0.0.1/tcp/8001/p2p/...

How It Works
------------

The Kademlia DHT implementation demonstrates several key concepts:

**Server Mode:**

- Stores key-value pairs in the distributed hash table
- Advertises itself as a content provider for specific content
- Handles incoming DHT requests from other nodes
- Maintains routing table with known peers

**Client Mode:**

- Connects to bootstrap nodes to join the network
- Retrieves values by their keys from the DHT
- Discovers content providers for specific content
- Performs network lookups using the Kademlia algorithm

**Key Components:**

- **Routing Table**: Organizes peers in k-buckets based on XOR distance
- **Value Store**: Manages key-value storage with TTL (time-to-live)
- **Provider Store**: Tracks which peers provide specific content
- **Peer Routing**: Implements iterative lookups to find closest peers
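To make the XOR-distance ordering behind these components concrete, here is a small, self-contained sketch. It is illustrative only, not the library's internal code; the ``xor_distance`` helper is made up for this example:

.. code-block:: python

    import hashlib

    def xor_distance(key_a: bytes, key_b: bytes) -> int:
        """Kademlia distance: XOR of the two key digests, read as an integer."""
        a = int.from_bytes(hashlib.sha256(key_a).digest(), "big")
        b = int.from_bytes(hashlib.sha256(key_b).digest(), "big")
        return a ^ b

    # Order candidate peers by closeness to a target key; a real routing
    # table maintains this ordering incrementally in k-buckets instead of
    # sorting the full peer set on every lookup.
    peers = [b"peer-1", b"peer-2", b"peer-3"]
    target = b"some-content-key"
    print(sorted(peers, key=lambda p: xor_distance(p, target)))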
The full source code for this example is below:

.. literalinclude:: ../examples/kademlia/kademlia.py
   :language: python
   :linenos:
64
docs/examples.mDNS.rst
Normal file
64
docs/examples.mDNS.rst
Normal file
@ -0,0 +1,64 @@
|
||||
mDNS Peer Discovery Example
===========================

This example demonstrates how to use mDNS (Multicast DNS) for peer discovery in py-libp2p.

Prerequisites
-------------

First, ensure you have py-libp2p installed and your environment is activated:

.. code-block:: console

   $ python -m pip install libp2p

Running the Example
-------------------

The mDNS demo script lets you discover peers on your local network using mDNS. To start a peer, run:

.. code-block:: console

   $ mdns-demo

You should see output similar to:

.. code-block:: console

   Run this from another console to start another peer on a different port:

   python mdns-demo -p <ANOTHER_PORT>

   Waiting for mDNS peer discovery events...

   2025-06-20 23:28:12,052 - libp2p.example.discovery.mdns - INFO - Starting peer Discovery

To discover peers, open another terminal and run the same command with a different port:

.. code-block:: console

   $ mdns-demo -p 9001

You should see output indicating that a new peer has been discovered:

.. code-block:: console

   Run this from the same folder in another console to start another peer on a different port:

   python mdns-demo -p <ANOTHER_PORT>

   Waiting for mDNS peer discovery events...

   2025-06-20 23:43:43,786 - libp2p.example.discovery.mdns - INFO - Starting peer Discovery
   2025-06-20 23:43:43,790 - libp2p.example.discovery.mdns - INFO - Discovered: 16Uiu2HAmGxy5NdQEjZWtrYUMrzdp3Syvg7MB2E5Lx8weA9DanYxj

When a new peer is discovered, its peer ID is printed in the console output.

How it Works
------------

- Each node advertises itself on the local network using mDNS.
- When a new peer is discovered, the handler prints its peer ID.
- This is useful for local peer discovery without requiring a DHT or bootstrap nodes.

You can modify the script to perform additional actions when peers are discovered, such as opening streams or exchanging messages; a sketch of such a handler follows.
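The sketch below registers a custom handler on the ``peerDiscovery`` event emitter, the same mechanism py-libp2p's discovery examples use; the handler name is illustrative:

.. code-block:: python

   from libp2p.abc import PeerInfo
   from libp2p.discovery.events.peerDiscovery import peerDiscovery

   def on_peer_discovered(peer_info: PeerInfo) -> None:
       # Called once per newly discovered peer; extend this to dial
       # the peer or open a stream instead of just printing.
       print(f"Discovered: {peer_info.peer_id}")

   peerDiscovery.register_peer_discovered_handler(on_peer_discovered)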
194 docs/examples.multiple_connections.rst Normal file
@@ -0,0 +1,194 @@
Multiple Connections Per Peer
=============================

This example demonstrates how to use the multiple connections per peer feature in py-libp2p.

Overview
--------

The multiple connections per peer feature allows a libp2p node to maintain multiple network connections to the same peer. This provides several benefits:

- **Improved reliability**: If one connection fails, others remain available
- **Better performance**: Load can be distributed across multiple connections
- **Enhanced throughput**: Multiple streams can be created in parallel
- **Fault tolerance**: Redundant connections provide backup paths

Configuration
-------------

The feature is configured through the `ConnectionConfig` class:

.. code-block:: python

   from libp2p.network.swarm import ConnectionConfig

   # Default configuration
   config = ConnectionConfig()
   print(f"Max connections per peer: {config.max_connections_per_peer}")
   print(f"Load balancing strategy: {config.load_balancing_strategy}")

   # Custom configuration
   custom_config = ConnectionConfig(
       max_connections_per_peer=5,
       connection_timeout=60.0,
       load_balancing_strategy="least_loaded"
   )

Load Balancing Strategies
-------------------------

Two load balancing strategies are available:

**Round Robin** (default)
   Cycles through connections in order, distributing load evenly.

**Least Loaded**
   Selects the connection with the fewest active streams.
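Both policies can be expressed in a few lines. The sketch below is illustrative rather than the swarm's internals; ``conns`` and ``active_streams`` are placeholder structures:

.. code-block:: python

   from itertools import count

   class RoundRobin:
       """Cycle through connections in order."""

       def __init__(self) -> None:
           self._counter = count()

       def pick(self, conns: list) -> object:
           return conns[next(self._counter) % len(conns)]

   def least_loaded(conns: list, active_streams: dict) -> object:
       # Pick the connection with the fewest active streams;
       # active_streams maps each connection to its stream count.
       return min(conns, key=lambda c: active_streams[c])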
API Usage
---------

The new API provides direct access to multiple connections:

.. code-block:: python

   from libp2p import new_swarm

   # Create a swarm with multiple connections support
   swarm = new_swarm()

   # Dial a peer - returns a list of connections
   connections = await swarm.dial_peer(peer_id)
   print(f"Established {len(connections)} connections")

   # Get all connections to a peer
   peer_connections = swarm.get_connections(peer_id)

   # Get all connections (across all peers)
   all_connections = swarm.get_connections()

   # Get the complete connections map
   connections_map = swarm.get_connections_map()

   # Backward compatibility - get a single connection
   single_conn = swarm.get_connection(peer_id)

Backward Compatibility
----------------------

Existing code continues to work through backward compatibility features:

.. code-block:: python

   # Legacy 1:1 mapping (returns the first connection for each peer)
   legacy_connections = swarm.connections_legacy

   # Single connection access (returns the first available connection)
   conn = swarm.get_connection(peer_id)

Example
-------

A complete working example is available in the `examples/doc-examples/multiple_connections_example.py` file.

Production Configuration
------------------------

For production use, consider these settings:

**RetryConfig Parameters**

The `RetryConfig` class controls connection retry behavior with exponential backoff:

- **max_retries**: Maximum number of retry attempts before giving up (default: 3)
- **initial_delay**: Initial delay in seconds before the first retry (default: 0.1s)
- **max_delay**: Maximum delay cap to prevent excessive wait times (default: 30.0s)
- **backoff_multiplier**: Exponential backoff multiplier - each retry multiplies the delay by this factor (default: 2.0)
- **jitter_factor**: Random jitter (0.0-1.0) to prevent synchronized retries (default: 0.1)

**ConnectionConfig Parameters**

The `ConnectionConfig` class manages multi-connection behavior:

- **max_connections_per_peer**: Maximum connections allowed to a single peer (default: 3)
- **connection_timeout**: Timeout for establishing new connections in seconds (default: 30.0s)
- **load_balancing_strategy**: Strategy for distributing streams ("round_robin" or "least_loaded")

**Load Balancing Strategies Explained**

- **round_robin**: Cycles through connections in order, distributing load evenly. Simple and predictable.
- **least_loaded**: Selects the connection with the fewest active streams. Better for performance but more complex.

.. code-block:: python

   from libp2p.network.swarm import ConnectionConfig, RetryConfig

   # Production-ready configuration
   retry_config = RetryConfig(
       max_retries=3,           # Maximum retry attempts before giving up
       initial_delay=0.1,       # Start with a 100ms delay
       max_delay=30.0,          # Cap exponential backoff at 30 seconds
       backoff_multiplier=2.0,  # Double the delay each retry (100ms -> 200ms -> 400ms)
       jitter_factor=0.1        # Add 10% random jitter to prevent a thundering herd
   )

   connection_config = ConnectionConfig(
       max_connections_per_peer=3,            # Allow up to 3 connections per peer
       connection_timeout=30.0,               # 30 second timeout for new connections
       load_balancing_strategy="round_robin"  # Simple, predictable load distribution
   )

   swarm = new_swarm(
       retry_config=retry_config,
       connection_config=connection_config
   )

**How RetryConfig Works in Practice**

With the configuration above, connection retries follow this pattern (a short script reproducing the schedule appears after the list):

1. **Attempt 1**: Immediate connection attempt
2. **Attempt 2**: Wait 100ms ± 10ms jitter, then retry
3. **Attempt 3**: Wait 200ms ± 20ms jitter, then retry
4. **Attempt 4**: Wait 400ms ± 40ms jitter, then retry
5. **Attempt 5**: Wait 800ms ± 80ms jitter, then retry
6. **Attempt 6**: Wait 1.6s ± 160ms jitter, then retry
7. **Attempt 7**: Wait 3.2s ± 320ms jitter, then retry
8. **Attempt 8**: Wait 6.4s ± 640ms jitter, then retry
9. **Attempt 9**: Wait 12.8s ± 1.28s jitter, then retry
10. **Attempt 10**: Wait 25.6s ± 2.56s jitter, then retry
11. **Attempt 11**: Wait 30.0s (capped) ± 3.0s jitter, then retry
12. **Attempt 12**: Wait 30.0s (capped) ± 3.0s jitter, then retry
13. **Give up**: After 12 attempts (3 initial + 9 retries), the connection fails

The jitter prevents multiple clients from retrying simultaneously, reducing server load.
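The schedule above is plain arithmetic and can be reproduced in a few lines. This sketch assumes delay = ``initial_delay * backoff_multiplier**n`` capped at ``max_delay`` with uniform ± ``jitter_factor`` jitter; it does not call the library:

.. code-block:: python

   import random

   def backoff_delays(initial=0.1, multiplier=2.0, max_delay=30.0,
                      jitter_factor=0.1, attempts=11):
       for n in range(attempts):
           base = min(initial * multiplier**n, max_delay)
           jitter = base * jitter_factor * (2 * random.random() - 1)
           yield base + jitter

   # Attempt 1 is immediate; attempts 2..12 wait per the schedule above.
   for attempt, delay in enumerate(backoff_delays(), start=2):
       print(f"Attempt {attempt}: wait ~{delay:.2f}s")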
**Parameter Tuning Guidelines**

**For Development/Testing:**

- Use a lower `max_retries` (1-2) and shorter delays for faster feedback
- Example: `RetryConfig(max_retries=2, initial_delay=0.01, max_delay=0.1)`

**For Production:**

- Use a moderate `max_retries` (3-5) with reasonable delays for reliability
- Example: `RetryConfig(max_retries=5, initial_delay=0.1, max_delay=60.0)`

**For High-Latency Networks:**

- Use a higher `max_retries` (5-10) with longer delays
- Example: `RetryConfig(max_retries=8, initial_delay=0.5, max_delay=120.0)`

**For Load Balancing:**

- Use `round_robin` for simple, predictable behavior
- Use `least_loaded` when you need optimal performance and can handle the complexity

Architecture
------------

The implementation follows the same architectural patterns as the Go and JavaScript reference implementations:

- **Core data structure**: `dict[ID, list[INetConn]]` for 1:many mapping (see the sketch below)
- **API consistency**: Methods like `get_connections()` match the reference implementations
- **Load balancing**: Integrated at the API level for optimal performance
- **Backward compatibility**: Maintains existing interfaces for gradual migration

This design ensures consistency across libp2p implementations while providing the benefits of multiple connections per peer.
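A hedged sketch of that 1:many bookkeeping, with ``str`` and ``object`` standing in for the real ``ID`` and ``INetConn`` types; the real swarm wraps this map behind ``get_connections()`` and ``get_connections_map()``:

.. code-block:: python

   from collections import defaultdict

   # peer ID -> all live connections to that peer
   connections: dict[str, list[object]] = defaultdict(list)

   def get_connections(peer_id: str | None = None) -> list[object]:
       if peer_id is not None:
           return list(connections[peer_id])
       # Flatten across all peers when no peer ID is given.
       return [c for conns in connections.values() for c in conns]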
131 docs/examples.random_walk.rst Normal file
@@ -0,0 +1,131 @@
Random Walk Example
===================

This example demonstrates the Random Walk module's peer discovery capabilities using real libp2p hosts and the Kademlia DHT.
It shows how the Random Walk module automatically discovers new peers and maintains routing table health.

The Random Walk implementation performs the following key operations:

* **Automatic Peer Discovery**: Generates random peer IDs and queries the DHT network to discover new peers
* **Routing Table Maintenance**: Periodically refreshes the routing table to maintain network connectivity
* **Connection Management**: Maintains optimal connections to healthy peers in the network
* **Real-time Statistics**: Displays routing table size, connected peers, and peerstore statistics

.. code-block:: console

   $ python -m pip install libp2p
   Collecting libp2p
   ...
   Successfully installed libp2p-x.x.x
   $ cd examples/random_walk
   $ python random_walk.py --mode server
   2025-08-12 19:51:25,424 - random-walk-example - INFO - === Random Walk Example for py-libp2p ===
   2025-08-12 19:51:25,424 - random-walk-example - INFO - Mode: server, Port: 0 Demo interval: 30s
   2025-08-12 19:51:25,426 - random-walk-example - INFO - Starting server node on port 45123
   2025-08-12 19:51:25,426 - random-walk-example - INFO - Node peer ID: 16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef
   2025-08-12 19:51:25,426 - random-walk-example - INFO - Node address: /ip4/0.0.0.0/tcp/45123/p2p/16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef
   2025-08-12 19:51:25,427 - random-walk-example - INFO - Initial routing table size: 0
   2025-08-12 19:51:25,427 - random-walk-example - INFO - DHT service started in SERVER mode
   2025-08-12 19:51:25,430 - libp2p.discovery.random_walk.rt_refresh_manager - INFO - RT Refresh Manager started
   2025-08-12 19:51:55,432 - random-walk-example - INFO - --- Iteration 1 ---
   2025-08-12 19:51:55,432 - random-walk-example - INFO - Routing table size: 15
   2025-08-12 19:51:55,432 - random-walk-example - INFO - Connected peers: 8
   2025-08-12 19:51:55,432 - random-walk-example - INFO - Peerstore size: 42

You can also run the example in client mode:

.. code-block:: console

   $ python random_walk.py --mode client
   2025-08-12 19:52:15,424 - random-walk-example - INFO - === Random Walk Example for py-libp2p ===
   2025-08-12 19:52:15,424 - random-walk-example - INFO - Mode: client, Port: 0 Demo interval: 30s
   2025-08-12 19:52:15,426 - random-walk-example - INFO - Starting client node on port 51234
   2025-08-12 19:52:15,426 - random-walk-example - INFO - Node peer ID: 16Uiu2HAmAbc123xyz...
   2025-08-12 19:52:15,427 - random-walk-example - INFO - DHT service started in CLIENT mode
   2025-08-12 19:52:45,432 - random-walk-example - INFO - --- Iteration 1 ---
   2025-08-12 19:52:45,432 - random-walk-example - INFO - Routing table size: 8
   2025-08-12 19:52:45,432 - random-walk-example - INFO - Connected peers: 5
   2025-08-12 19:52:45,432 - random-walk-example - INFO - Peerstore size: 25

Command Line Options
--------------------

The example supports several command-line options:

.. code-block:: console

   $ python random_walk.py --help
   usage: random_walk.py [-h] [--mode {server,client}] [--port PORT]
                         [--demo-interval DEMO_INTERVAL] [--verbose]

   Random Walk Example for py-libp2p Kademlia DHT

   optional arguments:
     -h, --help            show this help message and exit
     --mode {server,client}
                           Node mode: server (DHT server), or client (DHT client)
     --port PORT           Port to listen on (0 for random)
     --demo-interval DEMO_INTERVAL
                           Interval between random walk demonstrations in seconds
     --verbose             Enable verbose logging

Key Features Demonstrated
-------------------------

**Automatic Random Walk Discovery**
The example shows how the Random Walk module automatically:

* Generates random 256-bit peer IDs for discovery queries (see the sketch below)
* Performs concurrent random walks to maximize peer discovery
* Validates discovered peers and adds them to the routing table
* Maintains routing table health through periodic refreshes
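Generating a lookup target is the simplest piece to illustrate. A minimal sketch, assuming targets are uniform 256-bit IDs as described above (the module's own helper may differ):

.. code-block:: python

   import secrets

   def random_walk_target() -> bytes:
       # 32 random bytes = a uniform 256-bit lookup target.
       return secrets.token_bytes(32)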
**Real-time Network Statistics**
The example displays live statistics every 30 seconds (configurable):

* **Routing Table Size**: Number of peers in the Kademlia routing table
* **Connected Peers**: Number of actively connected peers
* **Peerstore Size**: Total number of known peers with addresses

**Connection Management**
The example includes sophisticated connection management:

* Automatically maintains connections to healthy peers
* Filters for compatible peers (TCP + IPv4 addresses)
* Reconnects to maintain optimal network connectivity
* Handles connection failures gracefully

**DHT Integration**
Shows seamless integration between Random Walk and the Kademlia DHT:

* The RT Refresh Manager coordinates with the DHT routing table
* Peer discovery feeds directly into DHT operations
* Both SERVER and CLIENT modes are supported
* Bootstrap connectivity to public IPFS nodes

Understanding the Output
------------------------

When you run the example, you'll see periodic statistics that show how the Random Walk module is working:

* **Initial Phase**: The routing table starts empty and quickly discovers peers
* **Growth Phase**: The routing table size increases as more peers are discovered
* **Maintenance Phase**: The routing table size stabilizes as the system maintains optimal peer connections

The Random Walk module runs automatically in the background, performing peer discovery queries every few minutes to ensure the routing table remains populated with fresh, reachable peers.

Configuration
-------------

The Random Walk module can be configured through the following parameters in ``libp2p.discovery.random_walk.config`` (a usage sketch follows the list):

* ``RANDOM_WALK_ENABLED``: Enable/disable automatic random walks (default: True)
* ``REFRESH_INTERVAL``: Time between automatic refreshes in seconds (default: 300)
* ``RANDOM_WALK_CONCURRENCY``: Number of concurrent random walks (default: 3)
* ``MIN_RT_REFRESH_THRESHOLD``: Minimum routing table size before triggering a refresh (default: 4)
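Since these are module-level constants, one plausible way to adjust them is to override them before the DHT service starts. Treat this as an assumption about the config module's shape, not a documented API:

.. code-block:: python

   import libp2p.discovery.random_walk.config as rw_config

   rw_config.RANDOM_WALK_CONCURRENCY = 5  # more parallel walks
   rw_config.REFRESH_INTERVAL = 120       # refresh every 2 minutes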
See Also
--------

* :doc:`examples.kademlia` - Kademlia DHT value storage and content routing
* :doc:`libp2p.discovery.random_walk` - Random Walk module API documentation
@@ -11,3 +11,8 @@ Examples

   examples.echo
   examples.ping
   examples.pubsub
   examples.circuit_relay
   examples.kademlia
   examples.mDNS
   examples.random_walk
   examples.multiple_connections
@@ -12,10 +12,6 @@ The Python implementation of the libp2p networking stack

   getting_started
   release_notes

.. toctree::
   :maxdepth: 1
   :caption: Community

.. toctree::
   :maxdepth: 1
   :caption: py-libp2p
13 docs/libp2p.discovery.bootstrap.rst Normal file
@@ -0,0 +1,13 @@
libp2p.discovery.bootstrap package
==================================

Submodules
----------

Module contents
---------------

.. automodule:: libp2p.discovery.bootstrap
   :members:
   :undoc-members:
   :show-inheritance:
21 docs/libp2p.discovery.events.rst Normal file
@@ -0,0 +1,21 @@
libp2p.discovery.events package
===============================

Submodules
----------

libp2p.discovery.events.peerDiscovery module
--------------------------------------------

.. automodule:: libp2p.discovery.events.peerDiscovery
   :members:
   :undoc-members:
   :show-inheritance:

Module contents
---------------

.. automodule:: libp2p.discovery.events
   :members:
   :undoc-members:
   :show-inheritance:
45 docs/libp2p.discovery.mdns.rst Normal file
@@ -0,0 +1,45 @@
libp2p.discovery.mdns package
=============================

Submodules
----------

libp2p.discovery.mdns.broadcaster module
----------------------------------------

.. automodule:: libp2p.discovery.mdns.broadcaster
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.discovery.mdns.listener module
-------------------------------------

.. automodule:: libp2p.discovery.mdns.listener
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.discovery.mdns.mdns module
---------------------------------

.. automodule:: libp2p.discovery.mdns.mdns
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.discovery.mdns.utils module
----------------------------------

.. automodule:: libp2p.discovery.mdns.utils
   :members:
   :undoc-members:
   :show-inheritance:

Module contents
---------------

.. automodule:: libp2p.discovery.mdns
   :members:
   :undoc-members:
   :show-inheritance:
48 docs/libp2p.discovery.random_walk.rst Normal file
@@ -0,0 +1,48 @@
libp2p.discovery.random_walk package
====================================

The Random Walk module implements a peer discovery mechanism.
It performs random walks through the DHT network to discover new peers and maintain routing table health through periodic refreshes.

Submodules
----------

libp2p.discovery.random_walk.config module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: libp2p.discovery.random_walk.config
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.discovery.random_walk.exceptions module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: libp2p.discovery.random_walk.exceptions
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.discovery.random_walk.random_walk module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: libp2p.discovery.random_walk.random_walk
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.discovery.random_walk.rt_refresh_manager module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: libp2p.discovery.random_walk.rt_refresh_manager
   :members:
   :undoc-members:
   :show-inheritance:

Module contents
---------------

.. automodule:: libp2p.discovery.random_walk
   :members:
   :undoc-members:
   :show-inheritance:
24 docs/libp2p.discovery.rst Normal file
@@ -0,0 +1,24 @@
libp2p.discovery package
========================

Subpackages
-----------

.. toctree::
   :maxdepth: 4

   libp2p.discovery.bootstrap
   libp2p.discovery.events
   libp2p.discovery.mdns
   libp2p.discovery.random_walk

Submodules
----------

Module contents
---------------

.. automodule:: libp2p.discovery
   :members:
   :undoc-members:
   :show-inheritance:
22 docs/libp2p.kad_dht.pb.rst Normal file
@@ -0,0 +1,22 @@
libp2p.kad\_dht.pb package
==========================

Submodules
----------

libp2p.kad_dht.pb.kademlia_pb2 module
-------------------------------------

.. automodule:: libp2p.kad_dht.pb.kademlia_pb2
   :members:
   :undoc-members:
   :show-inheritance:

Module contents
---------------

.. automodule:: libp2p.kad_dht.pb
   :no-index:
   :members:
   :undoc-members:
   :show-inheritance:
77 docs/libp2p.kad_dht.rst Normal file
@@ -0,0 +1,77 @@
libp2p.kad\_dht package
=======================

Subpackages
-----------

.. toctree::
   :maxdepth: 4

   libp2p.kad_dht.pb

Submodules
----------

libp2p.kad\_dht.kad\_dht module
-------------------------------

.. automodule:: libp2p.kad_dht.kad_dht
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.kad\_dht.peer\_routing module
------------------------------------

.. automodule:: libp2p.kad_dht.peer_routing
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.kad\_dht.provider\_store module
--------------------------------------

.. automodule:: libp2p.kad_dht.provider_store
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.kad\_dht.routing\_table module
-------------------------------------

.. automodule:: libp2p.kad_dht.routing_table
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.kad\_dht.utils module
----------------------------

.. automodule:: libp2p.kad_dht.utils
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.kad\_dht.value\_store module
-----------------------------------

.. automodule:: libp2p.kad_dht.value_store
   :members:
   :undoc-members:
   :show-inheritance:

libp2p.kad\_dht.pb
------------------

.. automodule:: libp2p.kad_dht.pb
   :members:
   :undoc-members:
   :show-inheritance:

Module contents
---------------

.. automodule:: libp2p.kad_dht
   :members:
   :undoc-members:
   :show-inheritance:
22 docs/libp2p.relay.circuit_v2.pb.rst Normal file
@@ -0,0 +1,22 @@
libp2p.relay.circuit_v2.pb package
==================================

Submodules
----------

libp2p.relay.circuit_v2.pb.circuit_pb2 module
---------------------------------------------

.. automodule:: libp2p.relay.circuit_v2.pb.circuit_pb2
   :members:
   :show-inheritance:
   :undoc-members:

Module contents
---------------

.. automodule:: libp2p.relay.circuit_v2.pb
   :members:
   :show-inheritance:
   :undoc-members:
   :no-index:
70 docs/libp2p.relay.circuit_v2.rst Normal file
@@ -0,0 +1,70 @@
libp2p.relay.circuit_v2 package
===============================

Subpackages
-----------

.. toctree::
   :maxdepth: 4

   libp2p.relay.circuit_v2.pb

Submodules
----------

libp2p.relay.circuit_v2.protocol module
---------------------------------------

.. automodule:: libp2p.relay.circuit_v2.protocol
   :members:
   :show-inheritance:
   :undoc-members:

libp2p.relay.circuit_v2.transport module
----------------------------------------

.. automodule:: libp2p.relay.circuit_v2.transport
   :members:
   :show-inheritance:
   :undoc-members:

libp2p.relay.circuit_v2.discovery module
----------------------------------------

.. automodule:: libp2p.relay.circuit_v2.discovery
   :members:
   :show-inheritance:
   :undoc-members:

libp2p.relay.circuit_v2.resources module
----------------------------------------

.. automodule:: libp2p.relay.circuit_v2.resources
   :members:
   :show-inheritance:
   :undoc-members:

libp2p.relay.circuit_v2.config module
-------------------------------------

.. automodule:: libp2p.relay.circuit_v2.config
   :members:
   :show-inheritance:
   :undoc-members:

libp2p.relay.circuit_v2.protocol_buffer module
----------------------------------------------

.. automodule:: libp2p.relay.circuit_v2.protocol_buffer
   :members:
   :show-inheritance:
   :undoc-members:

Module contents
---------------

.. automodule:: libp2p.relay.circuit_v2
   :members:
   :show-inheritance:
   :undoc-members:
   :no-index:
19 docs/libp2p.relay.rst Normal file
@@ -0,0 +1,19 @@
libp2p.relay package
====================

Subpackages
-----------

.. toctree::
   :maxdepth: 4

   libp2p.relay.circuit_v2

Module contents
---------------

.. automodule:: libp2p.relay
   :members:
   :show-inheritance:
   :undoc-members:
   :no-index:
@@ -8,13 +8,16 @@ Subpackages
   :maxdepth: 4

   libp2p.crypto
   libp2p.discovery
   libp2p.host
   libp2p.identity
   libp2p.io
   libp2p.kad_dht
   libp2p.network
   libp2p.peer
   libp2p.protocol_muxer
   libp2p.pubsub
   libp2p.relay
   libp2p.security
   libp2p.stream_muxer
   libp2p.tools
@@ -3,6 +3,110 @@ Release Notes

.. towncrier release notes start

py-libp2p v0.2.9 (2025-07-09)
-----------------------------

Breaking Changes
~~~~~~~~~~~~~~~~

- Reordered the arguments to ``upgrade_security`` to place ``is_initiator`` before ``peer_id``, and made ``peer_id`` optional.
  This allows the method to reflect the fact that peer identity is not required for inbound connections. (`#681 <https://github.com/libp2p/py-libp2p/issues/681>`__)


Bugfixes
~~~~~~~~

- Added timeout wrappers in:

  1. ``multiselect.py``: the ``negotiate`` function
  2. ``multiselect_client.py``: the ``select_one_of`` and ``query_multistream_command`` functions

  to prevent indefinite hangs when a remote peer does not respond. (`#696 <https://github.com/libp2p/py-libp2p/issues/696>`__)
- Aligned stream creation logic with the yamux specification (`#701 <https://github.com/libp2p/py-libp2p/issues/701>`__)
- Fixed an issue in ``Pubsub`` where async validators were not handled reliably under concurrency. Now uses a safe aggregator list for consistent behavior. (`#702 <https://github.com/libp2p/py-libp2p/issues/702>`__)


Features
~~~~~~~~

- Added support for ``Kademlia DHT`` in py-libp2p. (`#579 <https://github.com/libp2p/py-libp2p/issues/579>`__)
- Limited concurrency in ``push_identify_to_peers`` to prevent resource congestion under high peer counts. (`#621 <https://github.com/libp2p/py-libp2p/issues/621>`__)
- Store the public key and peer ID in the peerstore during the handshake.

  Modified the InsecureTransport class to accept an optional peerstore parameter and updated the handshake process to store the received public key and peer ID in the peerstore when available.

  Added test cases to verify:

  1. The peerstore remains unchanged when the handshake fails due to a peer ID mismatch
  2. The handshake correctly adds a public key to a peer ID that already exists in the peerstore but doesn't have a public key yet (`#631 <https://github.com/libp2p/py-libp2p/issues/631>`__)
- Fixed several flow-control and concurrency issues in the ``YamuxStream`` class. Previously, stress-testing revealed that transferring data over ``DEFAULT_WINDOW_SIZE`` would break the stream due to inconsistent window update handling and lock management. The fixes include:

  - Removed sending of window updates during writes to maintain correct flow control.
  - Added proper timeout handling when releasing and acquiring locks to prevent concurrency errors.
  - Corrected the ``read`` function to properly handle window updates for both ``read_until_EOF`` and ``read_n_bytes``.
  - Added event logging at ``send_window_updates`` and ``waiting_for_window_updates`` for better observability. (`#639 <https://github.com/libp2p/py-libp2p/issues/639>`__)
- Added support for ``Multicast DNS`` in py-libp2p (`#649 <https://github.com/libp2p/py-libp2p/issues/649>`__)
- Optimized pubsub publishing to send multiple topics in a single message instead of separate messages per topic. (`#685 <https://github.com/libp2p/py-libp2p/issues/685>`__)
- Optimized pubsub message writing by implementing a write_msg() method that uses pre-allocated buffers and single write operations, improving performance by eliminating separate varint prefix encoding and write operations in FloodSub and GossipSub. (`#687 <https://github.com/libp2p/py-libp2p/issues/687>`__)
- Added peer exchange and backoff logic as part of the Gossipsub v1.1 upgrade (`#690 <https://github.com/libp2p/py-libp2p/issues/690>`__)


Internal Changes - for py-libp2p Contributors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- Added a sparse connect utility function to the pubsub test utilities for creating test networks with configurable connectivity. (`#679 <https://github.com/libp2p/py-libp2p/issues/679>`__)
- Added comprehensive tests for pubsub connection utility functions to verify that degree limits are enforced, excess peers are handled correctly, and edge cases (degree=0, negative values, empty lists) are managed gracefully. (`#707 <https://github.com/libp2p/py-libp2p/issues/707>`__)
- Added extra tests for the identify push concurrency cap under high peer load (`#708 <https://github.com/libp2p/py-libp2p/issues/708>`__)


Miscellaneous Changes
~~~~~~~~~~~~~~~~~~~~~

- `#678 <https://github.com/libp2p/py-libp2p/issues/678>`__, `#684 <https://github.com/libp2p/py-libp2p/issues/684>`__


py-libp2p v0.2.8 (2025-06-10)
-----------------------------

Breaking Changes
~~~~~~~~~~~~~~~~

- The `NetStream.state` property is now async and requires `await`. Update any direct state access to use `await stream.state`. (`#300 <https://github.com/libp2p/py-libp2p/issues/300>`__)


Bugfixes
~~~~~~~~

- Added proper state management and resource cleanup to `NetStream`, fixing memory leaks and improving error handling. (`#300 <https://github.com/libp2p/py-libp2p/issues/300>`__)


Improved Documentation
~~~~~~~~~~~~~~~~~~~~~~

- Updated examples to automatically use a random port when the `-p` flag is not given (`#661 <https://github.com/libp2p/py-libp2p/issues/661>`__)


Features
~~~~~~~~

- Allow passing `listen_addrs` to `new_swarm` to customize swarm listening behavior. (`#616 <https://github.com/libp2p/py-libp2p/issues/616>`__)
- Feature: Support for sending the `ls` command over `multistream-select` to list supported protocols from a remote peer.
  This allows inspecting which protocol handlers a peer supports at runtime. (`#622 <https://github.com/libp2p/py-libp2p/issues/622>`__)
- Implemented AsyncContextManager for IMuxedStream to support ``async with`` (`#629 <https://github.com/libp2p/py-libp2p/issues/629>`__)
- Added a method to compute the time since a peer last published a message, and removed fanout peers based on TTL. (`#636 <https://github.com/libp2p/py-libp2p/issues/636>`__)
- Implemented blacklist management for `pubsub.Pubsub` with methods to get, add, remove, check, and clear blacklisted peer IDs. (`#641 <https://github.com/libp2p/py-libp2p/issues/641>`__)
- Fix: remove expired peers from the peerstore based on TTL (`#650 <https://github.com/libp2p/py-libp2p/issues/650>`__)


Internal Changes - for py-libp2p Contributors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- Modernizes several aspects of the project, notably using ``pyproject.toml`` for project info instead of ``setup.py``, using ``ruff`` to replace several separate linting tools, and ``pyrefly`` in addition to ``mypy`` for typing. Also includes changes across the codebase to conform to new linting and typing rules. (`#618 <https://github.com/libp2p/py-libp2p/issues/618>`__)


Removals
~~~~~~~~

- Removes support for Python 3.9 and updates some code conventions, notably using the ``|`` operator in typing instead of ``Optional`` or ``Union`` (`#618 <https://github.com/libp2p/py-libp2p/issues/618>`__)


py-libp2p v0.2.7 (2025-05-22)
-----------------------------
63 examples/advanced/network_discover.py Normal file
@@ -0,0 +1,63 @@
"""
|
||||
Advanced demonstration of Thin Waist address handling.
|
||||
|
||||
Run:
|
||||
python -m examples.advanced.network_discovery
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from multiaddr import Multiaddr
|
||||
|
||||
try:
|
||||
from libp2p.utils.address_validation import (
|
||||
expand_wildcard_address,
|
||||
get_available_interfaces,
|
||||
get_optimal_binding_address,
|
||||
)
|
||||
except ImportError:
|
||||
# Fallbacks if utilities are missing
|
||||
def get_available_interfaces(port: int, protocol: str = "tcp"):
|
||||
return [Multiaddr(f"/ip4/0.0.0.0/{protocol}/{port}")]
|
||||
|
||||
def expand_wildcard_address(addr: Multiaddr, port: int | None = None):
|
||||
if port is None:
|
||||
return [addr]
|
||||
addr_str = str(addr).rsplit("/", 1)[0]
|
||||
return [Multiaddr(addr_str + f"/{port}")]
|
||||
|
||||
def get_optimal_binding_address(port: int, protocol: str = "tcp"):
|
||||
return Multiaddr(f"/ip4/0.0.0.0/{protocol}/{port}")
|
||||
|
||||
|
||||
def main() -> None:
|
||||
port = 8080
|
||||
interfaces = get_available_interfaces(port)
|
||||
print(f"Discovered interfaces for port {port}:")
|
||||
for a in interfaces:
|
||||
print(f" - {a}")
|
||||
|
||||
wildcard_v4 = Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
|
||||
expanded_v4 = expand_wildcard_address(wildcard_v4)
|
||||
print("\nExpanded IPv4 wildcard:")
|
||||
for a in expanded_v4:
|
||||
print(f" - {a}")
|
||||
|
||||
wildcard_v6 = Multiaddr(f"/ip6/::/tcp/{port}")
|
||||
expanded_v6 = expand_wildcard_address(wildcard_v6)
|
||||
print("\nExpanded IPv6 wildcard:")
|
||||
for a in expanded_v6:
|
||||
print(f" - {a}")
|
||||
|
||||
print("\nOptimal binding address heuristic result:")
|
||||
print(f" -> {get_optimal_binding_address(port)}")
|
||||
|
||||
override_port = 9000
|
||||
overridden = expand_wildcard_address(wildcard_v4, port=override_port)
|
||||
print(f"\nPort override expansion to {override_port}:")
|
||||
for a in overridden:
|
||||
print(f" - {a}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
136 examples/bootstrap/bootstrap.py Normal file
@@ -0,0 +1,136 @@
import argparse
import logging
import secrets

import multiaddr
import trio

from libp2p import new_host
from libp2p.abc import PeerInfo
from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.discovery.events.peerDiscovery import peerDiscovery

# Configure logging
logger = logging.getLogger("libp2p.discovery.bootstrap")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(
    logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
logger.addHandler(handler)

# Configure the root logger to only show warnings and above to reduce noise.
# This prevents verbose DEBUG messages from multiaddr, DNS, etc.
logging.getLogger().setLevel(logging.WARNING)

# Specifically silence noisy libraries
logging.getLogger("multiaddr").setLevel(logging.WARNING)
logging.getLogger("root").setLevel(logging.WARNING)


def on_peer_discovery(peer_info: PeerInfo) -> None:
    """Handler for peer discovery events."""
    logger.info(f"🔍 Discovered peer: {peer_info.peer_id}")
    logger.debug(f"   Addresses: {[str(addr) for addr in peer_info.addrs]}")


# Example bootstrap peers
BOOTSTRAP_PEERS = [
    "/dnsaddr/github.com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
    "/dnsaddr/cloudflare.com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
    "/dnsaddr/google.com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
    "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
    "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
    "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
    "/ip6/2604:a880:1:20::203:d001/tcp/4001/p2p/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM",
    "/ip4/128.199.219.111/tcp/4001/p2p/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64",
    "/ip4/104.236.76.40/tcp/4001/p2p/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64",
    "/ip4/178.62.158.247/tcp/4001/p2p/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd",
    "/ip6/2400:6180:0:d0::151:6001/tcp/4001/p2p/QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu",
    "/ip6/2a03:b0c0:0:1010::23:1001/tcp/4001/p2p/QmSoLueR4xBeUbY9WZ9xGUUxunbKWcrNFTDAadQJmocnWm",
]


async def run(port: int, bootstrap_addrs: list[str]) -> None:
    """Run the bootstrap discovery example."""
    # Generate a key pair
    secret = secrets.token_bytes(32)
    key_pair = create_new_key_pair(secret)

    # Create the listen address
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Register the peer discovery handler
    peerDiscovery.register_peer_discovered_handler(on_peer_discovery)

    logger.info("🚀 Starting Bootstrap Discovery Example")
    logger.info(f"📍 Listening on: {listen_addr}")
    logger.info(f"🌐 Bootstrap peers: {len(bootstrap_addrs)}")

    print("\n" + "=" * 60)
    print("Bootstrap Discovery Example")
    print("=" * 60)
    print("This example demonstrates connecting to bootstrap peers.")
    print("Watch the logs for peer discovery events!")
    print("Press Ctrl+C to exit.")
    print("=" * 60)

    # Create and run the host with bootstrap discovery
    host = new_host(key_pair=key_pair, bootstrap=bootstrap_addrs)

    try:
        async with host.run(listen_addrs=[listen_addr]):
            # Keep running and log peer discovery events
            await trio.sleep_forever()
    except KeyboardInterrupt:
        logger.info("👋 Shutting down...")


def main() -> None:
    """Main entry point."""
    description = """
    Bootstrap Discovery Example for py-libp2p

    This example demonstrates how to use bootstrap peers for peer discovery.
    Bootstrap peers are predefined peers that help new nodes join the network.

    Usage:
        python bootstrap.py -p 8000
        python bootstrap.py -p 8001 --custom-bootstrap \\
            "/ip4/127.0.0.1/tcp/8000/p2p/QmYourPeerID"
    """

    parser = argparse.ArgumentParser(
        description=description, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "-p", "--port", default=0, type=int, help="Port to listen on (default: random)"
    )
    parser.add_argument(
        "--custom-bootstrap",
        nargs="*",
        help="Custom bootstrap addresses (space-separated)",
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Enable verbose output"
    )

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)

    # Use custom bootstrap addresses if provided, otherwise use the defaults
    bootstrap_addrs = args.custom_bootstrap if args.custom_bootstrap else BOOTSTRAP_PEERS

    try:
        trio.run(run, args.port, bootstrap_addrs)
    except KeyboardInterrupt:
        logger.info("Exiting...")


if __name__ == "__main__":
    main()
@@ -40,10 +40,12 @@ async def write_data(stream: INetStream) -> None:


async def run(port: int, destination: str) -> None:
    localhost_ip = "127.0.0.1"
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
    host = new_host()
    async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
        # Start the peer-store cleanup task
        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

        if not destination:  # its the server

            async def stream_handler(stream: INetStream) -> None:
@@ -54,8 +56,8 @@ async def run(port: int, destination: str) -> None:

            print(
                "Run this from the same folder in another console:\n\n"
                f"chat-demo -p {int(port) + 1} "
                f"-d /ip4/{localhost_ip}/tcp/{port}/p2p/{host.get_id().pretty()}\n"
                f"chat-demo "
                f"-d {host.get_addrs()[0]}\n"
            )
            print("Waiting for incoming connection...")

@@ -87,9 +89,7 @@ def main() -> None:
        "/ip4/127.0.0.1/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "-p", "--port", default=8000, type=int, help="source port number"
    )
    parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
    parser.add_argument(
        "-d",
        "--destination",
@@ -98,9 +98,6 @@ def main() -> None:
    )
    args = parser.parse_args()

    if not args.port:
        raise RuntimeError("was not able to determine a local port")

    try:
        trio.run(run, *(args.port, args.destination))
    except KeyboardInterrupt:
@@ -27,6 +27,9 @@ async def main():
        # secure_bytes_provider: Optional function to generate secure random bytes
        # (defaults to secrets.token_bytes)
        secure_bytes_provider=None,  # Use default implementation
        # peerstore: Optional peerstore to store peer IDs and public keys
        # (defaults to None)
        peerstore=None,
    )

    # Create a security options dictionary mapping protocol ID to transport
@@ -9,8 +9,10 @@ from libp2p import (
from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.security.noise.transport import PROTOCOL_ID as NOISE_PROTOCOL_ID
from libp2p.security.noise.transport import Transport as NoiseTransport
from libp2p.security.noise.transport import (
    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)


async def main():
@@ -22,13 +24,8 @@ async def main():
    noise_transport = NoiseTransport(
        # local_key_pair: The key pair used for libp2p identity and authentication
        libp2p_keypair=key_pair,
        # noise_privkey: The private key used for Noise protocol encryption
        noise_privkey=key_pair.private_key,
        # early_data: Optional data to send during the handshake
        # (None means no early data)
        early_data=None,
        # with_noise_pipes: Whether to use Noise pipes for additional security features
        with_noise_pipes=False,
        # TODO: add early data
    )

    # Create a security options dictionary mapping protocol ID to transport
@@ -9,8 +9,10 @@ from libp2p import (
from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.security.secio.transport import ID as SECIO_PROTOCOL_ID
from libp2p.security.secio.transport import Transport as SecioTransport
from libp2p.security.secio.transport import (
    ID as SECIO_PROTOCOL_ID,
    Transport as SecioTransport,
)


async def main():
@@ -22,9 +24,6 @@ async def main():
    secio_transport = SecioTransport(
        # local_key_pair: The key pair used for libp2p identity and authentication
        local_key_pair=key_pair,
        # secure_bytes_provider: Optional function to generate secure random bytes
        # (defaults to secrets.token_bytes)
        secure_bytes_provider=None,  # Use default implementation
    )

    # Create a security options dictionary mapping protocol ID to transport
@@ -9,10 +9,9 @@ from libp2p import (
from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.security.noise.transport import PROTOCOL_ID as NOISE_PROTOCOL_ID
from libp2p.security.noise.transport import Transport as NoiseTransport
from libp2p.stream_muxer.mplex.mplex import (
    MPLEX_PROTOCOL_ID,
from libp2p.security.noise.transport import (
    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)


@@ -29,22 +28,14 @@ async def main():
        noise_privkey=key_pair.private_key,
        # early_data: Optional data to send during the handshake
        # (None means no early data)
        early_data=None,
        # with_noise_pipes: Whether to use Noise pipes for additional security features
        with_noise_pipes=False,
        # TODO: add early data
    )

    # Create a security options dictionary mapping protocol ID to transport
    security_options = {NOISE_PROTOCOL_ID: noise_transport}

    # Create a muxer options dictionary mapping protocol ID to muxer class
    # We don't need to instantiate the muxer here, the host will do that for us
    muxer_options = {MPLEX_PROTOCOL_ID: None}

    # Create a host with the key pair, Noise security, and mplex multiplexer
    host = new_host(
        key_pair=key_pair, sec_opt=security_options, muxer_opt=muxer_options
    )
    host = new_host(key_pair=key_pair, sec_opt=security_options)

    # Configure the listening address
    port = 8000
263 examples/doc-examples/example_net_stream.py Normal file
@@ -0,0 +1,263 @@
"""
|
||||
Enhanced NetStream Example for py-libp2p with State Management
|
||||
|
||||
This example demonstrates the new NetStream features including:
|
||||
- State tracking and transitions
|
||||
- Proper error handling and validation
|
||||
- Resource cleanup and event notifications
|
||||
- Thread-safe operations with Trio locks
|
||||
|
||||
Based on the standard echo demo but enhanced to show NetStream state management.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import random
|
||||
import secrets
|
||||
|
||||
import multiaddr
|
||||
import trio
|
||||
|
||||
from libp2p import (
|
||||
new_host,
|
||||
)
|
||||
from libp2p.crypto.secp256k1 import (
|
||||
create_new_key_pair,
|
||||
)
|
||||
from libp2p.custom_types import (
|
||||
TProtocol,
|
||||
)
|
||||
from libp2p.network.stream.exceptions import (
|
||||
StreamClosed,
|
||||
StreamEOF,
|
||||
StreamReset,
|
||||
)
|
||||
from libp2p.network.stream.net_stream import (
|
||||
NetStream,
|
||||
StreamState,
|
||||
)
|
||||
from libp2p.peer.peerinfo import (
|
||||
info_from_p2p_addr,
|
||||
)
|
||||
|
||||
PROTOCOL_ID = TProtocol("/echo/1.0.0")
|
||||
|
||||
|
||||
async def enhanced_echo_handler(stream: NetStream) -> None:
|
||||
"""
|
||||
Enhanced echo handler that demonstrates NetStream state management.
|
||||
"""
|
||||
print(f"New connection established: {stream}")
|
||||
print(f"Initial stream state: {await stream.state}")
|
||||
|
||||
try:
|
||||
# Verify stream is in expected initial state
|
||||
assert await stream.state == StreamState.OPEN
|
||||
assert await stream.is_readable()
|
||||
assert await stream.is_writable()
|
||||
print("✓ Stream initialized in OPEN state")
|
||||
|
||||
# Read incoming data with proper state checking
|
||||
print("Waiting for client data...")
|
||||
|
||||
while await stream.is_readable():
|
||||
try:
|
||||
# Read data from client
|
||||
data = await stream.read(1024)
|
||||
if not data:
|
||||
print("Received empty data, client may have closed")
|
||||
break
|
||||
|
||||
print(f"Received: {data.decode('utf-8').strip()}")
|
||||
|
||||
# Check if we can still write before echoing
|
||||
if await stream.is_writable():
|
||||
await stream.write(data)
|
||||
print(f"Echoed: {data.decode('utf-8').strip()}")
|
||||
else:
|
||||
print("Cannot echo - stream not writable")
|
||||
break
|
||||
|
||||
except StreamEOF:
|
||||
print("Client closed their write side (EOF)")
|
||||
break
|
||||
except StreamReset:
|
||||
print("Stream was reset by client")
|
||||
return
|
||||
except StreamClosed as e:
|
||||
print(f"Stream operation failed: {e}")
|
||||
break
|
||||
|
||||
# Demonstrate graceful closure
|
||||
current_state = await stream.state
|
||||
print(f"Current state before close: {current_state}")
|
||||
|
||||
if current_state not in [StreamState.CLOSE_BOTH, StreamState.RESET]:
|
||||
await stream.close()
|
||||
print("Server closed write side")
|
||||
|
||||
final_state = await stream.state
|
||||
print(f"Final stream state: {final_state}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Handler error: {e}")
|
||||
# Reset stream on unexpected errors
|
||||
if await stream.state not in [StreamState.RESET, StreamState.CLOSE_BOTH]:
|
||||
await stream.reset()
|
||||
print("Stream reset due to error")
|
||||
|
||||
|
||||
async def enhanced_client_demo(stream: NetStream) -> None:
|
||||
"""
|
||||
Enhanced client that demonstrates various NetStream state scenarios.
|
||||
"""
|
||||
print(f"Client stream established: {stream}")
|
||||
print(f"Initial state: {await stream.state}")
|
||||
|
||||
try:
|
||||
# Verify initial state
|
||||
assert await stream.state == StreamState.OPEN
|
||||
print("✓ Client stream in OPEN state")
|
||||
|
||||
# Scenario 1: Normal communication
|
||||
message = b"Hello from enhanced NetStream client!\n"
|
||||
|
||||
if await stream.is_writable():
|
||||
await stream.write(message)
|
||||
print(f"Sent: {message.decode('utf-8').strip()}")
|
||||
else:
|
||||
print("Cannot write - stream not writable")
|
||||
return
|
||||
|
||||
# Close write side to signal EOF to server
|
||||
await stream.close()
|
||||
print("Client closed write side")
|
||||
|
||||
# Verify state transition
|
||||
state_after_close = await stream.state
|
||||
print(f"State after close: {state_after_close}")
|
||||
assert state_after_close == StreamState.CLOSE_WRITE
|
||||
assert await stream.is_readable() # Should still be readable
|
||||
assert not await stream.is_writable() # Should not be writable
|
||||
|
||||
# Try to write (should fail)
|
||||
try:
|
||||
await stream.write(b"This should fail")
|
||||
print("ERROR: Write succeeded when it should have failed!")
|
||||
except StreamClosed as e:
|
||||
print(f"✓ Expected error when writing to closed stream: {e}")
|
||||
|
||||
# Read the echo response
|
||||
if await stream.is_readable():
|
||||
try:
|
||||
response = await stream.read()
|
||||
print(f"Received echo: {response.decode('utf-8').strip()}")
|
||||
except StreamEOF:
|
||||
print("Server closed their write side")
|
||||
except StreamReset:
|
||||
print("Stream was reset")
|
||||
|
||||
# Check final state
|
||||
final_state = await stream.state
|
||||
print(f"Final client state: {final_state}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Client error: {e}")
|
||||
# Reset on error
|
||||
await stream.reset()
|
||||
print("Client reset stream due to error")
|
||||
|
||||
|
||||
async def run_enhanced_demo(
|
||||
port: int, destination: str, seed: int | None = None
|
||||
) -> None:
|
||||
"""
|
||||
Run enhanced echo demo with NetStream state management.
|
||||
"""
|
||||
listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
|
||||
|
||||
# Generate or use provided key
|
||||
if seed:
|
||||
random.seed(seed)
|
||||
secret_number = random.getrandbits(32 * 8)
|
||||
secret = secret_number.to_bytes(length=32, byteorder="big")
|
||||
else:
|
||||
secret = secrets.token_bytes(32)
|
||||
|
||||
host = new_host(key_pair=create_new_key_pair(secret))
|
||||
|
||||
async with host.run(listen_addrs=[listen_addr]):
|
||||
print(f"Host ID: {host.get_id().to_string()}")
|
||||
print("=" * 60)
|
||||
|
||||
if not destination: # Server mode
|
||||
print("🖥️ ENHANCED ECHO SERVER MODE")
|
||||
print("=" * 60)
|
||||
|
||||
# type: ignore: Stream is type of NetStream
|
||||
host.set_stream_handler(PROTOCOL_ID, enhanced_echo_handler)
|
||||
|
||||
print(
|
||||
"Run client from another console:\n"
|
||||
f"python3 example_net_stream.py "
|
||||
f"-d {host.get_addrs()[0]}\n"
|
||||
)
|
||||
print("Waiting for connections...")
|
||||
print("Press Ctrl+C to stop server")
|
||||
await trio.sleep_forever()
|
||||
|
||||
else: # Client mode
|
||||
print("📱 ENHANCED ECHO CLIENT MODE")
|
||||
print("=" * 60)
|
||||
|
||||
# Connect to server
|
||||
maddr = multiaddr.Multiaddr(destination)
|
||||
info = info_from_p2p_addr(maddr)
|
||||
await host.connect(info)
|
||||
print(f"Connected to server: {info.peer_id.pretty()}")
|
||||
|
||||
# Create stream and run enhanced demo
|
||||
stream = await host.new_stream(info.peer_id, [PROTOCOL_ID])
|
||||
if isinstance(stream, NetStream):
|
||||
await enhanced_client_demo(stream)
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("CLIENT DEMO COMPLETE")
|
||||
|
||||
|
||||
def main() -> None:
|
||||
example_maddr = (
|
||||
"/ip4/127.0.0.1/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
|
||||
)
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter
|
||||
)
|
||||
parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
|
||||
parser.add_argument(
|
||||
"-d",
|
||||
"--destination",
|
||||
type=str,
|
||||
help=f"destination multiaddr string, e.g. {example_maddr}",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-s",
|
||||
"--seed",
|
||||
type=int,
|
||||
help="seed for deterministic peer ID generation",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--demo-states", action="store_true", help="run state transition demo only"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
trio.run(run_enhanced_demo, args.port, args.destination, args.seed)
|
||||
except KeyboardInterrupt:
|
||||
print("\n👋 Demo interrupted by user")
|
||||
except Exception as e:
|
||||
print(f"❌ Demo failed: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -12,10 +12,9 @@ from libp2p.crypto.secp256k1 import (
from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)
from libp2p.security.noise.transport import PROTOCOL_ID as NOISE_PROTOCOL_ID
from libp2p.security.noise.transport import Transport as NoiseTransport
from libp2p.stream_muxer.mplex.mplex import (
    MPLEX_PROTOCOL_ID,
from libp2p.security.noise.transport import (
    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)


@@ -32,22 +31,14 @@ async def main():
        noise_privkey=key_pair.private_key,
        # early_data: Optional data to send during the handshake
        # (None means no early data)
        early_data=None,
        # with_noise_pipes: Whether to use Noise pipes for additional security features
        with_noise_pipes=False,
        # TODO: add early data
    )

    # Create a security options dictionary mapping protocol ID to transport
    security_options = {NOISE_PROTOCOL_ID: noise_transport}

    # Create a muxer options dictionary mapping protocol ID to muxer class
    # We don't need to instantiate the muxer here, the host will do that for us
    muxer_options = {MPLEX_PROTOCOL_ID: None}

    # Create a host with the key pair, Noise security, and mplex multiplexer
    host = new_host(
        key_pair=key_pair, sec_opt=security_options, muxer_opt=muxer_options
    )
    host = new_host(key_pair=key_pair, sec_opt=security_options)

    # Configure the listening address
    port = 8000
@@ -9,10 +9,9 @@ from libp2p import (
from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.security.noise.transport import PROTOCOL_ID as NOISE_PROTOCOL_ID
from libp2p.security.noise.transport import Transport as NoiseTransport
from libp2p.stream_muxer.mplex.mplex import (
    MPLEX_PROTOCOL_ID,
from libp2p.security.noise.transport import (
    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)


@@ -29,22 +28,14 @@ async def main():
        noise_privkey=key_pair.private_key,
        # early_data: Optional data to send during the handshake
        # (None means no early data)
        early_data=None,
        # with_noise_pipes: Whether to use Noise pipes for additional security features
        with_noise_pipes=False,
        # TODO: add early data
    )

    # Create a security options dictionary mapping protocol ID to transport
    security_options = {NOISE_PROTOCOL_ID: noise_transport}

    # Create a muxer options dictionary mapping protocol ID to muxer class
    # We don't need to instantiate the muxer here, the host will do that for us
    muxer_options = {MPLEX_PROTOCOL_ID: None}

    # Create a host with the key pair, Noise security, and mplex multiplexer
    host = new_host(
        key_pair=key_pair, sec_opt=security_options, muxer_opt=muxer_options
    )
    host = new_host(key_pair=key_pair, sec_opt=security_options)

    # Configure the listening address
    port = 8000
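A minimal sketch of the host construction after this change. The libp2p_keypair keyword is an assumption here (the hunks above only show noise_privkey); the muxer is left to the host's defaults:

import secrets

from libp2p import new_host
from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.security.noise.transport import (
    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)

key_pair = create_new_key_pair(secrets.token_bytes(32))
noise_transport = NoiseTransport(
    libp2p_keypair=key_pair,  # assumed keyword; only noise_privkey appears above
    noise_privkey=key_pair.private_key,
)
# Security transports are keyed by protocol ID; with no muxer_opt the host
# falls back to its default stream muxer
host = new_host(key_pair=key_pair, sec_opt={NOISE_PROTOCOL_ID: noise_transport})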
examples/doc-examples/multiple_connections_example.py (new file, 170 lines)
@@ -0,0 +1,170 @@
#!/usr/bin/env python3
"""
Example demonstrating multiple connections per peer support in libp2p.

This example shows how to:
1. Configure multiple connections per peer
2. Use different load balancing strategies
3. Access multiple connections through the new API
4. Maintain backward compatibility
"""

import logging

import trio

from libp2p import new_swarm
from libp2p.network.swarm import ConnectionConfig, RetryConfig

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def example_basic_multiple_connections() -> None:
    """Example of basic multiple connections per peer usage."""
    logger.info("Creating swarm with multiple connections support...")

    # Create swarm with default configuration
    swarm = new_swarm()
    default_connection = ConnectionConfig()

    logger.info(f"Swarm created with peer ID: {swarm.get_peer_id()}")
    logger.info(
        f"Connection config: max_connections_per_peer="
        f"{default_connection.max_connections_per_peer}"
    )

    await swarm.close()
    logger.info("Basic multiple connections example completed")


async def example_custom_connection_config() -> None:
    """Example of custom connection configuration."""
    logger.info("Creating swarm with custom connection configuration...")

    # Custom connection configuration for high-performance scenarios
    connection_config = ConnectionConfig(
        max_connections_per_peer=5,  # More connections per peer
        connection_timeout=60.0,  # Longer timeout
        load_balancing_strategy="least_loaded",  # Use least loaded strategy
    )

    # Create swarm with custom connection config
    swarm = new_swarm(connection_config=connection_config)

    logger.info("Custom connection config applied:")
    logger.info(
        f"  Max connections per peer: {connection_config.max_connections_per_peer}"
    )
    logger.info(f"  Connection timeout: {connection_config.connection_timeout}s")
    logger.info(
        f"  Load balancing strategy: {connection_config.load_balancing_strategy}"
    )

    await swarm.close()
    logger.info("Custom connection config example completed")


async def example_multiple_connections_api() -> None:
    """Example of using the new multiple connections API."""
    logger.info("Demonstrating multiple connections API...")

    connection_config = ConnectionConfig(
        max_connections_per_peer=3, load_balancing_strategy="round_robin"
    )

    swarm = new_swarm(connection_config=connection_config)

    logger.info("Multiple connections API features:")
    logger.info("  - dial_peer() returns list[INetConn]")
    logger.info("  - get_connections(peer_id) returns list[INetConn]")
    logger.info("  - get_connections_map() returns dict[ID, list[INetConn]]")
    logger.info(
        "  - get_connection(peer_id) returns INetConn | None (backward compatibility)"
    )

    await swarm.close()
    logger.info("Multiple connections API example completed")


async def example_backward_compatibility() -> None:
    """Example of backward compatibility features."""
    logger.info("Demonstrating backward compatibility...")

    swarm = new_swarm()

    logger.info("Backward compatibility features:")
    logger.info("  - connections_legacy property provides 1:1 mapping")
    logger.info("  - get_connection() method for single connection access")
    logger.info("  - Existing code continues to work")

    await swarm.close()
    logger.info("Backward compatibility example completed")


async def example_production_ready_config() -> None:
    """Example of production-ready configuration."""
    logger.info("Creating swarm with production-ready configuration...")

    # Production-ready retry configuration
    retry_config = RetryConfig(
        max_retries=3,  # Reasonable retry limit
        initial_delay=0.1,  # Quick initial retry
        max_delay=30.0,  # Cap exponential backoff
        backoff_multiplier=2.0,  # Standard exponential backoff
        jitter_factor=0.1,  # Small jitter to prevent thundering herd
    )

    # Production-ready connection configuration
    connection_config = ConnectionConfig(
        max_connections_per_peer=3,  # Balance between performance and resource usage
        connection_timeout=30.0,  # Reasonable timeout
        load_balancing_strategy="round_robin",  # Simple, predictable strategy
    )

    # Create swarm with production config
    swarm = new_swarm(retry_config=retry_config, connection_config=connection_config)

    logger.info("Production-ready configuration applied:")
    logger.info(
        f"  Retry: {retry_config.max_retries} retries, "
        f"{retry_config.max_delay}s max delay"
    )
    logger.info(f"  Connections: {connection_config.max_connections_per_peer} per peer")
    logger.info(f"  Load balancing: {connection_config.load_balancing_strategy}")

    await swarm.close()
    logger.info("Production-ready configuration example completed")


async def main() -> None:
    """Run all examples."""
    logger.info("Multiple Connections Per Peer Examples")
    logger.info("=" * 50)

    try:
        await example_basic_multiple_connections()
        logger.info("-" * 30)

        await example_custom_connection_config()
        logger.info("-" * 30)

        await example_multiple_connections_api()
        logger.info("-" * 30)

        await example_backward_compatibility()
        logger.info("-" * 30)

        await example_production_ready_config()
        logger.info("-" * 30)

        logger.info("All examples completed successfully!")

    except Exception as e:
        logger.error(f"Example failed: {e}")
        raise


if __name__ == "__main__":
    trio.run(main)
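The log lines above name the new API without exercising it. A hypothetical sketch of dialing with connection fan-out, assuming the peer's addresses are already in the swarm's peerstore:

from libp2p import new_swarm
from libp2p.network.swarm import ConnectionConfig


async def dial_with_fanout(peer_id):
    # Up to three parallel connections, rotated round-robin per the config
    swarm = new_swarm(
        connection_config=ConnectionConfig(
            max_connections_per_peer=3,
            load_balancing_strategy="round_robin",
        )
    )
    try:
        conns = await swarm.dial_peer(peer_id)  # list[INetConn], per the API above
        print(f"opened {len(conns)} connection(s)")
        # Backward-compatible single-connection view, as listed above
        return swarm.get_connection(peer_id)
    finally:
        await swarm.close()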
@@ -1,4 +1,6 @@
import argparse
import random
import secrets

import multiaddr
import trio
@@ -12,49 +14,71 @@ from libp2p.crypto.secp256k1 import (
from libp2p.custom_types import (
    TProtocol,
)
from libp2p.network.stream.exceptions import (
    StreamEOF,
)
from libp2p.network.stream.net_stream import (
    INetStream,
)
from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)
from libp2p.utils.address_validation import (
    find_free_port,
    get_available_interfaces,
)

PROTOCOL_ID = TProtocol("/echo/1.0.0")
MAX_READ_LEN = 2**32 - 1


async def _echo_stream_handler(stream: INetStream) -> None:
    # Wait until EOF
    msg = await stream.read()
    await stream.write(msg)
    await stream.close()
    try:
        peer_id = stream.muxed_conn.peer_id
        print(f"Received connection from {peer_id}")
        # Wait until EOF
        msg = await stream.read(MAX_READ_LEN)
        print(f"Echoing message: {msg.decode('utf-8')}")
        await stream.write(msg)
    except StreamEOF:
        print("Stream closed by remote peer.")
    except Exception as e:
        print(f"Error in echo handler: {e}")
    finally:
        await stream.close()


async def run(port: int, destination: str, seed: int = None) -> None:
    localhost_ip = "127.0.0.1"
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
async def run(port: int, destination: str, seed: int | None = None) -> None:
    if port <= 0:
        port = find_free_port()
    listen_addr = get_available_interfaces(port)

    if seed:
        import random

        random.seed(seed)
        secret_number = random.getrandbits(32 * 8)
        secret = secret_number.to_bytes(length=32, byteorder="big")
    else:
        import secrets

        secret = secrets.token_bytes(32)

    host = new_host(key_pair=create_new_key_pair(secret))
    async with host.run(listen_addrs=[listen_addr]):
    async with host.run(listen_addrs=listen_addr), trio.open_nursery() as nursery:
        # Start the peer-store cleanup task
        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

        print(f"I am {host.get_id().to_string()}")

        if not destination:  # it's the server
            host.set_stream_handler(PROTOCOL_ID, _echo_stream_handler)

            # Print all listen addresses with peer ID (JS parity)
            print("Listener ready, listening on:\n")
            peer_id = host.get_id().to_string()
            for addr in listen_addr:
                print(f"{addr}/p2p/{peer_id}")

            print(
                "Run this from the same folder in another console:\n\n"
                f"echo-demo -p {int(port) + 1} "
                f"-d /ip4/{localhost_ip}/tcp/{port}/p2p/{host.get_id().pretty()}\n"
                "\nRun this from the same folder in another console:\n\n"
                f"echo-demo -d {host.get_addrs()[0]}\n"
            )
            print("Waiting for incoming connections...")
            await trio.sleep_forever()
@@ -73,9 +97,8 @@ async def run(port: int, destination: str, seed: int = None) -> None:
            msg = b"hi, there!\n"

            await stream.write(msg)
            # Notify the other side about EOF
            await stream.close()
            response = await stream.read()
            await stream.close()

            print(f"Sent: {msg.decode('utf-8')}")
            print(f"Got: {response.decode('utf-8')}")
@@ -94,9 +117,7 @@ def main() -> None:
        "/ip4/127.0.0.1/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "-p", "--port", default=8000, type=int, help="source port number"
    )
    parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
    parser.add_argument(
        "-d",
        "--destination",
@@ -110,10 +131,6 @@ def main() -> None:
        help="provide a seed to the random number generator (e.g. to fix peer IDs across runs)",  # noqa: E501
    )
    args = parser.parse_args()

    if not args.port:
        raise RuntimeError("was not able to determine a local port")

    try:
        trio.run(run, args.port, args.destination, args.seed)
    except KeyboardInterrupt:
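A minimal client-side sketch for the echo protocol above, mirroring the write/read/close order in the hunk. The protocol ID is restated so the snippet is self-contained:

import multiaddr
import trio

from libp2p import new_host
from libp2p.custom_types import TProtocol
from libp2p.peer.peerinfo import info_from_p2p_addr

ECHO = TProtocol("/echo/1.0.0")  # same protocol ID as the module above


async def echo_once(destination: str) -> None:
    host = new_host()
    listen = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")
    async with host.run(listen_addrs=[listen]):
        info = info_from_p2p_addr(multiaddr.Multiaddr(destination))
        await host.connect(info)
        stream = await host.new_stream(info.peer_id, [ECHO])
        await stream.write(b"hi, there!\n")
        print((await stream.read()).decode())
        await stream.close()

# trio.run(echo_once, "<listener multiaddr printed by the server>")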
@@ -1,6 +1,7 @@
import argparse
import base64
import logging
import sys

import multiaddr
import trio
@@ -8,10 +9,13 @@ import trio
from libp2p import (
    new_host,
)
from libp2p.identity.identify.identify import ID as IDENTIFY_PROTOCOL_ID
from libp2p.identity.identify.pb.identify_pb2 import (
    Identify,
from libp2p.identity.identify.identify import (
    ID as IDENTIFY_PROTOCOL_ID,
    identify_handler_for,
    parse_identify_response,
)
from libp2p.identity.identify.pb.identify_pb2 import Identify
from libp2p.peer.envelope import debug_dump_envelope, unmarshal_envelope
from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)
@@ -30,10 +34,11 @@ def decode_multiaddrs(raw_addrs):
    return decoded_addrs


def print_identify_response(identify_response):
def print_identify_response(identify_response: Identify):
    """Pretty-print Identify response."""
    public_key_b64 = base64.b64encode(identify_response.public_key).decode("utf-8")
    listen_addrs = decode_multiaddrs(identify_response.listen_addrs)
    signed_peer_record = unmarshal_envelope(identify_response.signedPeerRecord)
    try:
        observed_addr_decoded = decode_multiaddrs([identify_response.observed_addr])
    except Exception:
@@ -49,8 +54,10 @@ def print_identify_response(identify_response):
        f"  Agent Version: {identify_response.agent_version}"
    )

    debug_dump_envelope(signed_peer_record)


async def run(port: int, destination: str) -> None:
async def run(port: int, destination: str, use_varint_format: bool = True) -> None:
    localhost_ip = "0.0.0.0"

    if not destination:
@@ -58,39 +65,159 @@ async def run(port: int, destination: str) -> None:
        listen_addr = multiaddr.Multiaddr(f"/ip4/{localhost_ip}/tcp/{port}")
        host_a = new_host()

        async with host_a.run(listen_addrs=[listen_addr]):
        # Set up identify handler with specified format
        # Set use_varint_format = False, if want to checkout the Signed-PeerRecord
        identify_handler = identify_handler_for(
            host_a, use_varint_format=use_varint_format
        )
        host_a.set_stream_handler(IDENTIFY_PROTOCOL_ID, identify_handler)

        async with (
            host_a.run(listen_addrs=[listen_addr]),
            trio.open_nursery() as nursery,
        ):
            # Start the peer-store cleanup task
            nursery.start_soon(host_a.get_peerstore().start_cleanup_task, 60)

            # Get the actual address and replace 0.0.0.0 with 127.0.0.1 for client
            # connections
            server_addr = str(host_a.get_addrs()[0])
            client_addr = server_addr.replace("/ip4/0.0.0.0/", "/ip4/127.0.0.1/")

            format_name = "length-prefixed" if use_varint_format else "raw protobuf"
            format_flag = "--raw-format" if not use_varint_format else ""
            print(
                "First host listening. Run this from another console:\n\n"
                f"identify-demo -p {int(port) + 1} "
                f"-d /ip4/{localhost_ip}/tcp/{port}/p2p/{host_a.get_id().pretty()}\n"
                f"First host listening (using {format_name} format). "
                f"Run this from another console:\n\n"
                f"identify-demo {format_flag} -d {client_addr}\n"
            )
            print("Waiting for incoming identify request...")
            await trio.sleep_forever()

            # Add a custom handler to show connection events
            async def custom_identify_handler(stream):
                peer_id = stream.muxed_conn.peer_id
                print(f"\n🔗 Received identify request from peer: {peer_id}")

                # Show remote address in multiaddr format
                try:
                    from libp2p.identity.identify.identify import (
                        _remote_address_to_multiaddr,
                    )

                    remote_address = stream.get_remote_address()
                    if remote_address:
                        observed_multiaddr = _remote_address_to_multiaddr(
                            remote_address
                        )
                        # Add the peer ID to create a complete multiaddr
                        complete_multiaddr = f"{observed_multiaddr}/p2p/{peer_id}"
                        print(f"   Remote address: {complete_multiaddr}")
                    else:
                        print(f"   Remote address: {remote_address}")
                except Exception:
                    print(f"   Remote address: {stream.get_remote_address()}")

                # Call the original handler
                await identify_handler(stream)

                print(f"✅ Successfully processed identify request from {peer_id}")

            # Replace the handler with our custom one
            host_a.set_stream_handler(IDENTIFY_PROTOCOL_ID, custom_identify_handler)

            try:
                await trio.sleep_forever()
            except KeyboardInterrupt:
                print("\n🛑 Shutting down listener...")
                logger.info("Listener interrupted by user")
                return

    else:
        # Create second host (dialer)
        print(f"dialer (host_b) listening on /ip4/{localhost_ip}/tcp/{port}")
        listen_addr = multiaddr.Multiaddr(f"/ip4/{localhost_ip}/tcp/{port}")
        host_b = new_host()

        async with host_b.run(listen_addrs=[listen_addr]):
        async with (
            host_b.run(listen_addrs=[listen_addr]),
            trio.open_nursery() as nursery,
        ):
            # Start the peer-store cleanup task
            nursery.start_soon(host_b.get_peerstore().start_cleanup_task, 60)

            # Connect to the first host
            print(f"dialer (host_b) listening on {host_b.get_addrs()[0]}")
            maddr = multiaddr.Multiaddr(destination)
            info = info_from_p2p_addr(maddr)
            print(f"Second host connecting to peer: {info.peer_id}")

            await host_b.connect(info)
            try:
                await host_b.connect(info)
            except Exception as e:
                error_msg = str(e)
                if "unable to connect" in error_msg or "SwarmException" in error_msg:
                    print(f"\n❌ Cannot connect to peer: {info.peer_id}")
                    print(f"   Address: {destination}")
                    print(f"   Error: {error_msg}")
                    print(
                        "\n💡 Make sure the peer is running and the address is correct."
                    )
                    return
                else:
                    # Re-raise other exceptions
                    raise

            stream = await host_b.new_stream(info.peer_id, (IDENTIFY_PROTOCOL_ID,))

            try:
                print("Starting identify protocol...")
                response = await stream.read()

                # Read the response using the utility function
                from libp2p.utils.varint import read_length_prefixed_protobuf

                response = await read_length_prefixed_protobuf(
                    stream, use_varint_format
                )
                full_response = response

                await stream.close()
                identify_msg = Identify()
                identify_msg.ParseFromString(response)

                # Parse the response using the robust protocol-level function
                # This handles both old and new formats automatically
                identify_msg = parse_identify_response(full_response)
                print_identify_response(identify_msg)

            except Exception as e:
                print(f"Identify protocol error: {e}")
                error_msg = str(e)
                print(f"Identify protocol error: {error_msg}")

                # Check for specific format mismatch errors
                if "Error parsing message" in error_msg or "DecodeError" in error_msg:
                    print("\n" + "=" * 60)
                    print("FORMAT MISMATCH DETECTED!")
                    print("=" * 60)
                    if use_varint_format:
                        print(
                            "You are using length-prefixed format (default) but the "
                            "listener"
                        )
                        print("is using raw protobuf format.")
                        print(
                            "\nTo fix this, run the dialer with the --raw-format flag:"
                        )
                        print(f"identify-demo --raw-format -d {destination}")
                    else:
                        print("You are using raw protobuf format but the listener")
                        print("is using length-prefixed format (default).")
                        print(
                            "\nTo fix this, run the dialer without the --raw-format "
                            "flag:"
                        )
                        print(f"identify-demo -d {destination}")
                    print("=" * 60)
                else:
                    import traceback

                    traceback.print_exc()

            return

@@ -98,34 +225,55 @@ async def run(port: int, destination: str) -> None:
def main() -> None:
    description = """
    This program demonstrates the libp2p identify protocol.
    First run identify-demo -p <PORT>' to start a listener.
    First run 'identify-demo -p <PORT> [--raw-format]' to start a listener.
    Then run 'identify-demo <ANOTHER_PORT> -d <DESTINATION>'
    where <DESTINATION> is the multiaddress shown by the listener.

    Use --raw-format to send raw protobuf messages (old format) instead of
    length-prefixed protobuf messages (new format, default).
    """

    example_maddr = (
        "/ip4/127.0.0.1/tcp/8888/p2p/QmQn4SwGkDZkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
        "/ip4/127.0.0.1/tcp/8888/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )

    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "-p", "--port", default=8888, type=int, help="source port number"
    )
    parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
    parser.add_argument(
        "-d",
        "--destination",
        type=str,
        help=f"destination multiaddr string, e.g. {example_maddr}",
    )
    parser.add_argument(
        "--raw-format",
        action="store_true",
        help=(
            "use raw protobuf format (old format) instead of "
            "length-prefixed (new format)"
        ),
    )

    args = parser.parse_args()

    if not args.port:
        raise RuntimeError("failed to determine local port")
    # Determine format: length-prefixed (varint) by default; --raw-format
    # switches to raw protobuf (old format)
    use_varint_format = not args.raw_format

    try:
        trio.run(run, *(args.port, args.destination))
        if args.destination:
            # Run in dialer mode
            trio.run(run, *(args.port, args.destination, use_varint_format))
        else:
            # Run in listener mode
            trio.run(run, *(args.port, args.destination, use_varint_format))
    except KeyboardInterrupt:
        pass
        print("\n👋 Goodbye!")
        logger.info("Application interrupted by user")
    except Exception as e:
        print(f"\n❌ Error: {str(e)}")
        logger.error("Error: %s", str(e))
        sys.exit(1)


if __name__ == "__main__":
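A condensed sketch of the dialer's read path shown above, using only the two helpers the example itself imports:

from libp2p.identity.identify.identify import parse_identify_response
from libp2p.utils.varint import read_length_prefixed_protobuf


async def fetch_identify(stream, use_varint_format: bool = True):
    # Read either framing, then let the protocol-level parser detect the
    # encoding, exactly as the dialer branch above does
    raw = await read_length_prefixed_protobuf(stream, use_varint_format)
    await stream.close()
    return parse_identify_response(raw)  # returns an Identify protobuf message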
@@ -11,23 +11,26 @@ This example shows how to:

import logging

import multiaddr
import trio

from libp2p import (
    new_host,
)
from libp2p.abc import (
    INetStream,
)
from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.custom_types import (
    TProtocol,
)
from libp2p.identity.identify import (
    identify_handler_for,
from libp2p.identity.identify.pb.identify_pb2 import (
    Identify,
)
from libp2p.identity.identify_push import (
    ID_PUSH,
    identify_push_handler_for,
    push_identify_to_peer,
)
from libp2p.peer.peerinfo import (
@@ -38,8 +41,145 @@ from libp2p.peer.peerinfo import (
logger = logging.getLogger(__name__)


def create_custom_identify_handler(host, host_name: str):
    """Create a custom identify handler that displays received information."""

    async def handle_identify(stream: INetStream) -> None:
        peer_id = stream.muxed_conn.peer_id
        print(f"\n🔍 {host_name} received identify request from peer: {peer_id}")

        # Get the standard identify response using the existing function
        from libp2p.identity.identify.identify import (
            _mk_identify_protobuf,
            _remote_address_to_multiaddr,
        )

        # Get observed address
        observed_multiaddr = None
        try:
            remote_address = stream.get_remote_address()
            if remote_address:
                observed_multiaddr = _remote_address_to_multiaddr(remote_address)
        except Exception:
            pass

        # Build the identify protobuf
        identify_msg = _mk_identify_protobuf(host, observed_multiaddr)
        response_data = identify_msg.SerializeToString()

        print(f"   📋 {host_name} identify information:")
        if identify_msg.HasField("protocol_version"):
            print(f"      Protocol Version: {identify_msg.protocol_version}")
        if identify_msg.HasField("agent_version"):
            print(f"      Agent Version: {identify_msg.agent_version}")
        if identify_msg.HasField("public_key"):
            print(f"      Public Key: {identify_msg.public_key.hex()[:16]}...")
        if identify_msg.listen_addrs:
            print("      Listen Addresses:")
            for addr_bytes in identify_msg.listen_addrs:
                addr = multiaddr.Multiaddr(addr_bytes)
                print(f"        - {addr}")
        if identify_msg.protocols:
            print("      Supported Protocols:")
            for protocol in identify_msg.protocols:
                print(f"        - {protocol}")

        # Send the response
        await stream.write(response_data)
        await stream.close()

    return handle_identify


def create_custom_identify_push_handler(host, host_name: str):
    """Create a custom identify/push handler that displays received information."""

    async def handle_identify_push(stream: INetStream) -> None:
        peer_id = stream.muxed_conn.peer_id
        print(f"\n📤 {host_name} received identify/push from peer: {peer_id}")

        try:
            # Read the identify message using the utility function
            from libp2p.utils.varint import read_length_prefixed_protobuf

            data = await read_length_prefixed_protobuf(stream, use_varint_format=True)

            # Parse the identify message
            identify_msg = Identify()
            identify_msg.ParseFromString(data)

            print("   📋 Received identify information:")
            if identify_msg.HasField("protocol_version"):
                print(f"      Protocol Version: {identify_msg.protocol_version}")
            if identify_msg.HasField("agent_version"):
                print(f"      Agent Version: {identify_msg.agent_version}")
            if identify_msg.HasField("public_key"):
                print(f"      Public Key: {identify_msg.public_key.hex()[:16]}...")
            if identify_msg.HasField("observed_addr") and identify_msg.observed_addr:
                observed_addr = multiaddr.Multiaddr(identify_msg.observed_addr)
                print(f"      Observed Address: {observed_addr}")
            if identify_msg.listen_addrs:
                print("      Listen Addresses:")
                for addr_bytes in identify_msg.listen_addrs:
                    addr = multiaddr.Multiaddr(addr_bytes)
                    print(f"        - {addr}")
            if identify_msg.protocols:
                print("      Supported Protocols:")
                for protocol in identify_msg.protocols:
                    print(f"        - {protocol}")

            # Update the peerstore with the new information
            from libp2p.identity.identify_push.identify_push import (
                _update_peerstore_from_identify,
            )

            await _update_peerstore_from_identify(
                host.get_peerstore(), peer_id, identify_msg
            )

            print(f"   ✅ {host_name} updated peerstore with new information")

        except Exception as e:
            print(f"   ❌ Error processing identify/push: {e}")
        finally:
            await stream.close()

    return handle_identify_push


async def display_peerstore_info(host, host_name: str, peer_id, description: str):
    """Display peerstore information for a specific peer."""
    peerstore = host.get_peerstore()

    try:
        addrs = peerstore.addrs(peer_id)
    except Exception:
        addrs = []

    try:
        protocols = peerstore.get_protocols(peer_id)
    except Exception:
        protocols = []

    print(f"\n📚 {host_name} peerstore for {description}:")
    print(f"   Peer ID: {peer_id}")
    if addrs:
        print("   Addresses:")
        for addr in addrs:
            print(f"     - {addr}")
    else:
        print("   Addresses: None")

    if protocols:
        print("   Protocols:")
        for protocol in protocols:
            print(f"     - {protocol}")
    else:
        print("   Protocols: None")


async def main() -> None:
    print("\n==== Starting Identify-Push Example ====\n")
    print("\n==== Starting Enhanced Identify-Push Example ====\n")

    # Create key pairs for the two hosts
    key_pair_1 = create_new_key_pair()
@@ -48,45 +188,57 @@ async def main() -> None:
    # Create the first host
    host_1 = new_host(key_pair=key_pair_1)

    # Set up the identify and identify/push handlers
    host_1.set_stream_handler(TProtocol("/ipfs/id/1.0.0"), identify_handler_for(host_1))
    host_1.set_stream_handler(ID_PUSH, identify_push_handler_for(host_1))
    # Set up custom identify and identify/push handlers
    host_1.set_stream_handler(
        TProtocol("/ipfs/id/1.0.0"), create_custom_identify_handler(host_1, "Host 1")
    )
    host_1.set_stream_handler(
        ID_PUSH, create_custom_identify_push_handler(host_1, "Host 1")
    )

    # Create the second host
    host_2 = new_host(key_pair=key_pair_2)

    # Set up the identify and identify/push handlers
    host_2.set_stream_handler(TProtocol("/ipfs/id/1.0.0"), identify_handler_for(host_2))
    host_2.set_stream_handler(ID_PUSH, identify_push_handler_for(host_2))
    # Set up custom identify and identify/push handlers
    host_2.set_stream_handler(
        TProtocol("/ipfs/id/1.0.0"), create_custom_identify_handler(host_2, "Host 2")
    )
    host_2.set_stream_handler(
        ID_PUSH, create_custom_identify_push_handler(host_2, "Host 2")
    )

    # Start listening on random ports using the run context manager
    import multiaddr

    listen_addr_1 = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")
    listen_addr_2 = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")

    async with host_1.run([listen_addr_1]), host_2.run([listen_addr_2]):
    async with (
        host_1.run([listen_addr_1]),
        host_2.run([listen_addr_2]),
        trio.open_nursery() as nursery,
    ):
        # Start the peer-store cleanup task
        nursery.start_soon(host_1.get_peerstore().start_cleanup_task, 60)
        nursery.start_soon(host_2.get_peerstore().start_cleanup_task, 60)

        # Get the addresses of both hosts
        addr_1 = host_1.get_addrs()[0]
        logger.info(f"Host 1 listening on {addr_1}")
        print(f"Host 1 listening on {addr_1}")
        print(f"Peer ID: {host_1.get_id().pretty()}")

        addr_2 = host_2.get_addrs()[0]
        logger.info(f"Host 2 listening on {addr_2}")
        print(f"Host 2 listening on {addr_2}")
        print(f"Peer ID: {host_2.get_id().pretty()}")

        print("\nConnecting Host 2 to Host 1...")
        print("🏠 Host Configuration:")
        print(f"   Host 1: {addr_1}")
        print(f"   Host 1 Peer ID: {host_1.get_id().pretty()}")
        print(f"   Host 2: {addr_2}")
        print(f"   Host 2 Peer ID: {host_2.get_id().pretty()}")

        print("\n🔗 Connecting Host 2 to Host 1...")

        # Connect host_2 to host_1
        peer_info = info_from_p2p_addr(addr_1)
        await host_2.connect(peer_info)
        logger.info("Host 2 connected to Host 1")
        print("Host 2 successfully connected to Host 1")
        print("✅ Host 2 successfully connected to Host 1")

        # Run the identify protocol from host_2 to host_1
        # (so Host 1 learns Host 2's address)
        print("\n🔄 Running identify protocol (Host 2 → Host 1)...")
        from libp2p.identity.identify.identify import ID as IDENTIFY_PROTOCOL_ID

        stream = await host_2.new_stream(host_1.get_id(), (IDENTIFY_PROTOCOL_ID,))
@@ -94,64 +246,58 @@ async def main() -> None:
        await stream.close()

        # Run the identify protocol from host_1 to host_2
        # (so Host 2 learns Host 1's address)
        print("\n🔄 Running identify protocol (Host 1 → Host 2)...")
        stream = await host_1.new_stream(host_2.get_id(), (IDENTIFY_PROTOCOL_ID,))
        response = await stream.read()
        await stream.close()

        # --- NEW CODE: Update Host 1's peerstore with Host 2's addresses ---
        from libp2p.identity.identify.pb.identify_pb2 import (
            Identify,
        )

        # Update Host 1's peerstore with Host 2's addresses
        identify_msg = Identify()
        identify_msg.ParseFromString(response)
        peerstore_1 = host_1.get_peerstore()
        peer_id_2 = host_2.get_id()
        for addr_bytes in identify_msg.listen_addrs:
            maddr = multiaddr.Multiaddr(addr_bytes)
            # TTL can be any positive int
            peerstore_1.add_addr(
                peer_id_2,
                maddr,
                ttl=3600,
            )
        # --- END NEW CODE ---
            peerstore_1.add_addr(peer_id_2, maddr, ttl=3600)

        # Now Host 1's peerstore should have Host 2's address
        peerstore_1 = host_1.get_peerstore()
        peer_id_2 = host_2.get_id()
        addrs_1_for_2 = peerstore_1.addrs(peer_id_2)
        logger.info(
            f"[DEBUG] Host 1 peerstore addresses for Host 2 before push: "
            f"{addrs_1_for_2}"
        )
        print(
            f"[DEBUG] Host 1 peerstore addresses for Host 2 before push: "
            f"{addrs_1_for_2}"
        # Display peerstore information before push
        await display_peerstore_info(
            host_1, "Host 1", peer_id_2, "Host 2 (before push)"
        )

        # Push identify information from host_1 to host_2
        logger.info("Host 1 pushing identify information to Host 2")
        print("\nHost 1 pushing identify information to Host 2...")
        print("\n📤 Host 1 pushing identify information to Host 2...")

        try:
            # Call push_identify_to_peer which now returns a boolean
            success = await push_identify_to_peer(host_1, host_2.get_id())

            if success:
                logger.info("Identify push completed successfully")
                print("Identify push completed successfully!")
                print("✅ Identify push completed successfully!")
            else:
                logger.warning("Identify push didn't complete successfully")
                print("\nWarning: Identify push didn't complete successfully")
                print("⚠️ Identify push didn't complete successfully")

        except Exception as e:
            logger.error(f"Error during identify push: {str(e)}")
            print(f"\nError during identify push: {str(e)}")
            print(f"❌ Error during identify push: {str(e)}")

        # Add this at the end of your async with block:
        await trio.sleep(0.5)  # Give background tasks time to finish
        # Give a moment for the identify/push processing to complete
        await trio.sleep(0.5)

        # Display peerstore information after push
        await display_peerstore_info(host_1, "Host 1", peer_id_2, "Host 2 (after push)")
        await display_peerstore_info(
            host_2, "Host 2", host_1.get_id(), "Host 1 (after push)"
        )

        # Give more time for background tasks to finish and connections to stabilize
        print("\n⏳ Waiting for background tasks to complete...")
        await trio.sleep(1.0)

        # Gracefully close connections to prevent connection errors
        print("🔌 Closing connections...")
        await host_2.disconnect(host_1.get_id())
        await trio.sleep(0.2)

        print("\n🎉 Example completed successfully!")


if __name__ == "__main__":
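A small sketch of what the push handlers above ultimately do with a received Identify message. Note that add_protocols is an assumed peerstore call; the example itself goes through _update_peerstore_from_identify:

import multiaddr


def apply_identify(peerstore, peer_id, identify_msg, ttl: int = 3600) -> None:
    # Record the sender's advertised listen addresses, as main() does above
    for addr_bytes in identify_msg.listen_addrs:
        peerstore.add_addr(peer_id, multiaddr.Multiaddr(addr_bytes), ttl)
    # Assumed API: register the sender's supported protocols as well
    if identify_msg.protocols:
        peerstore.add_protocols(peer_id, list(identify_msg.protocols))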
@@ -38,17 +38,20 @@ from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.identity.identify import (
    ID as ID_IDENTIFY,
    identify_handler_for,
)
from libp2p.identity.identify import ID as ID_IDENTIFY
from libp2p.identity.identify.identify import (
    _remote_address_to_multiaddr,
)
from libp2p.identity.identify.pb.identify_pb2 import (
    Identify,
)
from libp2p.identity.identify_push import (
    ID_PUSH as ID_IDENTIFY_PUSH,
    identify_push_handler_for,
    push_identify_to_peer,
)
from libp2p.identity.identify_push import ID_PUSH as ID_IDENTIFY_PUSH
from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)
@@ -56,22 +59,47 @@ from libp2p.peer.peerinfo import (
# Configure logging
logger = logging.getLogger("libp2p.identity.identify-push-example")

# Default port configuration
DEFAULT_PORT = 8888


def custom_identify_push_handler_for(host):
def custom_identify_push_handler_for(host, use_varint_format: bool = True):
    """
    Create a custom handler for the identify/push protocol that logs and prints
    the identity information received from the dialer.

    Args:
        host: The libp2p host
        use_varint_format: If True, expect length-prefixed format; if False, expect
            raw protobuf

    """

    async def handle_identify_push(stream: INetStream) -> None:
        peer_id = stream.muxed_conn.peer_id

        # Get remote address information
        try:
            # Read the identify message from the stream
            data = await stream.read()
            remote_address = stream.get_remote_address()
            if remote_address:
                observed_multiaddr = _remote_address_to_multiaddr(remote_address)
                logger.info(
                    "Connection from remote peer %s, address: %s, multiaddr: %s",
                    peer_id,
                    remote_address,
                    observed_multiaddr,
                )
                print(f"\n🔗 Received identify/push request from peer: {peer_id}")
                # Add the peer ID to create a complete multiaddr
                complete_multiaddr = f"{observed_multiaddr}/p2p/{peer_id}"
                print(f"   Remote address: {complete_multiaddr}")
        except Exception as e:
            logger.error("Error getting remote address: %s", e)
            print(f"\n🔗 Received identify/push request from peer: {peer_id}")

        try:
            # Use the utility function to read the protobuf message
            from libp2p.utils.varint import read_length_prefixed_protobuf

            data = await read_length_prefixed_protobuf(stream, use_varint_format)

            identify_msg = Identify()
            identify_msg.ParseFromString(data)

@@ -120,11 +148,41 @@ def custom_identify_push_handler_for(host):
            await _update_peerstore_from_identify(peerstore, peer_id, identify_msg)

            logger.info("Successfully processed identify/push from peer %s", peer_id)
            print(f"\nSuccessfully processed identify/push from peer {peer_id}")
            print(f"✅ Successfully processed identify/push from peer {peer_id}")

        except Exception as e:
            logger.error("Error processing identify/push from %s: %s", peer_id, e)
            print(f"\nError processing identify/push from {peer_id}: {e}")
            error_msg = str(e)
            logger.error(
                "Error processing identify/push from %s: %s", peer_id, error_msg
            )
            print(f"\nError processing identify/push from {peer_id}: {error_msg}")

            # Check for specific format mismatch errors
            if (
                "Error parsing message" in error_msg
                or "DecodeError" in error_msg
                or "ParseFromString" in error_msg
            ):
                print("\n" + "=" * 60)
                print("FORMAT MISMATCH DETECTED!")
                print("=" * 60)
                if use_varint_format:
                    print(
                        "You are using length-prefixed format (default) but the "
                        "dialer is using raw protobuf format."
                    )
                    print("\nTo fix this, run the dialer with the --raw-format flag:")
                    print(
                        "identify-push-listener-dialer-demo --raw-format -d <ADDRESS>"
                    )
                else:
                    print("You are using raw protobuf format but the dialer")
                    print("is using length-prefixed format (default).")
                    print(
                        "\nTo fix this, run the dialer without the --raw-format flag:"
                    )
                    print("identify-push-listener-dialer-demo -d <ADDRESS>")
                print("=" * 60)
        finally:
            # Close the stream after processing
            await stream.close()
@@ -132,9 +190,15 @@ def custom_identify_push_handler_for(host):
    return handle_identify_push


async def run_listener(port: int) -> None:
async def run_listener(
    port: int, use_varint_format: bool = True, raw_format_flag: bool = False
) -> None:
    """Run a host in listener mode."""
    print(f"\n==== Starting Identify-Push Listener on port {port} ====\n")
    format_name = "length-prefixed" if use_varint_format else "raw protobuf"
    print(
        f"\n==== Starting Identify-Push Listener on port {port} "
        f"(using {format_name} format) ====\n"
    )

    # Create key pair for the listener
    key_pair = create_new_key_pair()
@@ -142,35 +206,58 @@ async def run_listener(port: int) -> None:
    # Create the listener host
    host = new_host(key_pair=key_pair)

    # Set up the identify and identify/push handlers
    host.set_stream_handler(ID_IDENTIFY, identify_handler_for(host))
    host.set_stream_handler(ID_IDENTIFY_PUSH, custom_identify_push_handler_for(host))
    # Set up the identify and identify/push handlers with specified format
    host.set_stream_handler(
        ID_IDENTIFY, identify_handler_for(host, use_varint_format=use_varint_format)
    )
    host.set_stream_handler(
        ID_IDENTIFY_PUSH,
        custom_identify_push_handler_for(host, use_varint_format=use_varint_format),
    )

    # Start listening
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    async with host.run([listen_addr]):
        addr = host.get_addrs()[0]
        logger.info("Listener host ready!")
        print("Listener host ready!")
    try:
        async with host.run([listen_addr]):
            addr = host.get_addrs()[0]
            logger.info("Listener host ready!")
            print("Listener host ready!")

            logger.info(f"Listening on: {addr}")
            print(f"Listening on: {addr}")
            logger.info(f"Listening on: {addr}")
            print(f"Listening on: {addr}")

            logger.info(f"Peer ID: {host.get_id().pretty()}")
            print(f"Peer ID: {host.get_id().pretty()}")
            logger.info(f"Peer ID: {host.get_id().pretty()}")
            print(f"Peer ID: {host.get_id().pretty()}")

            print("\nRun dialer with command:")
            print(f"identify-push-listener-dialer-demo -d {addr}")
            print("\nWaiting for incoming connections... (Ctrl+C to exit)")
            print("\nRun dialer with command:")
            if raw_format_flag:
                print(f"identify-push-listener-dialer-demo -d {addr} --raw-format")
            else:
                print(f"identify-push-listener-dialer-demo -d {addr}")
            print("\nWaiting for incoming identify/push requests... (Ctrl+C to exit)")

            # Keep running until interrupted
            await trio.sleep_forever()
            # Keep running until interrupted
            try:
                await trio.sleep_forever()
            except KeyboardInterrupt:
                print("\n🛑 Shutting down listener...")
                logger.info("Listener interrupted by user")
                return
    except Exception as e:
        logger.error(f"Listener error: {e}")
        raise


async def run_dialer(port: int, destination: str) -> None:
async def run_dialer(
    port: int, destination: str, use_varint_format: bool = True
) -> None:
    """Run a host in dialer mode that connects to a listener."""
    print(f"\n==== Starting Identify-Push Dialer on port {port} ====\n")
    format_name = "length-prefixed" if use_varint_format else "raw protobuf"
    print(
        f"\n==== Starting Identify-Push Dialer on port {port} "
        f"(using {format_name} format) ====\n"
    )

    # Create key pair for the dialer
    key_pair = create_new_key_pair()
@@ -178,9 +265,14 @@ async def run_dialer(port: int, destination: str) -> None:
    # Create the dialer host
    host = new_host(key_pair=key_pair)

    # Set up the identify and identify/push handlers
    host.set_stream_handler(ID_IDENTIFY, identify_handler_for(host))
    host.set_stream_handler(ID_IDENTIFY_PUSH, identify_push_handler_for(host))
    # Set up the identify and identify/push handlers with specified format
    host.set_stream_handler(
        ID_IDENTIFY, identify_handler_for(host, use_varint_format=use_varint_format)
    )
    host.set_stream_handler(
        ID_IDENTIFY_PUSH,
        identify_push_handler_for(host, use_varint_format=use_varint_format),
    )

    # Start listening on a different port
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
@@ -201,7 +293,9 @@ async def run_dialer(port: int, destination: str) -> None:
    try:
        await host.connect(peer_info)
        logger.info("Successfully connected to listener!")
        print("Successfully connected to listener!")
        print("✅ Successfully connected to listener!")
        print(f"   Connected to: {peer_info.peer_id}")
        print(f"   Full address: {destination}")

        # Push identify information to the listener
        logger.info("Pushing identify information to listener...")
@@ -209,11 +303,13 @@ async def run_dialer(port: int, destination: str) -> None:

        try:
            # Call push_identify_to_peer which returns a boolean
            success = await push_identify_to_peer(host, peer_info.peer_id)
            success = await push_identify_to_peer(
                host, peer_info.peer_id, use_varint_format=use_varint_format
            )

            if success:
                logger.info("Identify push completed successfully!")
                print("Identify push completed successfully!")
                print("✅ Identify push completed successfully!")

                logger.info("Example completed successfully!")
                print("\nExample completed successfully!")
@@ -224,64 +320,114 @@ async def run_dialer(port: int, destination: str) -> None:
                logger.warning("Example completed with warnings.")
                print("Example completed with warnings.")
        except Exception as e:
            logger.error(f"Error during identify push: {str(e)}")
            print(f"\nError during identify push: {str(e)}")
            error_msg = str(e)
            logger.error(f"Error during identify push: {error_msg}")
            print(f"\nError during identify push: {error_msg}")

            # Check for specific format mismatch errors
            if (
                "Error parsing message" in error_msg
                or "DecodeError" in error_msg
                or "ParseFromString" in error_msg
            ):
                print("\n" + "=" * 60)
                print("FORMAT MISMATCH DETECTED!")
                print("=" * 60)
                if use_varint_format:
                    print(
                        "You are using length-prefixed format (default) but the "
                        "listener is using raw protobuf format."
                    )
                    print(
                        "\nTo fix this, run the dialer with the --raw-format flag:"
                    )
                    print(
                        f"identify-push-listener-dialer-demo --raw-format -d "
                        f"{destination}"
                    )
                else:
                    print("You are using raw protobuf format but the listener")
                    print("is using length-prefixed format (default).")
                    print(
                        "\nTo fix this, run the dialer without the --raw-format "
                        "flag:"
                    )
                    print(f"identify-push-listener-dialer-demo -d {destination}")
                print("=" * 60)

            logger.error("Example completed with errors.")
            print("Example completed with errors.")
            # Continue execution despite the push error

    except Exception as e:
        logger.error(f"Error during dialer operation: {str(e)}")
        print(f"\nError during dialer operation: {str(e)}")
        raise
        error_msg = str(e)
        if "unable to connect" in error_msg or "SwarmException" in error_msg:
            print(f"\n❌ Cannot connect to peer: {peer_info.peer_id}")
            print(f"   Address: {destination}")
            print(f"   Error: {error_msg}")
            print("\n💡 Make sure the peer is running and the address is correct.")
            return
        else:
            logger.error(f"Error during dialer operation: {error_msg}")
            print(f"\nError during dialer operation: {error_msg}")
            raise


def main() -> None:
    """Parse arguments and start the appropriate mode."""
    description = """
    This program demonstrates the libp2p identify/push protocol.
    Without arguments, it runs as a listener on port 8888.
    With -d parameter, it runs as a dialer on port 8889.
    """
    Without arguments, it runs as a listener on a random port.
    With -d parameter, it runs as a dialer on a random port.

    example = (
        f"/ip4/127.0.0.1/tcp/{DEFAULT_PORT}/p2p/"
        "QmQn4SwGkDZkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )
    Port 0 (default) means the OS will automatically assign an available port.
    This prevents port conflicts when running multiple instances.

    Use --raw-format to send raw protobuf messages (old format) instead of
    length-prefixed protobuf messages (new format, default).
    """

    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "-p",
        "--port",
        default=0,
        type=int,
        help=(
            f"port to listen on (default: {DEFAULT_PORT} for listener, "
            f"{DEFAULT_PORT + 1} for dialer)"
        ),
        help="source port number (0 = random available port)",
    )
    parser.add_argument(
        "-d",
        "--destination",
        type=str,
        help=f"destination multiaddr string, e.g. {example}",
        help="destination multiaddr string",
    )
    parser.add_argument(
        "--raw-format",
        action="store_true",
        help=(
            "use raw protobuf format (old format) instead of "
            "length-prefixed (new format)"
        ),
    )

    args = parser.parse_args()

    # Determine format: raw format if --raw-format is specified, otherwise
    # length-prefixed
    use_varint_format = not args.raw_format

    try:
        if args.destination:
            # Run in dialer mode with default port DEFAULT_PORT + 1 if not specified
            port = args.port if args.port is not None else DEFAULT_PORT + 1
            trio.run(run_dialer, port, args.destination)
            # Run in dialer mode with random available port if not specified
            trio.run(run_dialer, args.port, args.destination, use_varint_format)
        else:
            # Run in listener mode with default port DEFAULT_PORT if not specified
            port = args.port if args.port is not None else DEFAULT_PORT
            trio.run(run_listener, port)
            # Run in listener mode with random available port if not specified
            trio.run(run_listener, args.port, use_varint_format, args.raw_format)
    except KeyboardInterrupt:
        print("\nInterrupted by user")
        logger.info("Interrupted by user")
        print("\n👋 Goodbye!")
        logger.info("Application interrupted by user")
    except Exception as e:
        print(f"\nError: {str(e)}")
        print(f"\n❌ Error: {str(e)}")
        logger.error("Error: %s", str(e))
        sys.exit(1)
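For background on the recurring length-prefixed versus raw protobuf distinction in these examples: the length prefix is an unsigned varint (LEB128). A stdlib-only sketch of the framing, with hypothetical helper names:

def encode_uvarint(n: int) -> bytes:
    # Emit 7 bits per byte, setting the high bit while more bytes follow
    out = bytearray()
    while True:
        b = n & 0x7F
        n >>= 7
        out.append(b | (0x80 if n else 0))
        if not n:
            return bytes(out)


def frame(message_bytes: bytes) -> bytes:
    # "Length-prefixed" = varint length, then the serialized protobuf;
    # "raw" mode sends message_bytes with no prefix at all
    return encode_uvarint(len(message_bytes)) + message_bytes


assert frame(b"abc")[0] == 3  # short messages get a one-byte prefix
assert encode_uvarint(300) == b"\xac\x02"  # multi-byte prefix past 127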
examples/kademlia/kademlia.py (new file, 304 lines)
@@ -0,0 +1,304 @@
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
A basic example of using the Kademlia DHT implementation, with all setup logic inlined.
|
||||
This example demonstrates both value storage/retrieval and content server
|
||||
advertisement/discovery.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import secrets
|
||||
import sys
|
||||
|
||||
import base58
|
||||
from multiaddr import (
|
||||
Multiaddr,
|
||||
)
|
||||
import trio
|
||||
|
||||
from libp2p import (
|
||||
new_host,
|
||||
)
|
||||
from libp2p.abc import (
|
||||
IHost,
|
||||
)
|
||||
from libp2p.crypto.secp256k1 import (
|
||||
create_new_key_pair,
|
||||
)
|
||||
from libp2p.kad_dht.kad_dht import (
|
||||
DHTMode,
|
||||
KadDHT,
|
||||
)
|
||||
from libp2p.kad_dht.utils import (
|
||||
create_key_from_binary,
|
||||
)
|
||||
from libp2p.tools.async_service import (
|
||||
background_trio_service,
|
||||
)
|
||||
from libp2p.tools.utils import (
|
||||
info_from_p2p_addr,
|
||||
)
|
||||
from libp2p.utils.paths import get_script_dir, join_paths
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
||||
handlers=[logging.StreamHandler()],
|
||||
)
|
||||
logger = logging.getLogger("kademlia-example")
|
||||
|
||||
# Configure DHT module loggers to inherit from the parent logger
|
||||
# This ensures all kademlia-example.* loggers use the same configuration
|
||||
# Get the directory where this script is located
|
||||
SCRIPT_DIR = get_script_dir(__file__)
|
||||
SERVER_ADDR_LOG = join_paths(SCRIPT_DIR, "server_node_addr.txt")
|
||||
|
||||
# Set the level for all child loggers
|
||||
for module in [
|
||||
"kad_dht",
|
||||
"value_store",
|
||||
"peer_routing",
|
||||
"routing_table",
|
||||
"provider_store",
|
||||
]:
|
||||
child_logger = logging.getLogger(f"kademlia-example.{module}")
|
||||
child_logger.setLevel(logging.INFO)
|
||||
child_logger.propagate = True # Allow propagation to parent
|
||||
|
||||
# File to store node information
|
||||
bootstrap_nodes = []
|
||||
|
||||
|
||||
# function to take bootstrap_nodes as input and connects to them
|
||||
async def connect_to_bootstrap_nodes(host: IHost, bootstrap_addrs: list[str]) -> None:
|
||||
"""
|
||||
Connect to the bootstrap nodes provided in the list.
|
||||
|
||||
params: host: The host instance to connect to
|
||||
bootstrap_addrs: List of bootstrap node addresses
|
||||
|
||||
Returns
|
||||
-------
|
||||
None
|
||||
|
||||
"""
|
||||
for addr in bootstrap_addrs:
|
||||
try:
|
||||
peerInfo = info_from_p2p_addr(Multiaddr(addr))
|
||||
host.get_peerstore().add_addrs(peerInfo.peer_id, peerInfo.addrs, 3600)
|
||||
await host.connect(peerInfo)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to connect to bootstrap node {addr}: {e}")
|
||||
|
||||
|
||||
def save_server_addr(addr: str) -> None:
|
||||
"""Append the server's multiaddress to the log file."""
|
||||
try:
|
||||
with open(SERVER_ADDR_LOG, "w") as f:
|
||||
f.write(addr + "\n")
|
||||
logger.info(f"Saved server address to log: {addr}")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to save server address: {e}")
|
||||
|
||||
|
||||
def load_server_addrs() -> list[str]:
|
||||
"""Load all server multiaddresses from the log file."""
|
||||
if not os.path.exists(SERVER_ADDR_LOG):
|
||||
return []
|
||||
try:
|
||||
with open(SERVER_ADDR_LOG) as f:
|
||||
return [line.strip() for line in f if line.strip()]
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to load server addresses: {e}")
|
||||
return []
|
||||
|
||||
|
||||
async def run_node(
|
||||
port: int, mode: str, bootstrap_addrs: list[str] | None = None
|
||||
) -> None:
|
||||
"""Run a node that serves content in the DHT with setup inlined."""
|
||||
try:
|
||||
if port <= 0:
|
||||
port = random.randint(10000, 60000)
|
||||
logger.debug(f"Using port: {port}")
|
||||
|
||||
# Convert string mode to DHTMode enum
|
||||
if mode is None or mode.upper() == "CLIENT":
|
||||
dht_mode = DHTMode.CLIENT
|
||||
elif mode.upper() == "SERVER":
|
||||
dht_mode = DHTMode.SERVER
|
||||
else:
|
||||
logger.error(f"Invalid mode: {mode}. Must be 'client' or 'server'")
|
||||
sys.exit(1)
|
||||
|
||||
# Load server addresses for client mode
|
||||
if dht_mode == DHTMode.CLIENT:
|
||||
server_addrs = load_server_addrs()
|
||||
if server_addrs:
|
||||
logger.info(f"Loaded {len(server_addrs)} server addresses from log")
|
||||
bootstrap_nodes.append(server_addrs[0]) # Use the first server address
|
||||
else:
|
||||
logger.warning("No server addresses found in log file")
|
||||
|
||||
if bootstrap_addrs:
|
||||
for addr in bootstrap_addrs:
|
||||
bootstrap_nodes.append(addr)
|
||||
|
||||
key_pair = create_new_key_pair(secrets.token_bytes(32))
|
||||
host = new_host(key_pair=key_pair)
|
||||
listen_addr = Multiaddr(f"/ip4/127.0.0.1/tcp/{port}")
|
||||
|
||||
async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
|
||||
# Start the peer-store cleanup task
|
||||
nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)
|
||||
|
||||
peer_id = host.get_id().pretty()
|
||||
addr_str = f"/ip4/127.0.0.1/tcp/{port}/p2p/{peer_id}"
|
||||
await connect_to_bootstrap_nodes(host, bootstrap_nodes)
|
||||
dht = KadDHT(host, dht_mode)
|
||||
# take all peer ids from the host and add them to the dht
|
||||
for peer_id in host.get_peerstore().peer_ids():
|
||||
await dht.routing_table.add_peer(peer_id)
|
||||
logger.info(f"Connected to bootstrap nodes: {host.get_connected_peers()}")
|
||||
bootstrap_cmd = f"--bootstrap {addr_str}"
|
||||
logger.info("To connect to this node, use: %s", bootstrap_cmd)
|
||||
|
||||
# Save server address in server mode
|
||||
if dht_mode == DHTMode.SERVER:
|
||||
save_server_addr(addr_str)
|
||||
|
||||
# Start the DHT service
|
||||
async with background_trio_service(dht):
|
||||
logger.info(f"DHT service started in {dht_mode.value} mode")
|
||||
val_key = create_key_from_binary(b"py-libp2p kademlia example value")
|
||||
content = b"Hello from python node "
|
||||
content_key = create_key_from_binary(content)
|
||||
|
||||
if dht_mode == DHTMode.SERVER:
|
||||
# Store a value in the DHT
|
||||
msg = "Hello message from Sumanjeet"
|
||||
val_data = msg.encode()
|
||||
await dht.put_value(val_key, val_data)
|
||||
logger.info(
|
||||
f"Stored value '{val_data.decode()}'"
|
||||
f"with key: {base58.b58encode(val_key).decode()}"
|
||||
)
|
||||
|
||||
# Advertise as content server
|
||||
success = await dht.provider_store.provide(content_key)
|
||||
if success:
|
||||
logger.info(
|
||||
"Successfully advertised as server"
|
||||
f"for content: {content_key.hex()}"
|
||||
)
|
||||
else:
|
||||
logger.warning("Failed to advertise as content server")
|
||||
|
||||
else:
|
||||
# retrieve the value
|
||||
logger.info(
|
||||
"Looking up key: %s", base58.b58encode(val_key).decode()
|
||||
)
|
||||
val_data = await dht.get_value(val_key)
|
||||
if val_data:
|
||||
try:
|
||||
logger.info(f"Retrieved value: {val_data.decode()}")
|
||||
except UnicodeDecodeError:
|
||||
logger.info(f"Retrieved value (bytes): {val_data!r}")
|
||||
else:
|
||||
logger.warning("Failed to retrieve value")
|
||||
|
||||
# Also check if we can find servers for our own content
|
||||
logger.info("Looking for servers of content: %s", content_key.hex())
|
||||
providers = await dht.provider_store.find_providers(content_key)
|
||||
if providers:
|
||||
logger.info(
|
||||
"Found %d servers for content: %s",
|
||||
len(providers),
|
||||
[p.peer_id.pretty() for p in providers],
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
"No servers found for content %s", content_key.hex()
|
||||
)
|
||||
|
||||
# Keep the node running
|
||||
while True:
|
||||
logger.info(
|
||||
"Status - Connected peers: %d,"
|
||||
"Peers in store: %d, Values in store: %d",
|
||||
len(dht.host.get_connected_peers()),
|
||||
len(dht.host.get_peerstore().peer_ids()),
|
||||
len(dht.value_store.store),
|
||||
)
|
||||
await trio.sleep(10)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Server node error: {e}", exc_info=True)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""Parse command line arguments."""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Kademlia DHT example with content server functionality"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--mode",
|
||||
default="server",
|
||||
help="Run as a server or client node",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--port",
|
||||
type=int,
|
||||
default=0,
|
||||
help="Port to listen on (0 for random)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--bootstrap",
|
||||
type=str,
|
||||
nargs="*",
|
||||
help=(
|
||||
"Multiaddrs of bootstrap nodes. "
|
||||
"Provide a space-separated list of addresses. "
|
||||
"This is required for client mode."
|
||||
),
|
||||
)
|
||||
# add option to use verbose logging
|
||||
parser.add_argument(
|
||||
"--verbose",
|
||||
action="store_true",
|
||||
help="Enable verbose logging",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
# Set logging level based on verbosity
|
||||
if args.verbose:
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
else:
|
||||
logging.getLogger().setLevel(logging.INFO)
|
||||
|
||||
return args
|
||||
|
||||
|
||||
def main():
|
||||
"""Main entry point for the kademlia demo."""
|
||||
try:
|
||||
args = parse_args()
|
||||
logger.info(
|
||||
"Running in %s mode on port %d",
|
||||
args.mode,
|
||||
args.port,
|
||||
)
|
||||
trio.run(run_node, args.port, args.mode, args.bootstrap)
|
||||
except Exception as e:
|
||||
logger.critical(f"Script failed: {e}", exc_info=True)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
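The example is normally launched via its CLI, but for a quick smoke test it can also be driven programmatically. A minimal sketch, assuming the file above is importable as kademlia (the module name is the only assumption here):

import trio

import kademlia

# Start a server node on a fixed port; runs until interrupted.
trio.run(kademlia.run_node, 9000, "server", None)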
examples/mDNS/mDNS.py (new file, 77 lines)
@@ -0,0 +1,77 @@
import argparse
import logging
import secrets

import multiaddr
import trio

from libp2p import (
    new_host,
)
from libp2p.abc import PeerInfo
from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.discovery.events.peerDiscovery import peerDiscovery

logger = logging.getLogger("libp2p.discovery.mdns")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(
    logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
logger.addHandler(handler)

# Set root logger to DEBUG to capture all logs from dependencies
logging.getLogger().setLevel(logging.DEBUG)


def onPeerDiscovery(peerinfo: PeerInfo):
    logger.info(f"Discovered: {peerinfo.peer_id}")


async def run(port: int) -> None:
    secret = secrets.token_bytes(32)
    key_pair = create_new_key_pair(secret)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    peerDiscovery.register_peer_discovered_handler(onPeerDiscovery)

    print(
        "Run this from the same folder in another console to "
        "start another peer on a different port:\n\n"
        "mdns-demo -p <ANOTHER_PORT>\n"
    )
    print("Waiting for mDNS peer discovery events...\n")

    logger.info("Starting peer Discovery")
    host = new_host(key_pair=key_pair, enable_mDNS=True)
    async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
        # Start the peer-store cleanup task
        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

        await trio.sleep_forever()


def main() -> None:
    description = """
    This program demonstrates mDNS peer discovery using libp2p.
    To use it, run 'mdns-demo -p <PORT>', where <PORT> is the port number.
    Start multiple peers on different ports to see discovery in action.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Enable verbose output"
    )
    args = parser.parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    try:
        trio.run(run, args.port)
    except KeyboardInterrupt:
        logger.info("Exiting...")


if __name__ == "__main__":
    main()
@@ -55,18 +55,20 @@ async def send_ping(stream: INetStream) -> None:


 async def run(port: int, destination: str) -> None:
     localhost_ip = "127.0.0.1"
     listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
     host = new_host(listen_addrs=[listen_addr])

     async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
+        # Start the peer-store cleanup task
+        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

         if not destination:
             host.set_stream_handler(PING_PROTOCOL_ID, handle_ping)

             print(
                 "Run this from the same folder in another console:\n\n"
-                f"ping-demo -p {int(port) + 1} "
-                f"-d /ip4/{localhost_ip}/tcp/{port}/p2p/{host.get_id().pretty()}\n"
+                f"ping-demo "
+                f"-d {host.get_addrs()[0]}\n"
             )
             print("Waiting for incoming connection...")

@@ -96,10 +98,8 @@ def main() -> None:
     )

     parser = argparse.ArgumentParser(description=description)
-    parser.add_argument(
-        "-p", "--port", default=8000, type=int, help="source port number"
-    )
+    parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
     parser.add_argument(
         "-d",
         "--destination",

@@ -108,9 +108,6 @@ def main() -> None:
     )
     args = parser.parse_args()

-    if not args.port:
-        raise RuntimeError("failed to determine local port")
-
     try:
         trio.run(run, *(args.port, args.destination))
     except KeyboardInterrupt:
@@ -1,9 +1,5 @@
 import argparse
 import logging
-import socket
-from typing import (
-    Optional,
-)

 import base58
 import multiaddr

@@ -34,6 +30,9 @@ from libp2p.stream_muxer.mplex.mplex import (
 from libp2p.tools.async_service.trio_service import (
     background_trio_service,
 )
+from libp2p.utils.address_validation import (
+    find_free_port,
+)

 # Configure logging
 logging.basicConfig(

@@ -80,13 +79,6 @@ async def publish_loop(pubsub, topic, termination_event):
         await trio.sleep(1)  # Avoid tight loop on error


-def find_free_port():
-    """Find a free port on localhost."""
-    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-        s.bind(("", 0))  # Bind to a free port provided by the OS
-        return s.getsockname()[1]
-
-
 async def monitor_peer_topics(pubsub, nursery, termination_event):
     """
     Monitor for new topics that peers are subscribed to and

@@ -109,7 +101,7 @@ async def monitor_peer_topics(pubsub, nursery, termination_event):
         await trio.sleep(2)


-async def run(topic: str, destination: Optional[str], port: Optional[int]) -> None:
+async def run(topic: str, destination: str | None, port: int | None) -> None:
     # Initialize network settings
     localhost_ip = "127.0.0.1"

@@ -147,6 +139,9 @@ async def run(topic: str, destination: Optional[str], port: Optional[int]) -> None:
     pubsub = Pubsub(host, gossipsub)
     termination_event = trio.Event()  # Event to signal termination
     async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
+        # Start the peer-store cleanup task
+        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)
+
         logger.info(f"Node started with peer ID: {host.get_id()}")
         logger.info(f"Listening on: {listen_addr}")
         logger.info("Initializing PubSub and GossipSub...")
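With this change, find_free_port ships from libp2p.utils.address_validation instead of being redefined per example. A minimal usage sketch (assuming the helper keeps the same behavior as the removed local version, returning an OS-assigned free TCP port):

from libp2p.utils.address_validation import find_free_port

port = find_free_port()  # a free TCP port picked by the OS
print(f"listening on {port}")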
examples/random_walk/random_walk.py (new file, 221 lines)
@@ -0,0 +1,221 @@
"""
Random Walk Example for py-libp2p Kademlia DHT

This example demonstrates the Random Walk module's peer discovery capabilities
using real libp2p hosts and Kademlia DHT. It shows how the Random Walk module
automatically discovers new peers and maintains routing table health.

Usage:
    # Start server nodes (they will discover peers via random walk)
    python3 random_walk.py --mode server
"""

import argparse
import logging
import random
import secrets
import sys

from multiaddr import Multiaddr
import trio

from libp2p import new_host
from libp2p.abc import IHost
from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.kad_dht.kad_dht import DHTMode, KadDHT
from libp2p.tools.async_service import background_trio_service


# Simple logging configuration
def setup_logging(verbose: bool = False):
    """Set up unified logging configuration."""
    level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[logging.StreamHandler()],
    )

    # Configure key module loggers
    for module in ["libp2p.discovery.random_walk", "libp2p.kad_dht"]:
        logging.getLogger(module).setLevel(level)

    # Suppress noisy logs
    logging.getLogger("multiaddr").setLevel(logging.WARNING)


logger = logging.getLogger("random-walk-example")

# Default bootstrap nodes
DEFAULT_BOOTSTRAP_NODES = [
    "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
]


def filter_compatible_peer_info(peer_info) -> bool:
    """Filter peer info to check if it has compatible addresses (TCP + IPv4)."""
    if not hasattr(peer_info, "addrs") or not peer_info.addrs:
        return False

    for addr in peer_info.addrs:
        addr_str = str(addr)
        if "/tcp/" in addr_str and "/ip4/" in addr_str and "/quic" not in addr_str:
            return True
    return False


async def maintain_connections(host: IHost) -> None:
    """Maintain connections to ensure the host remains connected to healthy peers."""
    while True:
        try:
            connected_peers = host.get_connected_peers()
            list_peers = host.get_peerstore().peers_with_addrs()

            if len(connected_peers) < 20:
                logger.debug("Reconnecting to maintain peer connections...")

                # Find compatible peers
                compatible_peers = []
                for peer_id in list_peers:
                    try:
                        peer_info = host.get_peerstore().peer_info(peer_id)
                        if filter_compatible_peer_info(peer_info):
                            compatible_peers.append(peer_id)
                    except Exception:
                        continue

                # Connect to a random subset of compatible peers
                if compatible_peers:
                    random_peers = random.sample(
                        compatible_peers, min(50, len(compatible_peers))
                    )
                    for peer_id in random_peers:
                        if peer_id not in connected_peers:
                            try:
                                with trio.move_on_after(5):
                                    peer_info = host.get_peerstore().peer_info(peer_id)
                                    await host.connect(peer_info)
                                    logger.debug(f"Connected to peer: {peer_id}")
                            except Exception as e:
                                logger.debug(f"Failed to connect to {peer_id}: {e}")

            await trio.sleep(15)
        except Exception as e:
            logger.error(f"Error maintaining connections: {e}")


async def demonstrate_random_walk_discovery(dht: KadDHT, interval: int = 30) -> None:
    """Demonstrate Random Walk peer discovery with periodic statistics."""
    iteration = 0
    while True:
        iteration += 1
        logger.info(f"--- Iteration {iteration} ---")
        logger.info(f"Routing table size: {dht.get_routing_table_size()}")
        logger.info(f"Connected peers: {len(dht.host.get_connected_peers())}")
        logger.info(f"Peerstore size: {len(dht.host.get_peerstore().peer_ids())}")
        await trio.sleep(interval)


async def run_node(port: int, mode: str, demo_interval: int = 30) -> None:
    """Run a node that demonstrates Random Walk peer discovery."""
    try:
        if port <= 0:
            port = random.randint(10000, 60000)

        logger.info(f"Starting {mode} node on port {port}")

        # Determine DHT mode
        dht_mode = DHTMode.SERVER if mode == "server" else DHTMode.CLIENT

        # Create host and DHT
        key_pair = create_new_key_pair(secrets.token_bytes(32))
        host = new_host(key_pair=key_pair, bootstrap=DEFAULT_BOOTSTRAP_NODES)
        listen_addr = Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

        async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
            # Start maintenance tasks
            nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)
            nursery.start_soon(maintain_connections, host)

            peer_id = host.get_id().pretty()
            logger.info(f"Node peer ID: {peer_id}")
            logger.info(f"Node address: /ip4/0.0.0.0/tcp/{port}/p2p/{peer_id}")

            # Create and start DHT with Random Walk enabled
            dht = KadDHT(host, dht_mode, enable_random_walk=True)
            logger.info(f"Initial routing table size: {dht.get_routing_table_size()}")

            async with background_trio_service(dht):
                logger.info(f"DHT service started in {dht_mode.value} mode")
                logger.info(f"Random Walk enabled: {dht.is_random_walk_enabled()}")

                async with trio.open_nursery() as task_nursery:
                    # Start demonstration and status reporting
                    task_nursery.start_soon(
                        demonstrate_random_walk_discovery, dht, demo_interval
                    )

                    # Periodic status updates
                    async def status_reporter():
                        while True:
                            await trio.sleep(30)
                            logger.debug(
                                f"Connected: {len(dht.host.get_connected_peers())}, "
                                f"Routing table: {dht.get_routing_table_size()}, "
                                f"Peerstore: {len(dht.host.get_peerstore().peer_ids())}"
                            )

                    task_nursery.start_soon(status_reporter)
                    await trio.sleep_forever()

    except Exception as e:
        logger.error(f"Node error: {e}", exc_info=True)
        sys.exit(1)


def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Random Walk Example for py-libp2p Kademlia DHT",
    )
    parser.add_argument(
        "--mode",
        choices=["server", "client"],
        default="server",
        help="Node mode: server (DHT server), or client (DHT client)",
    )
    parser.add_argument(
        "--port", type=int, default=0, help="Port to listen on (0 for random)"
    )
    parser.add_argument(
        "--demo-interval",
        type=int,
        default=30,
        help="Interval between random walk demonstrations in seconds",
    )
    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
    return parser.parse_args()


def main():
    """Main entry point for the random walk example."""
    try:
        args = parse_args()
        setup_logging(args.verbose)

        logger.info("=== Random Walk Example for py-libp2p ===")
        logger.info(
            f"Mode: {args.mode}, Port: {args.port}, Demo interval: {args.demo_interval}s"
        )

        trio.run(run_node, args.port, args.mode, args.demo_interval)

    except KeyboardInterrupt:
        logger.info("Received interrupt signal, shutting down...")
    except Exception as e:
        logger.critical(f"Example failed: {e}", exc_info=True)
        sys.exit(1)


if __name__ == "__main__":
    main()
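A condensed sketch of the same wiring, using only the APIs shown in the example above (the port and sleep duration are arbitrary):

import secrets

from multiaddr import Multiaddr
import trio

from libp2p import new_host
from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.kad_dht.kad_dht import DHTMode, KadDHT
from libp2p.tools.async_service import background_trio_service


async def dht_with_random_walk() -> None:
    host = new_host(key_pair=create_new_key_pair(secrets.token_bytes(32)))
    async with host.run(listen_addrs=[Multiaddr("/ip4/127.0.0.1/tcp/9100")]):
        # enable_random_walk switches on the routing-table refresher
        dht = KadDHT(host, DHTMode.SERVER, enable_random_walk=True)
        async with background_trio_service(dht):
            await trio.sleep(60)  # give a refresh cycle time to run


trio.run(dht_with_random_walk)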
@@ -1,3 +1,5 @@
+"""Libp2p Python implementation."""
+
 from collections.abc import (
     Mapping,
     Sequence,

@@ -6,15 +8,12 @@ from importlib.metadata import version as __version
 from typing import (
     Literal,
     Optional,
-    Type,
     cast,
 )

 import multiaddr

 from libp2p.abc import (
     IHost,
     IMuxedConn,
     INetworkService,
     IPeerRouting,
     IPeerStore,

@@ -39,6 +38,8 @@ from libp2p.host.routed_host import (
     RoutedHost,
 )
 from libp2p.network.swarm import (
+    ConnectionConfig,
+    RetryConfig,
     Swarm,
 )
 from libp2p.peer.id import (

@@ -46,22 +47,25 @@ from libp2p.peer.id import (
 )
 from libp2p.peer.peerstore import (
     PeerStore,
+    create_signed_peer_record,
 )
 from libp2p.security.insecure.transport import (
     PLAINTEXT_PROTOCOL_ID,
     InsecureTransport,
 )
-from libp2p.security.noise.transport import PROTOCOL_ID as NOISE_PROTOCOL_ID
-from libp2p.security.noise.transport import Transport as NoiseTransport
+from libp2p.security.noise.transport import (
+    PROTOCOL_ID as NOISE_PROTOCOL_ID,
+    Transport as NoiseTransport,
+)
 import libp2p.security.secio.transport as secio
 from libp2p.stream_muxer.mplex.mplex import (
     MPLEX_PROTOCOL_ID,
     Mplex,
 )
-from libp2p.stream_muxer.yamux.yamux import PROTOCOL_ID as YAMUX_PROTOCOL_ID
+from libp2p.stream_muxer.yamux.yamux import (
+    PROTOCOL_ID as YAMUX_PROTOCOL_ID,
+    Yamux,
+)
 from libp2p.transport.tcp.tcp import (
     TCP,
 )

@@ -81,6 +85,7 @@ DEFAULT_MUXER = "YAMUX"
 # Multiplexer options
 MUXER_YAMUX = "YAMUX"
 MUXER_MPLEX = "MPLEX"
+DEFAULT_NEGOTIATE_TIMEOUT = 5


 def set_default_muxer(muxer_name: Literal["YAMUX", "MPLEX"]) -> None:

@@ -150,14 +155,15 @@ def get_default_muxer_options() -> TMuxerOptions:
     else:  # YAMUX is default
         return create_yamux_muxer_option()


 def new_swarm(
-    key_pair: Optional[KeyPair] = None,
-    muxer_opt: Optional[TMuxerOptions] = None,
-    sec_opt: Optional[TSecurityOptions] = None,
-    peerstore_opt: Optional[IPeerStore] = None,
-    muxer_preference: Optional[Literal["YAMUX", "MPLEX"]] = None,
-    listen_addrs: Optional[Sequence[multiaddr.Multiaddr]] = None,
+    key_pair: KeyPair | None = None,
+    muxer_opt: TMuxerOptions | None = None,
+    sec_opt: TSecurityOptions | None = None,
+    peerstore_opt: IPeerStore | None = None,
+    muxer_preference: Literal["YAMUX", "MPLEX"] | None = None,
+    listen_addrs: Sequence[multiaddr.Multiaddr] | None = None,
+    retry_config: Optional["RetryConfig"] = None,
+    connection_config: Optional["ConnectionConfig"] = None,
 ) -> INetworkService:
     """
     Create a swarm instance based on the parameters.

@@ -200,7 +206,9 @@ def new_swarm(
             key_pair, noise_privkey=noise_key_pair.private_key
         ),
         TProtocol(secio.ID): secio.Transport(key_pair),
-        TProtocol(PLAINTEXT_PROTOCOL_ID): InsecureTransport(key_pair),
+        TProtocol(PLAINTEXT_PROTOCOL_ID): InsecureTransport(
+            key_pair, peerstore=peerstore_opt
+        ),
     }

     # Use given muxer preference if provided, otherwise use global default

@@ -232,17 +240,27 @@ def new_swarm(
     # Store our key pair in peerstore
     peerstore.add_key_pair(id_opt, key_pair)

-    return Swarm(id_opt, peerstore, upgrader, transport)
+    return Swarm(
+        id_opt,
+        peerstore,
+        upgrader,
+        transport,
+        retry_config=retry_config,
+        connection_config=connection_config
+    )


 def new_host(
-    key_pair: Optional[KeyPair] = None,
-    muxer_opt: Optional[TMuxerOptions] = None,
-    sec_opt: Optional[TSecurityOptions] = None,
-    peerstore_opt: Optional[IPeerStore] = None,
-    disc_opt: Optional[IPeerRouting] = None,
-    muxer_preference: Optional[Literal["YAMUX", "MPLEX"]] = None,
-    listen_addrs: Sequence[multiaddr.Multiaddr] = None,
+    key_pair: KeyPair | None = None,
+    muxer_opt: TMuxerOptions | None = None,
+    sec_opt: TSecurityOptions | None = None,
+    peerstore_opt: IPeerStore | None = None,
+    disc_opt: IPeerRouting | None = None,
+    muxer_preference: Literal["YAMUX", "MPLEX"] | None = None,
+    listen_addrs: Sequence[multiaddr.Multiaddr] | None = None,
     enable_mDNS: bool = False,
     bootstrap: list[str] | None = None,
+    negotiate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
 ) -> IHost:
     """
     Create a new libp2p host based on the given parameters.

@@ -254,6 +272,8 @@ def new_host(
     :param disc_opt: optional discovery
     :param muxer_preference: optional explicit muxer preference
     :param listen_addrs: optional list of multiaddrs to listen on
+    :param enable_mDNS: whether to enable mDNS discovery
+    :param bootstrap: optional list of bootstrap peer addresses as strings
     :return: return a host instance
     """
     swarm = new_swarm(

@@ -266,8 +286,13 @@ def new_host(
     )

     if disc_opt is not None:
-        return RoutedHost(swarm, disc_opt)
-    return BasicHost(swarm)
+        return RoutedHost(swarm, disc_opt, enable_mDNS, bootstrap)
+    return BasicHost(
+        network=swarm,
+        enable_mDNS=enable_mDNS,
+        bootstrap=bootstrap,
+        negotitate_timeout=negotiate_timeout
+    )


 __version__ = __version("libp2p")
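A minimal sketch of the widened constructor surface (the bootstrap address reuses the default from the random-walk example; all values are illustrative):

from libp2p import new_host

host = new_host(
    enable_mDNS=True,  # advertise and browse on the local network
    bootstrap=[
        "/ip4/104.131.131.82/tcp/4001/p2p/"
        "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
    ],
    negotiate_timeout=5,  # matches DEFAULT_NEGOTIATE_TIMEOUT above
)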
libp2p/abc.py (1165 lines changed)
File diff suppressed because it is too large
@@ -116,15 +116,15 @@ def initialize_pair(
         EncryptionParameters(
             cipher_type,
             hash_type,
-            first_half[0:iv_size],
-            first_half[iv_size + cipher_key_size :],
-            first_half[iv_size : iv_size + cipher_key_size],
+            bytes(first_half[0:iv_size]),
+            bytes(first_half[iv_size + cipher_key_size :]),
+            bytes(first_half[iv_size : iv_size + cipher_key_size]),
         ),
         EncryptionParameters(
             cipher_type,
             hash_type,
-            second_half[0:iv_size],
-            second_half[iv_size + cipher_key_size :],
-            second_half[iv_size : iv_size + cipher_key_size],
+            bytes(second_half[0:iv_size]),
+            bytes(second_half[iv_size + cipher_key_size :]),
+            bytes(second_half[iv_size : iv_size + cipher_key_size]),
         ),
     )
@@ -9,29 +9,40 @@ from libp2p.crypto.keys import (

 if sys.platform != "win32":
-    from fastecdsa import curve as curve_types
+    from fastecdsa import (
+        curve as curve_types,
+        keys,
+        point,
+    )
     from fastecdsa.encoding.sec1 import (
         SEC1Encoder,
     )
 else:
-    from coincurve import PrivateKey as CPrivateKey
-    from coincurve import PublicKey as CPublicKey
+    from coincurve import (
+        PrivateKey as CPrivateKey,
+        PublicKey as CPublicKey,
+    )


-def infer_local_type(curve: str) -> object:
-    """
-    Convert a str representation of some elliptic curve to a
-    representation understood by the backend of this module.
-    """
-    if curve != "P-256":
-        raise NotImplementedError("Only P-256 curve is supported")
-    if sys.platform != "win32":
-        return curve_types.P256
-    return "P-256"  # coincurve only supports P-256
+if sys.platform != "win32":
+
+    def infer_local_type(curve: str) -> curve_types.Curve:
+        """
+        Convert a str representation of some elliptic curve to a
+        representation understood by the backend of this module.
+        """
+        if curve != "P-256":
+            raise NotImplementedError("Only P-256 curve is supported")
+        return curve_types.P256
+
+else:
+
+    def infer_local_type(curve: str) -> str:
+        """
+        Convert a str representation of some elliptic curve to a
+        representation understood by the backend of this module.
+        """
+        if curve != "P-256":
+            raise NotImplementedError("Only P-256 curve is supported")
+        return "P-256"  # coincurve only supports P-256


 if sys.platform != "win32":

@@ -68,7 +79,10 @@ if sys.platform != "win32":
         return cls(private_key_impl, curve_type)

     def to_bytes(self) -> bytes:
-        return keys.export_key(self.impl, self.curve)
+        key_str = keys.export_key(self.impl, self.curve)
+        if key_str is None:
+            raise Exception("Key not found")
+        return key_str.encode()

     def get_type(self) -> KeyType:
         return KeyType.ECC_P256
@@ -4,8 +4,10 @@ from Crypto.Hash import (
 from nacl.exceptions import (
     BadSignatureError,
 )
-from nacl.public import PrivateKey as PrivateKeyImpl
-from nacl.public import PublicKey as PublicKeyImpl
+from nacl.public import (
+    PrivateKey as PrivateKeyImpl,
+    PublicKey as PublicKeyImpl,
+)
 from nacl.signing import (
     SigningKey,
     VerifyKey,

@@ -48,7 +50,7 @@ class Ed25519PrivateKey(PrivateKey):
         self.impl = impl

     @classmethod
-    def new(cls, seed: bytes = None) -> "Ed25519PrivateKey":
+    def new(cls, seed: bytes | None = None) -> "Ed25519PrivateKey":
         if not seed:
             seed = utils.random()

@@ -75,7 +77,7 @@ class Ed25519PrivateKey(PrivateKey):
         return Ed25519PublicKey(self.impl.public_key)


-def create_new_key_pair(seed: bytes = None) -> KeyPair:
+def create_new_key_pair(seed: bytes | None = None) -> KeyPair:
     private_key = Ed25519PrivateKey.new(seed)
     public_key = private_key.get_public_key()
     return KeyPair(private_key, public_key)
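The optional-seed signature makes deterministic test keys straightforward; a small sketch (the module path libp2p.crypto.ed25519 is assumed from the surrounding file, and the fixed 32-byte seed is for tests only):

from libp2p.crypto.ed25519 import create_new_key_pair

random_pair = create_new_key_pair()             # seed drawn internally
fixed_pair = create_new_key_pair(b"\x01" * 32)  # reproducible test key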
@@ -1,6 +1,6 @@
+from collections.abc import Callable
 import sys
 from typing import (
-    Callable,
     cast,
 )

@@ -81,12 +81,10 @@ class PrivateKey(Key):
     """A ``PrivateKey`` represents a cryptographic private key."""

     @abstractmethod
-    def sign(self, data: bytes) -> bytes:
-        ...
+    def sign(self, data: bytes) -> bytes: ...

     @abstractmethod
-    def get_public_key(self) -> PublicKey:
-        ...
+    def get_public_key(self) -> PublicKey: ...

     def _serialize_to_protobuf(self) -> crypto_pb2.PrivateKey:
         """Return the protobuf representation of this ``Key``."""
@@ -13,7 +13,7 @@ _sym_db = _symbol_database.Default()


-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dlibp2p/crypto/pb/crypto.proto\x12\tcrypto.pb\"?\n\tPublicKey\x12$\n\x08key_type\x18\x01 \x02(\x0e\x32\x12.crypto.pb.KeyType\x12\x0c\n\x04\x64\x61ta\x18\x02 \x02(\x0c\"@\n\nPrivateKey\x12$\n\x08key_type\x18\x01 \x02(\x0e\x32\x12.crypto.pb.KeyType\x12\x0c\n\x04\x64\x61ta\x18\x02 \x02(\x0c*G\n\x07KeyType\x12\x07\n\x03RSA\x10\x00\x12\x0b\n\x07\x45\x64\x32\x35\x35\x31\x39\x10\x01\x12\r\n\tSecp256k1\x10\x02\x12\t\n\x05\x45\x43\x44SA\x10\x03\x12\x0c\n\x08\x45\x43\x43_P256\x10\x04')
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dlibp2p/crypto/pb/crypto.proto\x12\tcrypto.pb\"?\n\tPublicKey\x12$\n\x08key_type\x18\x01 \x02(\x0e\x32\x12.crypto.pb.KeyType\x12\x0c\n\x04\x64\x61ta\x18\x02 \x02(\x0c\"@\n\nPrivateKey\x12$\n\x08key_type\x18\x01 \x02(\x0e\x32\x12.crypto.pb.KeyType\x12\x0c\n\x04\x64\x61ta\x18\x02 \x02(\x0c*S\n\x07KeyType\x12\x07\n\x03RSA\x10\x00\x12\x0b\n\x07\x45\x64\x32\x35\x35\x31\x39\x10\x01\x12\r\n\tSecp256k1\x10\x02\x12\t\n\x05\x45\x43\x44SA\x10\x03\x12\x0c\n\x08\x45\x43\x43_P256\x10\x04\x12\n\n\x06X25519\x10\x05')

 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
 _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.crypto.pb.crypto_pb2', globals())

@@ -21,7 +21,7 @@ if _descriptor._USE_C_DESCRIPTORS == False:

 DESCRIPTOR._options = None
 _KEYTYPE._serialized_start=175
-_KEYTYPE._serialized_end=246
+_KEYTYPE._serialized_end=258
 _PUBLICKEY._serialized_start=44
 _PUBLICKEY._serialized_end=107
 _PRIVATEKEY._serialized_start=109
@@ -28,6 +28,7 @@ class _KeyTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper):
     Secp256k1: _KeyType.ValueType  # 2
     ECDSA: _KeyType.ValueType  # 3
     ECC_P256: _KeyType.ValueType  # 4
+    X25519: _KeyType.ValueType  # 5

 class KeyType(_KeyType, metaclass=_KeyTypeEnumTypeWrapper): ...

@@ -36,6 +37,7 @@ Ed25519: KeyType.ValueType  # 1
 Secp256k1: KeyType.ValueType  # 2
 ECDSA: KeyType.ValueType  # 3
 ECC_P256: KeyType.ValueType  # 4
+X25519: KeyType.ValueType  # 5
 global___KeyType = KeyType

 @typing.final
@@ -37,7 +37,7 @@ class Secp256k1PrivateKey(PrivateKey):
         self.impl = impl

     @classmethod
-    def new(cls, secret: bytes = None) -> "Secp256k1PrivateKey":
+    def new(cls, secret: bytes | None = None) -> "Secp256k1PrivateKey":
         private_key_impl = coincurve.PrivateKey(secret)
         return cls(private_key_impl)

@@ -65,7 +65,7 @@ class Secp256k1PrivateKey(PrivateKey):
         return Secp256k1PublicKey(public_key_impl)


-def create_new_key_pair(secret: bytes = None) -> KeyPair:
+def create_new_key_pair(secret: bytes | None = None) -> KeyPair:
     """
     Returns a new Secp256k1 keypair derived from the provided ``secret``, a
     sequence of bytes corresponding to some integer between 0 and the group
@@ -1,13 +1,9 @@
 from collections.abc import (
     Awaitable,
+    Callable,
     Mapping,
 )
-from typing import (
-    TYPE_CHECKING,
-    Callable,
-    NewType,
-    Union,
-)
+from typing import TYPE_CHECKING, NewType, Union, cast

 if TYPE_CHECKING:
     from libp2p.abc import (

@@ -16,15 +12,9 @@ if TYPE_CHECKING:
         ISecureTransport,
     )
 else:
-
-    class INetStream:
-        pass
-
-    class IMuxedConn:
-        pass
-
-    class ISecureTransport:
-        pass
+    IMuxedConn = cast(type, object)
+    INetStream = cast(type, object)
+    ISecureTransport = cast(type, object)


 from libp2p.io.abc import (

@@ -38,12 +28,13 @@ from libp2p.pubsub.pb import (
 )

 TProtocol = NewType("TProtocol", str)
-StreamHandlerFn = Callable[["INetStream"], Awaitable[None]]
+StreamHandlerFn = Callable[[INetStream], Awaitable[None]]
 THandler = Callable[[ReadWriteCloser], Awaitable[None]]
-TSecurityOptions = Mapping[TProtocol, "ISecureTransport"]
-TMuxerClass = type["IMuxedConn"]
+TSecurityOptions = Mapping[TProtocol, ISecureTransport]
+TMuxerClass = type[IMuxedConn]
 TMuxerOptions = Mapping[TProtocol, TMuxerClass]
 SyncValidatorFn = Callable[[ID, rpc_pb2.Message], bool]
 AsyncValidatorFn = Callable[[ID, rpc_pb2.Message], Awaitable[bool]]
 ValidatorFn = Union[SyncValidatorFn, AsyncValidatorFn]
 UnsubscribeFn = Callable[[], Awaitable[None]]
 MessageID = NewType("MessageID", str)
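The validator aliases lend themselves to small, typed pubsub hooks; an illustrative sketch (the module path libp2p.custom_types is assumed for this file, and the 1 KiB size limit is arbitrary):

from libp2p.custom_types import SyncValidatorFn
from libp2p.peer.id import ID
from libp2p.pubsub.pb import rpc_pb2


def reject_oversized(peer: ID, msg: rpc_pb2.Message) -> bool:
    # Accept only messages up to 1 KiB
    return len(msg.data) <= 1024


validator: SyncValidatorFn = reject_oversized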
libp2p/discovery/__init__.py (new file, empty)
libp2p/discovery/bootstrap/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
"""Bootstrap peer discovery module for py-libp2p."""

from .bootstrap import BootstrapDiscovery

__all__ = ["BootstrapDiscovery"]
libp2p/discovery/bootstrap/bootstrap.py (new file, 312 lines)
@@ -0,0 +1,312 @@
import logging

from multiaddr import Multiaddr
from multiaddr.resolvers import DNSResolver
import trio

from libp2p.abc import ID, INetworkService, PeerInfo
from libp2p.discovery.bootstrap.utils import validate_bootstrap_addresses
from libp2p.discovery.events.peerDiscovery import peerDiscovery
from libp2p.network.exceptions import SwarmException
from libp2p.peer.peerinfo import info_from_p2p_addr
from libp2p.peer.peerstore import PERMANENT_ADDR_TTL

logger = logging.getLogger("libp2p.discovery.bootstrap")
resolver = DNSResolver()

DEFAULT_CONNECTION_TIMEOUT = 10


class BootstrapDiscovery:
    """
    Bootstrap-based peer discovery for py-libp2p.
    Connects to predefined bootstrap peers and adds them to the peerstore.
    """

    def __init__(self, swarm: INetworkService, bootstrap_addrs: list[str]):
        """
        Initialize BootstrapDiscovery.

        Args:
            swarm: The network service (swarm) instance
            bootstrap_addrs: List of bootstrap peer multiaddresses

        """
        self.swarm = swarm
        self.peerstore = swarm.peerstore
        self.bootstrap_addrs = bootstrap_addrs or []
        self.discovered_peers: set[str] = set()
        self.connection_timeout: int = DEFAULT_CONNECTION_TIMEOUT

    async def start(self) -> None:
        """Process bootstrap addresses and emit peer discovery events in parallel."""
        logger.info(
            f"Starting bootstrap discovery with "
            f"{len(self.bootstrap_addrs)} bootstrap addresses"
        )

        # Show all bootstrap addresses being processed
        for i, addr in enumerate(self.bootstrap_addrs):
            logger.debug(f"{i + 1}. {addr}")

        # Validate and filter bootstrap addresses
        self.bootstrap_addrs = validate_bootstrap_addresses(self.bootstrap_addrs)
        logger.info(f"Valid addresses after validation: {len(self.bootstrap_addrs)}")

        # Use a Trio nursery for parallel address processing
        try:
            async with trio.open_nursery() as nursery:
                logger.debug(
                    f"Starting {len(self.bootstrap_addrs)} parallel address "
                    f"processing tasks"
                )

                # Start all bootstrap address processing tasks in parallel
                for addr_str in self.bootstrap_addrs:
                    logger.debug(f"Starting parallel task for: {addr_str}")
                    nursery.start_soon(self._process_bootstrap_addr, addr_str)

                # The nursery will wait for all address processing tasks to complete
                logger.debug(
                    "Nursery active - waiting for address processing tasks to complete"
                )

        except trio.Cancelled:
            logger.debug("Bootstrap address processing cancelled - cleaning up tasks")
            raise
        except Exception as e:
            logger.error(f"Bootstrap address processing failed: {e}")
            raise

        logger.info("Bootstrap discovery startup complete - all tasks finished")

    def stop(self) -> None:
        """Clean up bootstrap discovery resources."""
        logger.info("Stopping bootstrap discovery and cleaning up tasks")

        # Clear discovered peers
        self.discovered_peers.clear()

        logger.debug("Bootstrap discovery cleanup completed")

    async def _process_bootstrap_addr(self, addr_str: str) -> None:
        """Convert a string address to PeerInfo and add it to the peerstore."""
        try:
            try:
                multiaddr = Multiaddr(addr_str)
            except Exception as e:
                logger.debug(f"Invalid multiaddr format '{addr_str}': {e}")
                return

            if self.is_dns_addr(multiaddr):
                resolved_addrs = await resolver.resolve(multiaddr)
                if resolved_addrs is None:
                    logger.warning(f"DNS resolution returned None for: {addr_str}")
                    return

                peer_id_str = multiaddr.get_peer_id()
                if peer_id_str is None:
                    logger.warning(f"Missing peer ID in DNS address: {addr_str}")
                    return
                peer_id = ID.from_base58(peer_id_str)
                addrs = [addr for addr in resolved_addrs]
                if not addrs:
                    logger.warning(f"No addresses resolved for DNS address: {addr_str}")
                    return
                peer_info = PeerInfo(peer_id, addrs)
                await self.add_addr(peer_info)
            else:
                peer_info = info_from_p2p_addr(multiaddr)
                await self.add_addr(peer_info)
        except Exception as e:
            logger.warning(f"Failed to process bootstrap address {addr_str}: {e}")

    def is_dns_addr(self, addr: Multiaddr) -> bool:
        """Check if the address is a DNS address."""
        return any(protocol.name == "dnsaddr" for protocol in addr.protocols())

    async def add_addr(self, peer_info: PeerInfo) -> None:
        """
        Add a peer to the peerstore, emit a discovery event,
        and attempt a connection in parallel.
        """
        logger.debug(
            f"Adding peer {peer_info.peer_id} with {len(peer_info.addrs)} addresses"
        )

        # Skip if it's our own peer
        if peer_info.peer_id == self.swarm.get_peer_id():
            logger.debug(f"Skipping own peer ID: {peer_info.peer_id}")
            return

        # Filter addresses to only include IPv4+TCP (the only supported protocol)
        ipv4_tcp_addrs = []
        filtered_out_addrs = []

        for addr in peer_info.addrs:
            if self._is_ipv4_tcp_addr(addr):
                ipv4_tcp_addrs.append(addr)
            else:
                filtered_out_addrs.append(addr)

        # Log filtering results
        logger.debug(
            f"Address filtering for {peer_info.peer_id}: "
            f"{len(ipv4_tcp_addrs)} IPv4+TCP, {len(filtered_out_addrs)} filtered"
        )

        # Skip peer if no IPv4+TCP addresses are available
        if not ipv4_tcp_addrs:
            logger.warning(
                f"❌ No IPv4+TCP addresses for {peer_info.peer_id} - "
                f"skipping connection attempts"
            )
            return

        # Add only IPv4+TCP addresses to the peerstore
        self.peerstore.add_addrs(peer_info.peer_id, ipv4_tcp_addrs, PERMANENT_ADDR_TTL)

        # Only emit a discovery event the first time we see this peer
        peer_id_str = str(peer_info.peer_id)
        if peer_id_str not in self.discovered_peers:
            # Track discovered peer
            self.discovered_peers.add(peer_id_str)
            # Emit peer discovery event
            peerDiscovery.emit_peer_discovered(peer_info)
            logger.info(f"Peer discovered: {peer_info.peer_id}")

            # Connect to peer (parallel across different bootstrap addresses)
            logger.debug("Connecting to discovered peer...")
            await self._connect_to_peer(peer_info.peer_id)

        else:
            logger.debug(
                f"Additional addresses added for existing peer: {peer_info.peer_id}"
            )
            # Even for existing peers, try to connect if not already connected
            if peer_info.peer_id not in self.swarm.connections:
                logger.debug("Connecting to existing peer...")
                await self._connect_to_peer(peer_info.peer_id)

    async def _connect_to_peer(self, peer_id: ID) -> None:
        """
        Attempt to establish a connection to a peer with a timeout.

        Uses swarm.dial_peer to connect using addresses stored in the peerstore.
        Times out after self.connection_timeout seconds to prevent hanging.
        """
        logger.debug(f"Connection attempt for peer: {peer_id}")

        # Pre-connection validation: check if already connected
        if peer_id in self.swarm.connections:
            logger.debug(
                f"Already connected to {peer_id} - skipping connection attempt"
            )
            return

        # Check available addresses before attempting connection
        available_addrs = self.peerstore.addrs(peer_id)
        logger.debug(f"Connecting to {peer_id} ({len(available_addrs)} addresses)")

        if not available_addrs:
            logger.error(f"❌ No addresses available for {peer_id} - cannot connect")
            return

        # Record start time for connection attempt monitoring
        connection_start_time = trio.current_time()

        try:
            with trio.move_on_after(self.connection_timeout):
                # Log connection attempt
                logger.debug(
                    f"Attempting connection to {peer_id} using "
                    f"{len(available_addrs)} addresses"
                )

                # Use swarm.dial_peer to connect using stored addresses
                await self.swarm.dial_peer(peer_id)

                # Calculate connection time
                connection_time = trio.current_time() - connection_start_time

                # Post-connection validation: verify the connection was established
                if peer_id in self.swarm.connections:
                    logger.info(
                        f"✅ Connected to {peer_id} (took {connection_time:.2f}s)"
                    )

                else:
                    logger.warning(
                        f"Dial succeeded but connection not found for {peer_id}"
                    )
        except trio.TooSlowError:
            logger.warning(
                f"❌ Connection to {peer_id} timed out after {self.connection_timeout}s"
            )
        except SwarmException as e:
            # Calculate failed connection time
            failed_connection_time = trio.current_time() - connection_start_time

            # Enhanced error logging
            error_msg = str(e)
            if "no addresses established a successful connection" in error_msg:
                logger.warning(
                    f"❌ Failed to connect to {peer_id} after trying all "
                    f"{len(available_addrs)} addresses "
                    f"(took {failed_connection_time:.2f}s)"
                )
                # Log individual address failures if this is a MultiError
                if (
                    e.__cause__ is not None
                    and hasattr(e.__cause__, "exceptions")
                    and getattr(e.__cause__, "exceptions", None) is not None
                ):
                    exceptions_list = getattr(e.__cause__, "exceptions")
                    logger.debug("📋 Individual address failure details:")
                    for i, addr_exception in enumerate(exceptions_list, 1):
                        logger.debug(f"Address {i}: {addr_exception}")
                        # Also log the actual address that failed
                        if i <= len(available_addrs):
                            logger.debug(f"Failed address: {available_addrs[i - 1]}")
                else:
                    logger.warning("No detailed exception information available")
            else:
                logger.warning(
                    f"❌ Failed to connect to {peer_id}: {e} "
                    f"(took {failed_connection_time:.2f}s)"
                )

        except Exception as e:
            # Handle unexpected errors that aren't swarm-specific
            failed_connection_time = trio.current_time() - connection_start_time
            logger.error(
                f"❌ Unexpected error connecting to {peer_id}: "
                f"{e} (took {failed_connection_time:.2f}s)"
            )
            # Don't re-raise, to avoid killing the nursery and other parallel tasks

    def _is_ipv4_tcp_addr(self, addr: Multiaddr) -> bool:
        """
        Check if an address is IPv4 with TCP protocol only.

        Filters out IPv6, UDP, QUIC, WebSocket, and other unsupported protocols.
        Only IPv4+TCP addresses are supported by the current transport.
        """
        try:
            protocols = addr.protocols()

            # Must have IPv4 protocol
            has_ipv4 = any(p.name == "ip4" for p in protocols)
            if not has_ipv4:
                return False

            # Must have TCP protocol
            has_tcp = any(p.name == "tcp" for p in protocols)
            if not has_tcp:
                return False

            return True

        except Exception:
            # If we can't parse the address, don't use it
            return False
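A minimal wiring sketch for the class above (the swarm argument and address are placeholders; in practice new_host(bootstrap=[...]) constructs and manages this for you):

from libp2p.abc import INetworkService
from libp2p.discovery.bootstrap import BootstrapDiscovery


async def bootstrap_swarm(swarm: INetworkService) -> None:
    discovery = BootstrapDiscovery(
        swarm,
        [
            "/ip4/104.131.131.82/tcp/4001/p2p/"
            "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
        ],
    )
    await discovery.start()  # validates, dials, and emits discovery events
    discovery.stop()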
libp2p/discovery/bootstrap/utils.py (new file, 51 lines)
@@ -0,0 +1,51 @@
"""Utility functions for bootstrap discovery."""

import logging

from multiaddr import Multiaddr

from libp2p.peer.peerinfo import InvalidAddrError, PeerInfo, info_from_p2p_addr

logger = logging.getLogger("libp2p.discovery.bootstrap.utils")


def validate_bootstrap_addresses(addrs: list[str]) -> list[str]:
    """
    Validate and filter bootstrap addresses.

    :param addrs: List of bootstrap address strings
    :return: List of valid bootstrap addresses
    """
    valid_addrs = []

    for addr_str in addrs:
        try:
            # Try to parse as a multiaddr
            multiaddr = Multiaddr(addr_str)

            # Try to extract peer info (this validates the p2p component)
            info_from_p2p_addr(multiaddr)

            valid_addrs.append(addr_str)
            logger.debug(f"Valid bootstrap address: {addr_str}")

        except (InvalidAddrError, ValueError, Exception) as e:
            logger.warning(f"Invalid bootstrap address '{addr_str}': {e}")
            continue

    return valid_addrs


def parse_bootstrap_peer_info(addr_str: str) -> PeerInfo | None:
    """
    Parse a bootstrap address string into PeerInfo.

    :param addr_str: Bootstrap address string
    :return: PeerInfo object, or None if parsing fails
    """
    try:
        multiaddr = Multiaddr(addr_str)
        return info_from_p2p_addr(multiaddr)
    except Exception as e:
        logger.error(f"Failed to parse bootstrap address '{addr_str}': {e}")
        return None
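A quick demonstration of the two helpers above (the second address is deliberately malformed and gets filtered out):

from libp2p.discovery.bootstrap.utils import (
    parse_bootstrap_peer_info,
    validate_bootstrap_addresses,
)

addrs = [
    "/ip4/104.131.131.82/tcp/4001/p2p/"
    "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
    "/ip4/not-an-address",
]
valid = validate_bootstrap_addresses(addrs)  # keeps only the first entry
info = parse_bootstrap_peer_info(valid[0])   # PeerInfo, or None on failure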
libp2p/discovery/events/__init__.py (new file, empty)

libp2p/discovery/events/peerDiscovery.py (new file, 26 lines)
@@ -0,0 +1,26 @@
from collections.abc import (
    Callable,
)

from libp2p.abc import (
    PeerInfo,
)

TTL: int = 60 * 60  # Time-to-live for discovered peers in seconds


class PeerDiscovery:
    def __init__(self) -> None:
        self._peer_discovered_handlers: list[Callable[[PeerInfo], None]] = []

    def register_peer_discovered_handler(
        self, handler: Callable[[PeerInfo], None]
    ) -> None:
        self._peer_discovered_handlers.append(handler)

    def emit_peer_discovered(self, peer_info: PeerInfo) -> None:
        for handler in self._peer_discovered_handlers:
            handler(peer_info)


peerDiscovery = PeerDiscovery()
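Handlers attach to the shared singleton; a small sketch that collects discovered peers instead of only logging them (the handler and set names are illustrative):

from libp2p.abc import PeerInfo
from libp2p.discovery.events.peerDiscovery import peerDiscovery

seen: set[str] = set()


def remember_peer(peer_info: PeerInfo) -> None:
    seen.add(str(peer_info.peer_id))


peerDiscovery.register_peer_discovered_handler(remember_peer)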
libp2p/discovery/mdns/__init__.py (new file, empty)

libp2p/discovery/mdns/broadcaster.py (new file, 91 lines)
@@ -0,0 +1,91 @@
import logging
import socket

from zeroconf import (
    EventLoopBlocked,
    ServiceInfo,
    Zeroconf,
)

logger = logging.getLogger("libp2p.discovery.mdns.broadcaster")


class PeerBroadcaster:
    """
    Broadcasts this peer's presence on the local network using mDNS/zeroconf.
    Registers a service with the peer's ID in the TXT record as per the libp2p spec.
    """

    def __init__(
        self,
        zeroconf: Zeroconf,
        service_type: str,
        service_name: str,
        peer_id: str,
        port: int,
    ):
        self.zeroconf = zeroconf
        self.service_type = service_type
        self.peer_id = peer_id
        self.port = port
        self.service_name = service_name

        # Get the local IP address
        local_ip = self._get_local_ip()
        hostname = socket.gethostname()

        self.service_info = ServiceInfo(
            type_=self.service_type,
            name=self.service_name,
            port=self.port,
            properties={b"id": self.peer_id.encode()},
            server=f"{hostname}.local.",
            addresses=[socket.inet_aton(local_ip)],
        )

    def _get_local_ip(self) -> str:
        """Get the local IP address of this machine."""
        try:
            # Connect to a remote address to determine the local IP.
            # This doesn't actually send data.
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                s.connect(("8.8.8.8", 80))
                local_ip = s.getsockname()[0]
                return local_ip
        except Exception:
            # Fall back to localhost if we can't determine the IP
            return "127.0.0.1"

    def register(self) -> None:
        """Register the peer's mDNS service on the network."""
        try:
            self.zeroconf.register_service(self.service_info)
            logger.debug(f"mDNS service registered: {self.service_name}")
        except EventLoopBlocked as e:
            logger.warning(
                "EventLoopBlocked while registering mDNS '%s': %s", self.service_name, e
            )
        except Exception as e:
            logger.error(
                "Unexpected error during mDNS registration for '%s': %r",
                self.service_name,
                e,
            )

    def unregister(self) -> None:
        """Unregister the peer's mDNS service from the network."""
        try:
            self.zeroconf.unregister_service(self.service_info)
            logger.debug(f"mDNS service unregistered: {self.service_name}")
        except EventLoopBlocked as e:
            logger.warning(
                "EventLoopBlocked while unregistering mDNS '%s': %s",
                self.service_name,
                e,
            )
        except Exception as e:
            logger.error(
                "Unexpected error during mDNS unregistration for '%s': %r",
                self.service_name,
                e,
            )
libp2p/discovery/mdns/listener.py (new file, 83 lines)
@@ -0,0 +1,83 @@
import logging
import socket

from zeroconf import (
    ServiceBrowser,
    ServiceInfo,
    ServiceListener,
    Zeroconf,
)

from libp2p.abc import IPeerStore, Multiaddr
from libp2p.discovery.events.peerDiscovery import peerDiscovery
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo

logger = logging.getLogger("libp2p.discovery.mdns.listener")


class PeerListener(ServiceListener):
    """mDNS listener, implemented as a true ServiceListener subclass."""

    def __init__(
        self,
        peerstore: IPeerStore,
        zeroconf: Zeroconf,
        service_type: str,
        service_name: str,
    ) -> None:
        self.peerstore = peerstore
        self.zeroconf = zeroconf
        self.service_type = service_type
        self.service_name = service_name
        self.discovered_services: dict[str, ID] = {}
        self.browser = ServiceBrowser(self.zeroconf, self.service_type, listener=self)

    def add_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        if name == self.service_name:
            return
        logger.debug(f"Adding service: {name}")
        info = zc.get_service_info(type_, name, timeout=5000)
        if not info:
            return
        peer_info = self._extract_peer_info(info)
        if peer_info:
            self.discovered_services[name] = peer_info.peer_id
            self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)
            peerDiscovery.emit_peer_discovered(peer_info)
            logger.debug(f"Discovered Peer: {peer_info.peer_id}")

    def remove_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        if name == self.service_name:
            return
        logger.debug(f"Removing service: {name}")
        peer_id = self.discovered_services.pop(name)
        self.peerstore.clear_addrs(peer_id)
        logger.debug(f"Removed Peer: {peer_id}")

    def update_service(self, zc: Zeroconf, type_: str, name: str) -> None:
        info = zc.get_service_info(type_, name, timeout=5000)
        if not info:
            return
        peer_info = self._extract_peer_info(info)
        if peer_info:
            self.peerstore.clear_addrs(peer_info.peer_id)
            self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)
            logger.debug(f"Updated Peer {peer_info.peer_id}")

    def _extract_peer_info(self, info: ServiceInfo) -> PeerInfo | None:
        try:
            addrs = [
                Multiaddr(f"/ip4/{socket.inet_ntoa(addr)}/tcp/{info.port}")
                for addr in info.addresses
            ]
            pid_bytes = info.properties.get(b"id")
            if not pid_bytes:
                return None
            pid = ID.from_base58(pid_bytes.decode())
            return PeerInfo(peer_id=pid, addrs=addrs)
        except Exception:
            return None

    def stop(self) -> None:
        self.browser.cancel()
libp2p/discovery/mdns/mdns.py (new file, 73 lines)
@@ -0,0 +1,73 @@
"""
mDNS-based peer discovery for py-libp2p.
Conforms to https://github.com/libp2p/specs/blob/master/discovery/mdns.md
Uses zeroconf for mDNS broadcast/listen. Async operations use trio.
"""

import logging

from zeroconf import (
    Zeroconf,
)

from libp2p.abc import (
    INetworkService,
)

from .broadcaster import (
    PeerBroadcaster,
)
from .listener import (
    PeerListener,
)
from .utils import (
    stringGen,
)

logger = logging.getLogger("libp2p.discovery.mdns")

SERVICE_TYPE = "_p2p._udp.local."
MCAST_PORT = 5353
MCAST_ADDR = "224.0.0.251"


class MDNSDiscovery:
    """
    mDNS-based peer discovery for py-libp2p, using zeroconf.
    Conforms to the libp2p mDNS discovery spec.
    """

    def __init__(self, swarm: INetworkService, port: int = 8000):
        self.peer_id = str(swarm.get_peer_id())
        self.port = port
        self.zeroconf = Zeroconf()
        self.serviceName = f"{stringGen()}.{SERVICE_TYPE}"
        self.peerstore = swarm.peerstore
        self.swarm = swarm
        self.broadcaster = PeerBroadcaster(
            zeroconf=self.zeroconf,
            service_type=SERVICE_TYPE,
            service_name=self.serviceName,
            peer_id=self.peer_id,
            port=self.port,
        )
        self.listener = PeerListener(
            zeroconf=self.zeroconf,
            peerstore=self.peerstore,
            service_type=SERVICE_TYPE,
            service_name=self.serviceName,
        )

    def start(self) -> None:
        """Register this peer and start listening for others."""
        logger.debug(
            f"Starting mDNS discovery for peer {self.peer_id} on port {self.port}"
        )
        self.broadcaster.register()
        # Listener is started in constructor

    def stop(self) -> None:
        """Unregister this peer and clean up zeroconf resources."""
        logger.debug("Stopping mDNS discovery")
        self.broadcaster.unregister()
        self.zeroconf.close()
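For reviewers trying this out, a minimal usage sketch (hedged: it assumes a trio program that already owns an INetworkService exposing get_peer_id() and .peerstore, which is all MDNSDiscovery touches):

    import trio

    from libp2p.discovery.mdns.mdns import MDNSDiscovery

    async def discover_for(swarm, seconds: float = 10.0) -> None:
        discovery = MDNSDiscovery(swarm, port=8000)
        discovery.start()  # register our record; the listener browses already
        try:
            # give add_service callbacks time to populate the peerstore
            await trio.sleep(seconds)
        finally:
            discovery.stop()  # unregister and close the zeroconf sockets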
libp2p/discovery/mdns/utils.py (new file, 11 lines)
@@ -0,0 +1,11 @@
import random
import string


def stringGen(length: int = 63) -> str:
    """Generate a random string of lowercase letters and digits."""
    # Parameter renamed from `len` so the builtin is not shadowed
    charset = string.ascii_lowercase + string.digits
    result = []
    for _ in range(length):
        result.append(random.choice(charset))
    return "".join(result)
libp2p/discovery/random_walk/__init__.py (new file, 17 lines)
@@ -0,0 +1,17 @@
"""Random walk discovery modules for py-libp2p."""

from .rt_refresh_manager import RTRefreshManager
from .random_walk import RandomWalk
from .exceptions import (
    RoutingTableRefreshError,
    RandomWalkError,
    PeerValidationError,
)

__all__ = [
    "RTRefreshManager",
    "RandomWalk",
    "RoutingTableRefreshError",
    "RandomWalkError",
    "PeerValidationError",
]
libp2p/discovery/random_walk/config.py (new file, 16 lines)
@@ -0,0 +1,16 @@
from typing import Final

# Timing constants (matching go-libp2p)
PEER_PING_TIMEOUT: Final[float] = 10.0  # seconds
REFRESH_QUERY_TIMEOUT: Final[float] = 60.0  # seconds
REFRESH_INTERVAL: Final[float] = 300.0  # 5 minutes
SUCCESSFUL_OUTBOUND_QUERY_GRACE_PERIOD: Final[float] = 60.0  # 1 minute

# Routing table thresholds
MIN_RT_REFRESH_THRESHOLD: Final[int] = 4  # Minimum peers before triggering refresh
MAX_N_BOOTSTRAPPERS: Final[int] = 2  # Maximum bootstrap peers to try

# Random walk specific
RANDOM_WALK_CONCURRENCY: Final[int] = 3  # Number of concurrent random walks
RANDOM_WALK_ENABLED: Final[bool] = True  # Enable automatic random walks
RANDOM_WALK_RT_THRESHOLD: Final[int] = 20  # RT size threshold for peerstore fallback
libp2p/discovery/random_walk/exceptions.py (new file, 19 lines)
@@ -0,0 +1,19 @@
from libp2p.exceptions import BaseLibp2pError


class RoutingTableRefreshError(BaseLibp2pError):
    """Base exception for routing table refresh operations."""

    pass


class RandomWalkError(RoutingTableRefreshError):
    """Exception raised during random walk operations."""

    pass


class PeerValidationError(RoutingTableRefreshError):
    """Exception raised when peer validation fails."""

    pass
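Because both specific errors derive from RoutingTableRefreshError, callers can filter on the base class; a small illustrative sketch (safe_walk is not part of the patch):

    from libp2p.discovery.random_walk.exceptions import (
        RandomWalkError,
        RoutingTableRefreshError,
    )

    async def safe_walk(walker):
        try:
            return await walker.perform_random_walk()
        except RandomWalkError:
            return []  # a single failed walk is non-fatal
        except RoutingTableRefreshError:
            raise  # other refresh failures propagate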
libp2p/discovery/random_walk/random_walk.py (new file, 218 lines)
@@ -0,0 +1,218 @@
from collections.abc import Awaitable, Callable
import logging
import secrets

import trio

from libp2p.abc import IHost
from libp2p.discovery.random_walk.config import (
    RANDOM_WALK_CONCURRENCY,
    RANDOM_WALK_RT_THRESHOLD,
    REFRESH_QUERY_TIMEOUT,
)
from libp2p.discovery.random_walk.exceptions import RandomWalkError
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo

logger = logging.getLogger("libp2p.discovery.random_walk")


class RandomWalk:
    """
    Random Walk implementation for peer discovery in Kademlia DHT.

    Generates random peer IDs and performs FIND_NODE queries to discover
    new peers and populate the routing table.
    """

    def __init__(
        self,
        host: IHost,
        local_peer_id: ID,
        query_function: Callable[[bytes], Awaitable[list[ID]]],
    ):
        """
        Initialize Random Walk module.

        Args:
            host: The libp2p host instance
            local_peer_id: Local peer ID
            query_function: Function to query for closest peers given target key bytes

        """
        self.host = host
        self.local_peer_id = local_peer_id
        self.query_function = query_function

    def generate_random_peer_id(self) -> str:
        """
        Generate a completely random peer ID for random walk queries.

        Returns:
            Random peer ID as a hex string

        """
        # Generate 32 random bytes (256 bits) - same as go-libp2p
        random_bytes = secrets.token_bytes(32)
        # Convert to hex string for query
        return random_bytes.hex()

    async def perform_random_walk(self) -> list[PeerInfo]:
        """
        Perform a single random walk operation.

        Returns:
            List of validated peers discovered during the walk

        """
        try:
            # Generate random peer ID
            random_peer_id = self.generate_random_peer_id()
            logger.info(f"Starting random walk for peer ID: {random_peer_id}")

            # Perform FIND_NODE query
            discovered_peer_ids: list[ID] = []

            with trio.move_on_after(REFRESH_QUERY_TIMEOUT):
                # Call the query function with target key bytes
                target_key = bytes.fromhex(random_peer_id)
                discovered_peer_ids = await self.query_function(target_key) or []

            if not discovered_peer_ids:
                logger.debug(f"No peers discovered in random walk for {random_peer_id}")
                return []

            logger.info(
                f"Discovered {len(discovered_peer_ids)} peers in random walk "
                f"for {random_peer_id[:8]}..."  # Show only first 8 chars for brevity
            )

            # Convert peer IDs to PeerInfo objects and validate
            validated_peers: list[PeerInfo] = []

            for peer_id in discovered_peer_ids:
                try:
                    # Get addresses from peerstore
                    addrs = self.host.get_peerstore().addrs(peer_id)
                    if addrs:
                        peer_info = PeerInfo(peer_id, addrs)
                        validated_peers.append(peer_info)
                except Exception as e:
                    logger.debug(f"Failed to create PeerInfo for {peer_id}: {e}")
                    continue

            return validated_peers

        except Exception as e:
            logger.error(f"Random walk failed: {e}")
            raise RandomWalkError(f"Random walk operation failed: {e}") from e

    async def run_concurrent_random_walks(
        self, count: int = RANDOM_WALK_CONCURRENCY, current_routing_table_size: int = 0
    ) -> list[PeerInfo]:
        """
        Run multiple random walks concurrently.

        Args:
            count: Number of concurrent random walks to perform
            current_routing_table_size: Current size of routing table (for optimization)

        Returns:
            Combined list of all validated peers discovered

        """
        all_validated_peers: list[PeerInfo] = []
        logger.info(f"Starting {count} concurrent random walks")

        # First, try to add peers from peerstore if routing table is small
        if current_routing_table_size < RANDOM_WALK_RT_THRESHOLD:
            try:
                peerstore_peers = self._get_peerstore_peers()
                if peerstore_peers:
                    logger.debug(
                        f"RT size ({current_routing_table_size}) below threshold, "
                        f"adding {len(peerstore_peers)} peerstore peers"
                    )
                    all_validated_peers.extend(peerstore_peers)
            except Exception as e:
                logger.warning(f"Error processing peerstore peers: {e}")

        async def single_walk() -> None:
            try:
                peers = await self.perform_random_walk()
                all_validated_peers.extend(peers)
            except Exception as e:
                logger.warning(f"Concurrent random walk failed: {e}")
                return

        # Run concurrent random walks
        async with trio.open_nursery() as nursery:
            for _ in range(count):
                nursery.start_soon(single_walk)

        # Remove duplicates based on peer ID
        unique_peers = {}
        for peer in all_validated_peers:
            unique_peers[peer.peer_id] = peer

        result = list(unique_peers.values())
        logger.info(
            f"Concurrent random walks completed: {len(result)} unique peers discovered"
        )
        return result

    def _get_peerstore_peers(self) -> list[PeerInfo]:
        """
        Get peer info objects from the host's peerstore.

        Returns:
            List of PeerInfo objects from peerstore

        """
        try:
            peerstore = self.host.get_peerstore()
            peer_ids = peerstore.peers_with_addrs()

            peer_infos = []
            for peer_id in peer_ids:
                try:
                    # Skip local peer
                    if peer_id == self.local_peer_id:
                        continue

                    peer_info = peerstore.peer_info(peer_id)
                    if peer_info and peer_info.addrs:
                        # Filter for compatible addresses (TCP + IPv4)
                        if self._has_compatible_addresses(peer_info):
                            peer_infos.append(peer_info)
                except Exception as e:
                    logger.debug(f"Error getting peer info for {peer_id}: {e}")

            return peer_infos

        except Exception as e:
            logger.warning(f"Error accessing peerstore: {e}")
            return []

    def _has_compatible_addresses(self, peer_info: PeerInfo) -> bool:
        """
        Check if a peer has TCP+IPv4 compatible addresses.

        Args:
            peer_info: PeerInfo to check

        Returns:
            True if peer has compatible addresses

        """
        if not peer_info.addrs:
            return False

        for addr in peer_info.addrs:
            addr_str = str(addr)
            # Check for TCP and IPv4 compatibility, avoid QUIC
            if "/tcp/" in addr_str and "/ip4/" in addr_str and "/quic" not in addr_str:
                return True

        return False
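A hedged driver sketch for RandomWalk in isolation (the stub query_function stands in for a real FIND_NODE; KadDHT wires the real one further below):

    import trio

    from libp2p.discovery.random_walk.random_walk import RandomWalk
    from libp2p.peer.id import ID

    async def demo(host) -> None:
        async def query_function(target_key: bytes) -> list[ID]:
            return []  # a real DHT would run FIND_NODE toward target_key

        walker = RandomWalk(host, host.get_id(), query_function)
        peers = await walker.run_concurrent_random_walks(count=3)
        print(f"discovered {len(peers)} unique peers")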
libp2p/discovery/random_walk/rt_refresh_manager.py (new file, 208 lines)
@@ -0,0 +1,208 @@
from collections.abc import Awaitable, Callable
import logging
import time
from typing import Protocol

import trio

from libp2p.abc import IHost
from libp2p.discovery.random_walk.config import (
    MIN_RT_REFRESH_THRESHOLD,
    RANDOM_WALK_CONCURRENCY,
    RANDOM_WALK_ENABLED,
    REFRESH_INTERVAL,
)
from libp2p.discovery.random_walk.exceptions import RoutingTableRefreshError
from libp2p.discovery.random_walk.random_walk import RandomWalk
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo


class RoutingTableProtocol(Protocol):
    """Protocol for routing table operations needed by RT refresh manager."""

    def size(self) -> int:
        """Return the current size of the routing table."""
        ...

    async def add_peer(self, peer_obj: PeerInfo) -> bool:
        """Add a peer to the routing table."""
        ...


logger = logging.getLogger("libp2p.discovery.random_walk.rt_refresh_manager")


class RTRefreshManager:
    """
    Routing Table Refresh Manager for py-libp2p.

    Manages periodic routing table refreshes and random walk operations
    to maintain routing table health and discover new peers.
    """

    def __init__(
        self,
        host: IHost,
        routing_table: RoutingTableProtocol,
        local_peer_id: ID,
        query_function: Callable[[bytes], Awaitable[list[ID]]],
        enable_auto_refresh: bool = RANDOM_WALK_ENABLED,
        refresh_interval: float = REFRESH_INTERVAL,
        min_refresh_threshold: int = MIN_RT_REFRESH_THRESHOLD,
    ):
        """
        Initialize RT Refresh Manager.

        Args:
            host: The libp2p host instance
            routing_table: Routing table of host
            local_peer_id: Local peer ID
            query_function: Function to query for closest peers given target key bytes
            enable_auto_refresh: Whether to enable automatic refresh
            refresh_interval: Interval between refreshes in seconds
            min_refresh_threshold: Minimum RT size before triggering refresh

        """
        self.host = host
        self.routing_table = routing_table
        self.local_peer_id = local_peer_id
        self.query_function = query_function

        self.enable_auto_refresh = enable_auto_refresh
        self.refresh_interval = refresh_interval
        self.min_refresh_threshold = min_refresh_threshold

        # Initialize random walk module
        self.random_walk = RandomWalk(
            host=host,
            local_peer_id=self.local_peer_id,
            query_function=query_function,
        )

        # Control variables
        self._running = False
        self._nursery: trio.Nursery | None = None

        # Tracking
        self._last_refresh_time = 0.0
        self._refresh_done_callbacks: list[Callable[[], None]] = []

    async def start(self) -> None:
        """Start the RT Refresh Manager."""
        if self._running:
            logger.warning("RT Refresh Manager is already running")
            return

        self._running = True

        logger.info("Starting RT Refresh Manager")

        # Start the main loop
        async with trio.open_nursery() as nursery:
            self._nursery = nursery
            nursery.start_soon(self._main_loop)

    async def stop(self) -> None:
        """Stop the RT Refresh Manager."""
        if not self._running:
            return

        logger.info("Stopping RT Refresh Manager")
        self._running = False

    async def _main_loop(self) -> None:
        """Main loop for the RT Refresh Manager."""
        logger.info("RT Refresh Manager main loop started")

        # Initial refresh if auto-refresh is enabled
        if self.enable_auto_refresh:
            await self._do_refresh(force=True)

        try:
            while self._running:
                async with trio.open_nursery() as nursery:
                    # Schedule periodic refresh if enabled
                    if self.enable_auto_refresh:
                        nursery.start_soon(self._periodic_refresh_task)

        except Exception as e:
            logger.error(f"RT Refresh Manager main loop error: {e}")
        finally:
            logger.info("RT Refresh Manager main loop stopped")

    async def _periodic_refresh_task(self) -> None:
        """Task for periodic refreshes."""
        while self._running:
            await trio.sleep(self.refresh_interval)
            if self._running:
                await self._do_refresh()

    async def _do_refresh(self, force: bool = False) -> None:
        """
        Perform routing table refresh operation.

        Args:
            force: Whether to force refresh regardless of timing

        """
        try:
            current_time = time.time()

            # Check if refresh is needed
            if not force:
                if current_time - self._last_refresh_time < self.refresh_interval:
                    logger.debug("Skipping refresh: interval not elapsed")
                    return

                if self.routing_table.size() >= self.min_refresh_threshold:
                    logger.debug("Skipping refresh: routing table size above threshold")
                    return

            logger.info(f"Starting routing table refresh (force={force})")
            start_time = current_time

            # Perform random walks to discover new peers
            logger.info("Running concurrent random walks to discover new peers")
            current_rt_size = self.routing_table.size()
            discovered_peers = await self.random_walk.run_concurrent_random_walks(
                count=RANDOM_WALK_CONCURRENCY,
                current_routing_table_size=current_rt_size,
            )

            # Add discovered peers to routing table
            added_count = 0
            for peer_info in discovered_peers:
                result = await self.routing_table.add_peer(peer_info)
                if result:
                    added_count += 1

            self._last_refresh_time = current_time

            duration = time.time() - start_time
            logger.info(
                f"Routing table refresh completed: "
                f"{added_count}/{len(discovered_peers)} peers added, "
                f"RT size: {self.routing_table.size()}, "
                f"duration: {duration:.2f}s"
            )

            # Notify refresh completion
            for callback in self._refresh_done_callbacks:
                try:
                    callback()
                except Exception as e:
                    logger.warning(f"Refresh callback error: {e}")

        except Exception as e:
            logger.error(f"Routing table refresh failed: {e}")
            raise RoutingTableRefreshError(f"Refresh operation failed: {e}") from e

    def add_refresh_done_callback(self, callback: Callable[[], None]) -> None:
        """Add a callback to be called when refresh completes."""
        self._refresh_done_callbacks.append(callback)

    def remove_refresh_done_callback(self, callback: Callable[[], None]) -> None:
        """Remove a refresh completion callback."""
        if callback in self._refresh_done_callbacks:
            self._refresh_done_callbacks.remove(callback)
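Wiring the manager by hand looks roughly like this (hedged sketch; query_fn is assumed to be an async bytes -> list[ID] callable such as the one KadDHT builds below):

    import trio

    from libp2p.discovery.random_walk import RTRefreshManager

    async def run_refresh(host, routing_table, query_fn) -> None:
        manager = RTRefreshManager(
            host=host,
            routing_table=routing_table,
            local_peer_id=host.get_id(),
            query_function=query_fn,
            refresh_interval=60.0,  # shorter than the 300 s default, for testing
        )
        manager.add_refresh_done_callback(lambda: print("refresh round done"))
        async with trio.open_nursery() as nursery:
            # start() blocks in its own nursery, so run it as a task
            nursery.start_soon(manager.start)
            await trio.sleep(180)
            await manager.stop()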
@@ -1,7 +1,4 @@
import logging
from typing import (
    Union,
)

from libp2p.custom_types import (
    TProtocol,
@@ -94,7 +91,7 @@ class AutoNATService:
        finally:
            await stream.close()

    async def _handle_request(self, request: Union[bytes, Message]) -> Message:
    async def _handle_request(self, request: bytes | Message) -> Message:
        """
        Process an AutoNAT protocol request.

@@ -84,26 +84,23 @@ class AutoNAT:
        request: Any,
        target: str,
        options: tuple[Any, ...] = (),
        channel_credentials: Optional[Any] = None,
        call_credentials: Optional[Any] = None,
        channel_credentials: Any | None = None,
        call_credentials: Any | None = None,
        insecure: bool = False,
        compression: Optional[Any] = None,
        wait_for_ready: Optional[bool] = None,
        timeout: Optional[float] = None,
        metadata: Optional[list[tuple[str, str]]] = None,
        compression: Any | None = None,
        wait_for_ready: bool | None = None,
        timeout: float | None = None,
        metadata: list[tuple[str, str]] | None = None,
    ) -> Any:
        return grpc.experimental.unary_unary(
            request,
            target,
        channel = grpc.secure_channel(target, channel_credentials) if channel_credentials else grpc.insecure_channel(target)
        return channel.unary_unary(
            "/autonat.pb.AutoNAT/Dial",
            autonat__pb2.Message.SerializeToString,
            autonat__pb2.Message.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            request_serializer=autonat__pb2.Message.SerializeToString,
            response_deserializer=autonat__pb2.Message.FromString,
            _registered_method=True,
        )(
            request,
            timeout=timeout,
            metadata=metadata,
            wait_for_ready=wait_for_ready,
        )
@@ -3,6 +3,7 @@ from collections.abc import (
    Sequence,
)
from contextlib import (
    AbstractAsyncContextManager,
    asynccontextmanager,
)
import logging

@@ -28,6 +29,8 @@ from libp2p.custom_types import (
    StreamHandlerFn,
    TProtocol,
)
from libp2p.discovery.bootstrap.bootstrap import BootstrapDiscovery
from libp2p.discovery.mdns.mdns import MDNSDiscovery
from libp2p.host.defaults import (
    get_default_protocols,
)

@@ -40,6 +43,7 @@ from libp2p.peer.id import (
from libp2p.peer.peerinfo import (
    PeerInfo,
)
from libp2p.peer.peerstore import create_signed_peer_record
from libp2p.protocol_muxer.exceptions import (
    MultiselectClientError,
    MultiselectError,

@@ -69,6 +73,7 @@ if TYPE_CHECKING:


logger = logging.getLogger("libp2p.network.basic_host")
DEFAULT_NEGOTIATE_TIMEOUT = 5


class BasicHost(IHost):

@@ -88,15 +93,31 @@ class BasicHost(IHost):
    def __init__(
        self,
        network: INetworkService,
        default_protocols: "OrderedDict[TProtocol, StreamHandlerFn]" = None,
        enable_mDNS: bool = False,
        bootstrap: list[str] | None = None,
        default_protocols: Optional["OrderedDict[TProtocol, StreamHandlerFn]"] = None,
        negotitate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
    ) -> None:
        self._network = network
        self._network.set_stream_handler(self._swarm_stream_handler)
        self.peerstore = self._network.peerstore
        self.negotiate_timeout = negotitate_timeout
        # Protocol muxing
        default_protocols = default_protocols or get_default_protocols(self)
        self.multiselect = Multiselect(default_protocols)
        self.multiselect = Multiselect(dict(default_protocols.items()))
        self.multiselect_client = MultiselectClient()
        if enable_mDNS:
            self.mDNS = MDNSDiscovery(network)
        if bootstrap:
            self.bootstrap = BootstrapDiscovery(network, bootstrap)

        # Cache a signed record for the local node in the PeerStore
        envelope = create_signed_peer_record(
            self.get_id(),
            self.get_addrs(),
            self.get_private_key(),
        )
        self.get_peerstore().set_local_record(envelope)

    def get_id(self) -> ID:
        """

@@ -147,19 +168,35 @@ class BasicHost(IHost):
        """
        return list(self._network.connections.keys())

    @asynccontextmanager
    async def run(
    def run(
        self, listen_addrs: Sequence[multiaddr.Multiaddr]
    ) -> AsyncIterator[None]:
    ) -> AbstractAsyncContextManager[None]:
        """
        Run the host instance and listen to ``listen_addrs``.

        :param listen_addrs: a sequence of multiaddrs that we want to listen to
        """
        network = self.get_network()
        async with background_trio_service(network):
            await network.listen(*listen_addrs)
            yield

        @asynccontextmanager
        async def _run() -> AsyncIterator[None]:
            network = self.get_network()
            async with background_trio_service(network):
                await network.listen(*listen_addrs)
                if hasattr(self, "mDNS") and self.mDNS is not None:
                    logger.debug("Starting mDNS Discovery")
                    self.mDNS.start()
                if hasattr(self, "bootstrap") and self.bootstrap is not None:
                    logger.debug("Starting Bootstrap Discovery")
                    await self.bootstrap.start()
                try:
                    yield
                finally:
                    if hasattr(self, "mDNS") and self.mDNS is not None:
                        self.mDNS.stop()
                    if hasattr(self, "bootstrap") and self.bootstrap is not None:
                        self.bootstrap.stop()

        return _run()

    def set_stream_handler(
        self, protocol_id: TProtocol, stream_handler: StreamHandlerFn
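Call sites are unchanged by this rewrite, since run() now returns the async context manager instead of being decorated as one; an illustrative sketch (listen address arbitrary):

    import multiaddr
    import trio

    async def main(host) -> None:
        listen = multiaddr.Multiaddr("/ip4/0.0.0.0/tcp/8000")
        # mDNS/bootstrap discovery start inside the context and stop on exit
        async with host.run(listen_addrs=[listen]):
            await trio.sleep_forever()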
@@ -173,7 +210,10 @@ class BasicHost(IHost):
        self.multiselect.add_handler(protocol_id, stream_handler)

    async def new_stream(
        self, peer_id: ID, protocol_ids: Sequence[TProtocol]
        self,
        peer_id: ID,
        protocol_ids: Sequence[TProtocol],
        negotitate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
    ) -> INetStream:
        """
        :param peer_id: peer_id that host is connecting

@@ -185,7 +225,9 @@ class BasicHost(IHost):
        # Perform protocol muxing to determine protocol to use
        try:
            selected_protocol = await self.multiselect_client.select_one_of(
                list(protocol_ids), MultiselectCommunicator(net_stream)
                list(protocol_ids),
                MultiselectCommunicator(net_stream),
                negotitate_timeout,
            )
        except MultiselectClientError as error:
            logger.debug("fail to open a stream to peer %s, error=%s", peer_id, error)

@@ -195,7 +237,12 @@ class BasicHost(IHost):
        net_stream.set_protocol(selected_protocol)
        return net_stream

    async def send_command(self, peer_id: ID, command: str) -> list[str]:
    async def send_command(
        self,
        peer_id: ID,
        command: str,
        response_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
    ) -> list[str]:
        """
        Send a multistream-select command to the specified peer and return
        the response.

@@ -209,7 +256,7 @@ class BasicHost(IHost):

        try:
            response = await self.multiselect_client.query_multistream_command(
                MultiselectCommunicator(new_stream), command
                MultiselectCommunicator(new_stream), command, response_timeout
            )
        except MultiselectClientError as error:
            logger.debug("fail to open a stream to peer %s, error=%s", peer_id, error)

@@ -229,7 +276,7 @@ class BasicHost(IHost):
        :param peer_info: peer_info of the peer we want to connect to
        :type peer_info: peer.peerinfo.PeerInfo
        """
        self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)
        self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 120)

        # there is already a connection to this peer
        if peer_info.peer_id in self._network.connections:

@@ -248,8 +295,13 @@ class BasicHost(IHost):
        # Perform protocol muxing to determine protocol to use
        try:
            protocol, handler = await self.multiselect.negotiate(
                MultiselectCommunicator(net_stream)
                MultiselectCommunicator(net_stream), self.negotiate_timeout
            )
            if protocol is None:
                await net_stream.reset()
                raise StreamFailure(
                    "Failed to negotiate protocol: no protocol selected"
                )
        except MultiselectError as error:
            peer_id = net_stream.muxed_conn.peer_id
            logger.debug(

@@ -257,7 +309,23 @@ class BasicHost(IHost):
            )
            await net_stream.reset()
            return
        if protocol is None:
            logger.debug(
                "no protocol negotiated, closing stream from peer %s",
                net_stream.muxed_conn.peer_id,
            )
            await net_stream.reset()
            return
        net_stream.set_protocol(protocol)
        if handler is None:
            logger.debug(
                "no handler for protocol %s, closing stream from peer %s",
                protocol,
                net_stream.muxed_conn.peer_id,
            )
            await net_stream.reset()
            return

        await handler(net_stream)

    def get_live_peers(self) -> list[ID]:

@@ -275,13 +343,13 @@ class BasicHost(IHost):
        :param peer_id: ID of the peer to check
        :return: True if peer has an active connection, False otherwise
        """
        return peer_id in self._network.connections
        return len(self._network.get_connections(peer_id)) > 0

    def get_peer_connection_info(self, peer_id: ID) -> Optional[INetConn]:
    def get_peer_connection_info(self, peer_id: ID) -> INetConn | None:
        """
        Get connection information for a specific peer if connected.

        :param peer_id: ID of the peer to get info for
        :return: Connection object if peer is connected, None otherwise
        """
        return self._network.connections.get(peer_id)
        return self._network.get_connection(peer_id)
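A hedged call-site sketch for the reworked connection lookups (describe_peer is illustrative, not part of the patch):

    def describe_peer(host, peer_id) -> str:
        # get_peer_connection_info() now delegates to network.get_connection()
        # and returns None when the peer has no live connection.
        conn = host.get_peer_connection_info(peer_id)
        if conn is None:
            return f"{peer_id}: not connected"
        return f"{peer_id}: connected"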
@@ -9,13 +9,13 @@ from libp2p.abc import (
    IHost,
)
from libp2p.host.ping import (
    ID as PingID,
    handle_ping,
)
from libp2p.host.ping import ID as PingID
from libp2p.identity.identify.identify import (
    ID as IdentifyID,
    identify_handler_for,
)
from libp2p.identity.identify.identify import ID as IdentifyID

if TYPE_CHECKING:
    from libp2p.custom_types import (

@@ -26,5 +26,8 @@ if TYPE_CHECKING:

def get_default_protocols(host: IHost) -> "OrderedDict[TProtocol, StreamHandlerFn]":
    return OrderedDict(
        ((IdentifyID, identify_handler_for(host)), (PingID, handle_ping))
        (
            (IdentifyID, identify_handler_for(host, use_varint_format=True)),
            (PingID, handle_ping),
        )
    )
@@ -18,8 +18,14 @@ from libp2p.peer.peerinfo import (
class RoutedHost(BasicHost):
    _router: IPeerRouting

    def __init__(self, network: INetworkService, router: IPeerRouting):
        super().__init__(network)
    def __init__(
        self,
        network: INetworkService,
        router: IPeerRouting,
        enable_mDNS: bool = False,
        bootstrap: list[str] | None = None,
    ):
        super().__init__(network, enable_mDNS, bootstrap)
        self._router = router

    async def connect(self, peer_info: PeerInfo) -> None:

@@ -40,8 +46,8 @@ class RoutedHost(BasicHost):
        found_peer_info = await self._router.find_peer(peer_info.peer_id)
        if not found_peer_info:
            raise ConnectionFailure("Unable to find Peer address")
        self.peerstore.add_addrs(peer_info.peer_id, found_peer_info.addrs, 10)
        self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)
        self.peerstore.add_addrs(peer_info.peer_id, found_peer_info.addrs, 120)
        self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 120)

        # there is already a connection to this peer
        if peer_info.peer_id in self._network.connections:
@@ -1,7 +1,4 @@
import logging
from typing import (
    Optional,
)

from multiaddr import (
    Multiaddr,

@@ -18,8 +15,11 @@ from libp2p.custom_types import (
from libp2p.network.stream.exceptions import (
    StreamClosed,
)
from libp2p.peer.peerstore import env_to_send_in_RPC
from libp2p.utils import (
    decode_varint_with_size,
    get_agent_version,
    varint,
)

from .pb.identify_pb2 import (

@@ -40,8 +40,8 @@ def _multiaddr_to_bytes(maddr: Multiaddr) -> bytes:


def _remote_address_to_multiaddr(
    remote_address: Optional[tuple[str, int]]
) -> Optional[Multiaddr]:
    remote_address: tuple[str, int] | None,
) -> Multiaddr | None:
    """Convert a (host, port) tuple to a Multiaddr."""
    if remote_address is None:
        return None

@@ -58,11 +58,14 @@ def _remote_address_to_multiaddr(


def _mk_identify_protobuf(
    host: IHost, observed_multiaddr: Optional[Multiaddr]
    host: IHost, observed_multiaddr: Multiaddr | None
) -> Identify:
    public_key = host.get_public_key()
    laddrs = host.get_addrs()
    protocols = host.get_mux().get_protocols()
    protocols = tuple(str(p) for p in host.get_mux().get_protocols() if p is not None)

    # Create a signed peer-record for the remote peer
    envelope_bytes, _ = env_to_send_in_RPC(host)

    observed_addr = observed_multiaddr.to_bytes() if observed_multiaddr else b""
    return Identify(

@@ -72,24 +75,64 @@ def _mk_identify_protobuf(
        listen_addrs=map(_multiaddr_to_bytes, laddrs),
        observed_addr=observed_addr,
        protocols=protocols,
        signedPeerRecord=envelope_bytes,
    )


def identify_handler_for(host: IHost) -> StreamHandlerFn:
def parse_identify_response(response: bytes) -> Identify:
    """
    Parse identify response that could be either:
    - Old format: raw protobuf
    - New format: length-prefixed protobuf

    This function provides backward and forward compatibility.
    """
    # Try new format first: length-prefixed protobuf
    if len(response) >= 1:
        length, varint_size = decode_varint_with_size(response)
        if varint_size > 0 and length > 0 and varint_size + length <= len(response):
            protobuf_data = response[varint_size : varint_size + length]
            try:
                identify_response = Identify()
                identify_response.ParseFromString(protobuf_data)
                # Sanity check: must have agent_version (protocol_version is optional)
                if identify_response.agent_version:
                    logger.debug(
                        "Parsed length-prefixed identify response (new format)"
                    )
                    return identify_response
            except Exception:
                pass  # Fall through to old format

    # Fall back to old format: raw protobuf
    try:
        identify_response = Identify()
        identify_response.ParseFromString(response)
        logger.debug("Parsed raw protobuf identify response (old format)")
        return identify_response
    except Exception as e:
        logger.error(f"Failed to parse identify response: {e}")
        logger.error(f"Response length: {len(response)}")
        logger.error(f"Response hex: {response.hex()}")
        raise


def identify_handler_for(
    host: IHost, use_varint_format: bool = True
) -> StreamHandlerFn:
    async def handle_identify(stream: INetStream) -> None:
        # get observed address from ``stream``
        peer_id = (
            stream.muxed_conn.peer_id
        )  # remote peer_id is in class Mplex (mplex.py)

        observed_multiaddr: Multiaddr | None = None
        # Get the remote address
        try:
            remote_address = stream.get_remote_address()
            # Convert to multiaddr
            if remote_address:
                observed_multiaddr = _remote_address_to_multiaddr(remote_address)
            else:
                observed_multiaddr = None

            logger.debug(
                "Connection from remote peer %s, address: %s, multiaddr: %s",
                peer_id,
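The new wire format is simply a uvarint length prefix followed by the protobuf bytes; a round-trip sketch using the same helpers the patch imports (the payload here is a stand-in, not a real Identify message):

    from libp2p.utils import decode_varint_with_size, varint

    payload = b"\x12\x04test"  # stand-in bytes
    framed = varint.encode_uvarint(len(payload)) + payload

    length, varint_size = decode_varint_with_size(framed)
    assert framed[varint_size : varint_size + length] == payload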
@@ -104,7 +147,21 @@ def identify_handler_for(host: IHost) -> StreamHandlerFn:
            response = protobuf.SerializeToString()

            try:
                await stream.write(response)
                if use_varint_format:
                    # Send length-prefixed protobuf message (new format)
                    await stream.write(varint.encode_uvarint(len(response)))
                    await stream.write(response)
                    logger.debug(
                        "Sent new format (length-prefixed) identify response to %s",
                        peer_id,
                    )
                else:
                    # Send raw protobuf message (old format for backward compatibility)
                    await stream.write(response)
                    logger.debug(
                        "Sent old format (raw protobuf) identify response to %s",
                        peer_id,
                    )
            except StreamClosed:
                logger.debug("Fail to respond to %s request: stream closed", ID)
        else:
|
||||
repeated bytes listen_addrs = 2;
|
||||
optional bytes observed_addr = 4;
|
||||
repeated string protocols = 3;
|
||||
optional bytes signedPeerRecord = 8;
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ _sym_db = _symbol_database.Default()



DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n*libp2p/identity/identify/pb/identify.proto\x12\x0bidentify.pb\"\x8f\x01\n\x08Identify\x12\x18\n\x10protocol_version\x18\x05 \x01(\t\x12\x15\n\ragent_version\x18\x06 \x01(\t\x12\x12\n\npublic_key\x18\x01 \x01(\x0c\x12\x14\n\x0clisten_addrs\x18\x02 \x03(\x0c\x12\x15\n\robserved_addr\x18\x04 \x01(\x0c\x12\x11\n\tprotocols\x18\x03 \x03(\t')
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n*libp2p/identity/identify/pb/identify.proto\x12\x0bidentify.pb\"\xa9\x01\n\x08Identify\x12\x18\n\x10protocol_version\x18\x05 \x01(\t\x12\x15\n\ragent_version\x18\x06 \x01(\t\x12\x12\n\npublic_key\x18\x01 \x01(\x0c\x12\x14\n\x0clisten_addrs\x18\x02 \x03(\x0c\x12\x15\n\robserved_addr\x18\x04 \x01(\x0c\x12\x11\n\tprotocols\x18\x03 \x03(\t\x12\x18\n\x10signedPeerRecord\x18\x08 \x01(\x0c')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.identity.identify.pb.identify_pb2', globals())

@@ -21,5 +21,5 @@ if _descriptor._USE_C_DESCRIPTORS == False:

DESCRIPTOR._options = None
_IDENTIFY._serialized_start=60
_IDENTIFY._serialized_end=203
_IDENTIFY._serialized_end=229
# @@protoc_insertion_point(module_scope)
@@ -22,10 +22,12 @@ class Identify(google.protobuf.message.Message):
    LISTEN_ADDRS_FIELD_NUMBER: builtins.int
    OBSERVED_ADDR_FIELD_NUMBER: builtins.int
    PROTOCOLS_FIELD_NUMBER: builtins.int
    SIGNEDPEERRECORD_FIELD_NUMBER: builtins.int
    protocol_version: builtins.str
    agent_version: builtins.str
    public_key: builtins.bytes
    observed_addr: builtins.bytes
    signedPeerRecord: builtins.bytes
    @property
    def listen_addrs(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bytes]: ...
    @property

@@ -39,8 +41,9 @@ class Identify(google.protobuf.message.Message):
        listen_addrs: collections.abc.Iterable[builtins.bytes] | None = ...,
        observed_addr: builtins.bytes | None = ...,
        protocols: collections.abc.Iterable[builtins.str] | None = ...,
        signedPeerRecord: builtins.bytes | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing.Literal["agent_version", b"agent_version", "observed_addr", b"observed_addr", "protocol_version", b"protocol_version", "public_key", b"public_key"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing.Literal["agent_version", b"agent_version", "listen_addrs", b"listen_addrs", "observed_addr", b"observed_addr", "protocol_version", b"protocol_version", "protocols", b"protocols", "public_key", b"public_key"]) -> None: ...
    def HasField(self, field_name: typing.Literal["agent_version", b"agent_version", "observed_addr", b"observed_addr", "protocol_version", b"protocol_version", "public_key", b"public_key", "signedPeerRecord", b"signedPeerRecord"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing.Literal["agent_version", b"agent_version", "listen_addrs", b"listen_addrs", "observed_addr", b"observed_addr", "protocol_version", b"protocol_version", "protocols", b"protocols", "public_key", b"public_key", "signedPeerRecord", b"signedPeerRecord"]) -> None: ...

global___Identify = Identify
@@ -1,7 +1,4 @@
import logging
from typing import (
    Optional,
)

from multiaddr import (
    Multiaddr,

@@ -23,11 +20,16 @@ from libp2p.custom_types import (
from libp2p.network.stream.exceptions import (
    StreamClosed,
)
from libp2p.peer.envelope import consume_envelope
from libp2p.peer.id import (
    ID,
)
from libp2p.utils import (
    get_agent_version,
    varint,
)
from libp2p.utils.varint import (
    read_length_prefixed_protobuf,
)

from ..identify.identify import (

@@ -43,22 +45,31 @@ logger = logging.getLogger(__name__)
ID_PUSH = TProtocol("/ipfs/id/push/1.0.0")
PROTOCOL_VERSION = "ipfs/0.1.0"
AGENT_VERSION = get_agent_version()
CONCURRENCY_LIMIT = 10


def identify_push_handler_for(host: IHost) -> StreamHandlerFn:
def identify_push_handler_for(
    host: IHost, use_varint_format: bool = True
) -> StreamHandlerFn:
    """
    Create a handler for the identify/push protocol.

    This handler receives pushed identify messages from remote peers and updates
    the local peerstore with the new information.

    Args:
        host: The libp2p host.
        use_varint_format: True=length-prefixed, False=raw protobuf.

    """

    async def handle_identify_push(stream: INetStream) -> None:
        peer_id = stream.muxed_conn.peer_id

        try:
            # Read the identify message from the stream
            data = await stream.read()
            # Use the utility function to read the protobuf message
            data = await read_length_prefixed_protobuf(stream, use_varint_format)

            identify_msg = Identify()
            identify_msg.ParseFromString(data)

@@ -68,6 +79,11 @@ def identify_push_handler_for(host: IHost) -> StreamHandlerFn:
            )

            logger.debug("Successfully processed identify/push from peer %s", peer_id)

            # Send acknowledgment to indicate successful processing
            # This ensures the sender knows the message was received before closing
            await stream.write(b"OK")

        except StreamClosed:
            logger.debug(
                "Stream closed while processing identify/push from %s", peer_id

@@ -76,7 +92,10 @@ def identify_push_handler_for(host: IHost) -> StreamHandlerFn:
            logger.error("Error processing identify/push from %s: %s", peer_id, e)
        finally:
            # Close the stream after processing
            await stream.close()
            try:
                await stream.close()
            except Exception:
                pass  # Ignore errors when closing

    return handle_identify_push

@@ -122,6 +141,19 @@ async def _update_peerstore_from_identify(
    except Exception as e:
        logger.error("Error updating protocols for peer %s: %s", peer_id, e)

    if identify_msg.HasField("signedPeerRecord"):
        try:
            # Convert the signed peer record (Envelope) from protobuf bytes
            envelope, _ = consume_envelope(
                identify_msg.signedPeerRecord, "libp2p-peer-record"
            )
            # Use a default TTL of 2 hours (7200 seconds)
            if not peerstore.consume_peer_record(envelope, 7200):
                logger.error("Updating Certified-Addr-Book was unsuccessful")
        except Exception as e:
            logger.error(
                "Error updating the certified addr book for peer %s: %s", peer_id, e
            )
    # Update observed address if present
    if identify_msg.HasField("observed_addr") and identify_msg.observed_addr:
        try:

@@ -135,7 +167,11 @@ async def _update_peerstore_from_identify(


async def push_identify_to_peer(
    host: IHost, peer_id: ID, observed_multiaddr: Optional[Multiaddr] = None
    host: IHost,
    peer_id: ID,
    observed_multiaddr: Multiaddr | None = None,
    limit: trio.Semaphore = trio.Semaphore(CONCURRENCY_LIMIT),
    use_varint_format: bool = True,
) -> bool:
    """
    Push an identify message to a specific peer.

@@ -143,52 +179,91 @@ async def push_identify_to_peer(
    This function opens a stream to the peer using the identify/push protocol,
    sends the identify message, and closes the stream.

    Returns
    -------
    bool
        True if the push was successful, False otherwise.
    Args:
        host: The libp2p host.
        peer_id: The peer ID to push to.
        observed_multiaddr: The observed multiaddress (optional).
        limit: Semaphore for concurrency control.
        use_varint_format: True=length-prefixed, False=raw protobuf.

    Returns:
        bool: True if the push was successful, False otherwise.

    """
    try:
        # Create a new stream to the peer using the identify/push protocol
        stream = await host.new_stream(peer_id, [ID_PUSH])
    async with limit:
        try:
            # Create a new stream to the peer using the identify/push protocol
            stream = await host.new_stream(peer_id, [ID_PUSH])

        # Create the identify message
        identify_msg = _mk_identify_protobuf(host, observed_multiaddr)
        response = identify_msg.SerializeToString()
            # Create the identify message
            identify_msg = _mk_identify_protobuf(host, observed_multiaddr)
            response = identify_msg.SerializeToString()

        # Send the identify message
        await stream.write(response)
            if use_varint_format:
                # Send length-prefixed identify message
                await stream.write(varint.encode_uvarint(len(response)))
                await stream.write(response)
            else:
                # Send raw protobuf message
                await stream.write(response)

        # Close the stream
        await stream.close()
            # Wait for acknowledgment from the receiver with timeout
            # This ensures the message was processed before closing
            try:
                with trio.move_on_after(1.0):  # 1 second timeout
                    ack = await stream.read(2)  # Read "OK" acknowledgment
                    if ack != b"OK":
                        logger.warning(
                            "Unexpected acknowledgment from peer %s: %s", peer_id, ack
                        )
            except Exception as e:
                logger.debug("No acknowledgment received from peer %s: %s", peer_id, e)
                # Continue anyway, as the message might have been processed

        logger.debug("Successfully pushed identify to peer %s", peer_id)
        return True
    except Exception as e:
        logger.error("Error pushing identify to peer %s: %s", peer_id, e)
        return False
            # Close the stream after acknowledgment (or timeout)
            await stream.close()

            logger.debug("Successfully pushed identify to peer %s", peer_id)
            return True
        except Exception as e:
            logger.error("Error pushing identify to peer %s: %s", peer_id, e)
            return False


async def push_identify_to_peers(
    host: IHost,
    peer_ids: Optional[set[ID]] = None,
    observed_multiaddr: Optional[Multiaddr] = None,
    peer_ids: set[ID] | None = None,
    observed_multiaddr: Multiaddr | None = None,
    use_varint_format: bool = True,
) -> None:
    """
    Push an identify message to multiple peers in parallel.

    If peer_ids is None, push to all connected peers.

    Args:
        host: The libp2p host.
        peer_ids: Set of peer IDs to push to (if None, push to all connected peers).
        observed_multiaddr: The observed multiaddress (optional).
        use_varint_format: True=length-prefixed, False=raw protobuf.

    """
    if peer_ids is None:
        # Get all connected peers
        peer_ids = set(host.get_peerstore().peer_ids())
        peer_ids = set(host.get_connected_peers())

    # Create a single shared semaphore for concurrency control
    limit = trio.Semaphore(CONCURRENCY_LIMIT)

    # Push to each peer in parallel using a trio.Nursery
    # TODO: Consider using a bounded nursery to limit concurrency
    # and avoid overwhelming the network. This can be done by using
    # trio.open_nursery(max_concurrent=10) or similar.
    # For now, we will use an unbounded nursery for simplicity.
    # limiting concurrent connections to CONCURRENCY_LIMIT
    async with trio.open_nursery() as nursery:
        for peer_id in peer_ids:
            nursery.start_soon(push_identify_to_peer, host, peer_id, observed_multiaddr)
            nursery.start_soon(
                push_identify_to_peer,
                host,
                peer_id,
                observed_multiaddr,
                limit,
                use_varint_format,
            )
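A hedged fan-out sketch for the updated push API (the module path is assumed from this diff's package layout):

    from libp2p.identity.identify_push.identify_push import (
        push_identify_to_peer,
        push_identify_to_peers,
    )

    async def announce(host) -> None:
        # Fan out to every connected peer; concurrency is capped internally
        # by the shared trio.Semaphore(CONCURRENCY_LIMIT).
        await push_identify_to_peers(host)

        # Or push to one peer and check the outcome.
        for peer_id in host.get_connected_peers():
            ok = await push_identify_to_peer(host, peer_id, use_varint_format=True)
            if not ok:
                print(f"identify/push to {peer_id} failed")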
@@ -2,27 +2,22 @@ from abc import (
    ABC,
    abstractmethod,
)
from typing import (
    Optional,
)
from typing import Any


class Closer(ABC):
    @abstractmethod
    async def close(self) -> None:
        ...
    async def close(self) -> None: ...


class Reader(ABC):
    @abstractmethod
    async def read(self, n: int = None) -> bytes:
        ...
    async def read(self, n: int | None = None) -> bytes: ...


class Writer(ABC):
    @abstractmethod
    async def write(self, data: bytes) -> None:
        ...
    async def write(self, data: bytes) -> None: ...


class WriteCloser(Writer, Closer):

@@ -39,7 +34,7 @@ class ReadWriter(Reader, Writer):

class ReadWriteCloser(Reader, Writer, Closer):
    @abstractmethod
    def get_remote_address(self) -> Optional[tuple[str, int]]:
    def get_remote_address(self) -> tuple[str, int] | None:
        """
        Return the remote address of the connected peer.

@@ -50,14 +45,12 @@ class ReadWriteCloser(Reader, Writer, Closer):

class MsgReader(ABC):
    @abstractmethod
    async def read_msg(self) -> bytes:
        ...
    async def read_msg(self) -> bytes: ...


class MsgWriter(ABC):
    @abstractmethod
    async def write_msg(self, msg: bytes) -> None:
        ...
    async def write_msg(self, msg: bytes) -> None: ...


class MsgReadWriteCloser(MsgReader, MsgWriter, Closer):

@@ -66,19 +59,26 @@ class MsgReadWriteCloser(MsgReader, MsgWriter, Closer):

class Encrypter(ABC):
    @abstractmethod
    def encrypt(self, data: bytes) -> bytes:
        ...
    def encrypt(self, data: bytes) -> bytes: ...

    @abstractmethod
    def decrypt(self, data: bytes) -> bytes:
        ...
    def decrypt(self, data: bytes) -> bytes: ...


class EncryptedMsgReadWriter(MsgReadWriteCloser, Encrypter):
    """Read/write message with encryption/decryption."""

    def get_remote_address(self) -> Optional[tuple[str, int]]:
    conn: Any | None

    def __init__(self, conn: Any | None = None):
        self.conn = conn

    def get_remote_address(self) -> tuple[str, int] | None:
        """Get remote address if supported by the underlying connection."""
        if hasattr(self, "conn") and hasattr(self.conn, "get_remote_address"):
        if (
            self.conn is not None
            and hasattr(self, "conn")
            and hasattr(self.conn, "get_remote_address")
        ):
            return self.conn.get_remote_address()
        return None
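With conn now an explicit constructor argument, subclasses get get_remote_address() for free; a toy sketch (PlaintextMsgRW is illustrative and does no real encryption):

    from libp2p.io.abc import EncryptedMsgReadWriter

    class PlaintextMsgRW(EncryptedMsgReadWriter):
        """Identity 'encryption', only to exercise the new conn wiring."""

        def encrypt(self, data: bytes) -> bytes:
            return data

        def decrypt(self, data: bytes) -> bytes:
            return data

        async def read_msg(self) -> bytes:
            return b""

        async def write_msg(self, msg: bytes) -> None:
            return None

        async def close(self) -> None:
            return None

    # rw = PlaintextMsgRW(conn=some_tcp_stream)
    # rw.get_remote_address() delegates to some_tcp_stream, or returns None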
@@ -5,6 +5,7 @@ from that repo: "a simple package to r/w length-delimited slices."

NOTE: currently missing the capability to indicate lengths by "varint" method.
"""

from abc import (
    abstractmethod,
)

@@ -60,12 +61,10 @@ class BaseMsgReadWriter(MsgReadWriteCloser):
        return await read_exactly(self.read_write_closer, length)

    @abstractmethod
    async def next_msg_len(self) -> int:
        ...
    async def next_msg_len(self) -> int: ...

    @abstractmethod
    def encode_msg(self, msg: bytes) -> bytes:
        ...
    def encode_msg(self, msg: bytes) -> bytes: ...

    async def close(self) -> None:
        await self.read_write_closer.close()
@@ -1,7 +1,4 @@
import logging
from typing import (
    Optional,
)

import trio

@@ -34,7 +31,7 @@ class TrioTCPStream(ReadWriteCloser):
        except (trio.ClosedResourceError, trio.BrokenResourceError) as error:
            raise IOException from error

    async def read(self, n: int = None) -> bytes:
    async def read(self, n: int | None = None) -> bytes:
        async with self.read_lock:
            if n is not None and n == 0:
                return b""

@@ -46,7 +43,7 @@ class TrioTCPStream(ReadWriteCloser):
    async def close(self) -> None:
        await self.stream.aclose()

    def get_remote_address(self) -> Optional[tuple[str, int]]:
    def get_remote_address(self) -> tuple[str, int] | None:
        """Return the remote address as (host, port) tuple."""
        try:
            return self.stream.socket.getpeername()
@@ -14,12 +14,14 @@ async def read_exactly(
    """
    NOTE: relying on exceptions to break out on erroneous conditions, like EOF
    """
    data = await reader.read(n)
    buffer = bytearray()
    buffer.extend(await reader.read(n))

    for _ in range(retry_count):
        if len(data) < n:
            remaining = n - len(data)
            data += await reader.read(remaining)
        if len(buffer) < n:
            remaining = n - len(buffer)
            buffer.extend(await reader.read(remaining))

        else:
            return data
            return bytes(buffer)
    raise IncompleteReadError({"requested_count": n, "received_count": len(data)})
    raise IncompleteReadError({"requested_count": n, "received_count": len(buffer)})
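The bytearray rewrite avoids re-copying the accumulated prefix on every retry; a toy check (ChunkReader is illustrative, and the import location is assumed):

    import trio

    from libp2p.io.utils import read_exactly

    class ChunkReader:
        """Returns at most `chunk` bytes per read, forcing retries."""

        def __init__(self, data: bytes, chunk: int = 3) -> None:
            self.data, self.chunk = data, chunk

        async def read(self, n: int | None = None) -> bytes:
            out, self.data = self.data[: self.chunk], self.data[self.chunk :]
            return out

    async def demo() -> None:
        assert await read_exactly(ChunkReader(b"hello world"), 11) == b"hello world"

    trio.run(demo)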
libp2p/kad_dht/__init__.py (new file, 30 lines)
@@ -0,0 +1,30 @@
"""
Kademlia DHT implementation for py-libp2p.

This module provides a Distributed Hash Table (DHT) implementation
based on the Kademlia protocol.
"""

from .kad_dht import (
    KadDHT,
)
from .peer_routing import (
    PeerRouting,
)
from .routing_table import (
    RoutingTable,
)
from .utils import (
    create_key_from_binary,
)
from .value_store import (
    ValueStore,
)

__all__ = [
    "KadDHT",
    "RoutingTable",
    "PeerRouting",
    "ValueStore",
    "create_key_from_binary",
]
libp2p/kad_dht/common.py (new file, 14 lines)
@@ -0,0 +1,14 @@
"""
Shared constants and protocol parameters for the Kademlia DHT.
"""

from libp2p.custom_types import (
    TProtocol,
)

# Constants for the Kademlia algorithm
ALPHA = 3  # Concurrency parameter
PROTOCOL_ID = TProtocol("/ipfs/kad/1.0.0")
QUERY_TIMEOUT = 10

TTL = DEFAULT_TTL = 24 * 60 * 60  # 24 hours in seconds
825
libp2p/kad_dht/kad_dht.py
Normal file
825
libp2p/kad_dht/kad_dht.py
Normal file
@ -0,0 +1,825 @@
|
"""
Kademlia DHT implementation for py-libp2p.

This module provides a complete Distributed Hash Table (DHT)
implementation based on the Kademlia algorithm and protocol.
"""

from collections.abc import Awaitable, Callable
from enum import (
    Enum,
)
import logging
import time

from multiaddr import (
    Multiaddr,
)
import trio
import varint

from libp2p.abc import (
    IHost,
)
from libp2p.discovery.random_walk.rt_refresh_manager import RTRefreshManager
from libp2p.kad_dht.utils import maybe_consume_signed_record
from libp2p.network.stream.net_stream import (
    INetStream,
)
from libp2p.peer.envelope import Envelope
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerinfo import (
    PeerInfo,
)
from libp2p.peer.peerstore import env_to_send_in_RPC
from libp2p.tools.async_service import (
    Service,
)

from .common import (
    ALPHA,
    PROTOCOL_ID,
    QUERY_TIMEOUT,
)
from .pb.kademlia_pb2 import (
    Message,
)
from .peer_routing import (
    PeerRouting,
)
from .provider_store import (
    ProviderStore,
)
from .routing_table import (
    RoutingTable,
)
from .value_store import (
    ValueStore,
)

logger = logging.getLogger("kademlia-example.kad_dht")
# logger = logging.getLogger("libp2p.kademlia")
# Default parameters
ROUTING_TABLE_REFRESH_INTERVAL = 60  # 1 min in seconds for testing


class DHTMode(Enum):
    """DHT operation modes."""

    CLIENT = "CLIENT"
    SERVER = "SERVER"


class KadDHT(Service):
    """
    Kademlia DHT implementation for libp2p.

    This class provides a DHT implementation that combines routing table management,
    peer discovery, content routing, and value storage.

    Optional Random Walk feature enhances peer discovery by automatically
    performing periodic random queries to discover new peers and maintain
    routing table health.

    Example:
        # Basic DHT without random walk (default)
        dht = KadDHT(host, DHTMode.SERVER)

        # DHT with random walk enabled for enhanced peer discovery
        dht = KadDHT(host, DHTMode.SERVER, enable_random_walk=True)

    """

    def __init__(self, host: IHost, mode: DHTMode, enable_random_walk: bool = False):
        """
        Initialize a new Kademlia DHT node.

        :param host: The libp2p host.
        :param mode: The mode of host (Client or Server) - must be DHTMode enum
        :param enable_random_walk: Whether to enable automatic random walk
        """
        super().__init__()

        self.host = host
        self.local_peer_id = host.get_id()

        # Validate that mode is a DHTMode enum
        if not isinstance(mode, DHTMode):
            raise TypeError(f"mode must be DHTMode enum, got {type(mode)}")

        self.mode = mode
        self.enable_random_walk = enable_random_walk

        # Initialize the routing table
        self.routing_table = RoutingTable(self.local_peer_id, self.host)

        # Initialize peer routing
        self.peer_routing = PeerRouting(host, self.routing_table)

        # Initialize value store
        self.value_store = ValueStore(host=host, local_peer_id=self.local_peer_id)

        # Initialize provider store with host and peer_routing references
        self.provider_store = ProviderStore(host=host, peer_routing=self.peer_routing)

        # Last time we republished provider records
        self._last_provider_republish = time.time()

        # Initialize RT Refresh Manager (only if random walk is enabled)
        self.rt_refresh_manager: RTRefreshManager | None = None
        if self.enable_random_walk:
            self.rt_refresh_manager = RTRefreshManager(
                host=self.host,
                routing_table=self.routing_table,
                local_peer_id=self.local_peer_id,
                query_function=self._create_query_function(),
                enable_auto_refresh=True,
            )

        # Set protocol handlers
        host.set_stream_handler(PROTOCOL_ID, self.handle_stream)

    def _create_query_function(self) -> Callable[[bytes], Awaitable[list[ID]]]:
        """
        Create a query function that wraps peer_routing.find_closest_peers_network.

        This function is used by the RandomWalk module to query for peers without
        directly importing PeerRouting, avoiding circular import issues.

        Returns:
            Callable that takes target_key bytes and returns list of peer IDs

        """

        async def query_function(target_key: bytes) -> list[ID]:
            """Query for closest peers to target key."""
            return await self.peer_routing.find_closest_peers_network(target_key)

        return query_function

    async def run(self) -> None:
        """Run the DHT service."""
        logger.info(f"Starting Kademlia DHT with peer ID {self.local_peer_id}")

        # Start the RT Refresh Manager in parallel with the main DHT service
        async with trio.open_nursery() as nursery:
            # Start the RT Refresh Manager only if random walk is enabled
            if self.rt_refresh_manager is not None:
                nursery.start_soon(self.rt_refresh_manager.start)
                logger.info("RT Refresh Manager started - Random Walk is now active")
            else:
                logger.info("Random Walk is disabled - RT Refresh Manager not started")

            # Start the main DHT service loop
            nursery.start_soon(self._run_main_loop)

    async def _run_main_loop(self) -> None:
        """Run the main DHT service loop."""
        # Main service loop
        while self.manager.is_running:
            # Periodically refresh the routing table
            await self.refresh_routing_table()

            # Check if it's time to republish provider records
            current_time = time.time()
            # await self._republish_provider_records()
            self._last_provider_republish = current_time

            # Clean up expired values and provider records
            expired_values = self.value_store.cleanup_expired()
            if expired_values > 0:
                logger.debug(f"Cleaned up {expired_values} expired values")

            self.provider_store.cleanup_expired()

            # Wait before next maintenance cycle
            await trio.sleep(ROUTING_TABLE_REFRESH_INTERVAL)

    async def stop(self) -> None:
        """Stop the DHT service and cleanup resources."""
        logger.info("Stopping Kademlia DHT")

        # Stop the RT Refresh Manager only if it was started
        if self.rt_refresh_manager is not None:
            await self.rt_refresh_manager.stop()
            logger.info("RT Refresh Manager stopped")
        else:
            logger.info("RT Refresh Manager was not running (Random Walk disabled)")

    async def switch_mode(self, new_mode: DHTMode) -> DHTMode:
        """
        Switch the DHT mode.

        :param new_mode: The new mode - must be DHTMode enum
        :return: The new mode as DHTMode enum
        """
        # Validate that new_mode is a DHTMode enum
        if not isinstance(new_mode, DHTMode):
            raise TypeError(f"new_mode must be DHTMode enum, got {type(new_mode)}")

        if new_mode == DHTMode.CLIENT:
            self.routing_table.cleanup_routing_table()
        self.mode = new_mode
        logger.info(f"Switched to {new_mode.value} mode")
        return self.mode

    async def handle_stream(self, stream: INetStream) -> None:
        """
        Handle an incoming DHT stream using varint length prefixes.
        """
        if self.mode == DHTMode.CLIENT:
            await stream.close()
            return
        peer_id = stream.muxed_conn.peer_id
        logger.debug(f"Received DHT stream from peer {peer_id}")
        await self.add_peer(peer_id)
        logger.debug(f"Added peer {peer_id} to routing table")

        closer_peer_envelope: Envelope | None = None
        provider_peer_envelope: Envelope | None = None

        try:
            # Read varint-prefixed length for the message
            length_prefix = b""
            while True:
                byte = await stream.read(1)
                if not byte:
                    logger.warning("Stream closed while reading varint length")
                    await stream.close()
                    return
                length_prefix += byte
                if byte[0] & 0x80 == 0:
                    break
            msg_length = varint.decode_bytes(length_prefix)

            # Read the message bytes
            msg_bytes = await stream.read(msg_length)
            if len(msg_bytes) < msg_length:
                logger.warning("Failed to read full message from stream")
                await stream.close()
                return

            try:
                # Parse as protobuf
                message = Message()
                message.ParseFromString(msg_bytes)
                logger.debug(
                    f"Received DHT message from {peer_id}, type: {message.type}"
                )

                # Handle FIND_NODE message
                if message.type == Message.MessageType.FIND_NODE:
                    # Get target key directly from protobuf
                    target_key = message.key

                    # Find closest peers to the target key
                    closest_peers = self.routing_table.find_local_closest_peers(
                        target_key, 20
                    )
                    logger.debug(f"Found {len(closest_peers)} peers close to target")

                    # Consume the source signed_peer_record if sent
                    if not maybe_consume_signed_record(message, self.host, peer_id):
                        logger.error(
                            "Received an invalid-signed-record, dropping the stream"
                        )
                        await stream.close()
                        return

                    # Build response message with protobuf
                    response = Message()
                    response.type = Message.MessageType.FIND_NODE

                    # Add closest peers to response
                    for peer in closest_peers:
                        # Skip if the peer is the requester
                        if peer == peer_id:
                            continue

                        # Add peer to closerPeers field
                        peer_proto = response.closerPeers.add()
                        peer_proto.id = peer.to_bytes()
                        peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                        # Add addresses if available
                        try:
                            addrs = self.host.get_peerstore().addrs(peer)
                            if addrs:
                                for addr in addrs:
                                    peer_proto.addrs.append(addr.to_bytes())
                        except Exception:
                            pass

                        # Add the signed-peer-record for each peer in the peer-proto
                        # if cached in the peerstore
                        closer_peer_envelope = (
                            self.host.get_peerstore().get_peer_record(peer)
                        )

                        if closer_peer_envelope is not None:
                            peer_proto.signedRecord = (
                                closer_peer_envelope.marshal_envelope()
                            )

                    # Create sender_signed_peer_record
                    envelope_bytes, _ = env_to_send_in_RPC(self.host)
                    response.senderRecord = envelope_bytes

                    # Serialize and send response
                    response_bytes = response.SerializeToString()
                    await stream.write(varint.encode(len(response_bytes)))
                    await stream.write(response_bytes)
                    logger.debug(
                        f"Sent FIND_NODE response with {len(response.closerPeers)} peers"
                    )

                # Handle ADD_PROVIDER message
                elif message.type == Message.MessageType.ADD_PROVIDER:
                    # Process ADD_PROVIDER
                    key = message.key
                    logger.debug(f"Received ADD_PROVIDER for key {key.hex()}")

                    # Consume the source signed-peer-record if sent
                    if not maybe_consume_signed_record(message, self.host, peer_id):
                        logger.error(
                            "Received an invalid-signed-record, dropping the stream"
                        )
                        await stream.close()
                        return

                    # Extract provider information
                    for provider_proto in message.providerPeers:
                        try:
                            # Validate that the provider is the sender
                            provider_id = ID(provider_proto.id)
                            if provider_id != peer_id:
                                logger.warning(
                                    f"Provider ID {provider_id} doesn't "
                                    f"match sender {peer_id}, ignoring"
                                )
                                continue

                            # Convert addresses to Multiaddr
                            addrs = []
                            for addr_bytes in provider_proto.addrs:
                                try:
                                    addrs.append(Multiaddr(addr_bytes))
                                except Exception as e:
                                    logger.warning(f"Failed to parse address: {e}")

                            # Add to provider store
                            provider_info = PeerInfo(provider_id, addrs)
                            self.provider_store.add_provider(key, provider_info)
                            logger.debug(
                                f"Added provider {provider_id} for key {key.hex()}"
                            )

                            # Process the signed-records of provider if sent
                            if not maybe_consume_signed_record(
                                provider_proto, self.host
                            ):
                                logger.error(
                                    "Received an invalid-signed-record, "
                                    "dropping the stream"
                                )
                                await stream.close()
                                return
                        except Exception as e:
                            logger.warning(f"Failed to process provider info: {e}")

                    # Send acknowledgement
                    response = Message()
                    response.type = Message.MessageType.ADD_PROVIDER
                    response.key = key

                    # Add sender's signed-peer-record
                    envelope_bytes, _ = env_to_send_in_RPC(self.host)
                    response.senderRecord = envelope_bytes

                    response_bytes = response.SerializeToString()
                    await stream.write(varint.encode(len(response_bytes)))
                    await stream.write(response_bytes)
                    logger.debug("Sent ADD_PROVIDER acknowledgement")

                # Handle GET_PROVIDERS message
                elif message.type == Message.MessageType.GET_PROVIDERS:
                    # Process GET_PROVIDERS
                    key = message.key
                    logger.debug(f"Received GET_PROVIDERS request for key {key.hex()}")

                    # Consume the source signed_peer_record if sent
                    if not maybe_consume_signed_record(message, self.host, peer_id):
                        logger.error(
                            "Received an invalid-signed-record, dropping the stream"
                        )
                        await stream.close()
                        return

                    # Find providers for the key
                    providers = self.provider_store.get_providers(key)
                    logger.debug(
                        f"Found {len(providers)} providers for key {key.hex()}"
                    )

                    # Create response
                    response = Message()
                    response.type = Message.MessageType.GET_PROVIDERS
                    response.key = key

                    # Create sender_signed_peer_record for the response
                    envelope_bytes, _ = env_to_send_in_RPC(self.host)
                    response.senderRecord = envelope_bytes

                    # Add provider information to response
                    for provider_info in providers:
                        provider_proto = response.providerPeers.add()
                        provider_proto.id = provider_info.peer_id.to_bytes()
                        provider_proto.connection = Message.ConnectionType.CAN_CONNECT

                        # Add provider signed-records if cached
                        provider_peer_envelope = (
                            self.host.get_peerstore().get_peer_record(
                                provider_info.peer_id
                            )
                        )

                        if provider_peer_envelope is not None:
                            provider_proto.signedRecord = (
                                provider_peer_envelope.marshal_envelope()
                            )

                        # Add addresses if available
                        for addr in provider_info.addrs:
                            provider_proto.addrs.append(addr.to_bytes())

                    # Also include closest peers if we don't have providers
                    if not providers:
                        closest_peers = self.routing_table.find_local_closest_peers(
                            key, 20
                        )
                        logger.debug(
                            f"No providers found, including {len(closest_peers)} "
                            "closest peers"
                        )

                        for peer in closest_peers:
                            # Skip if peer is the requester
                            if peer == peer_id:
                                continue

                            peer_proto = response.closerPeers.add()
                            peer_proto.id = peer.to_bytes()
                            peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                            # Add the signed-records of closest_peers if cached
                            closer_peer_envelope = (
                                self.host.get_peerstore().get_peer_record(peer)
                            )

                            if closer_peer_envelope is not None:
                                peer_proto.signedRecord = (
                                    closer_peer_envelope.marshal_envelope()
                                )

                            # Add addresses if available
                            try:
                                addrs = self.host.get_peerstore().addrs(peer)
                                for addr in addrs:
                                    peer_proto.addrs.append(addr.to_bytes())
                            except Exception:
                                pass

                    # Serialize and send response
                    response_bytes = response.SerializeToString()
                    await stream.write(varint.encode(len(response_bytes)))
                    await stream.write(response_bytes)
                    logger.debug("Sent GET_PROVIDERS response")

                # Handle GET_VALUE message
                elif message.type == Message.MessageType.GET_VALUE:
                    # Process GET_VALUE
                    key = message.key
                    logger.debug(f"Received GET_VALUE request for key {key.hex()}")

                    # Consume the sender_signed_peer_record
                    if not maybe_consume_signed_record(message, self.host, peer_id):
                        logger.error(
                            "Received an invalid-signed-record, dropping the stream"
                        )
                        await stream.close()
                        return

                    value = self.value_store.get(key)
                    if value:
                        logger.debug(f"Found value for key {key.hex()}")

                        # Create response using protobuf
                        response = Message()
                        response.type = Message.MessageType.GET_VALUE

                        # Create record
                        response.key = key
                        response.record.key = key
                        response.record.value = value
                        response.record.timeReceived = str(time.time())

                        # Create sender_signed_peer_record
                        envelope_bytes, _ = env_to_send_in_RPC(self.host)
                        response.senderRecord = envelope_bytes

                        # Serialize and send response
                        response_bytes = response.SerializeToString()
                        await stream.write(varint.encode(len(response_bytes)))
                        await stream.write(response_bytes)
                        logger.debug("Sent GET_VALUE response")
                    else:
                        logger.debug(f"No value found for key {key.hex()}")

                        # Create response with closest peers when no value is found
                        response = Message()
                        response.type = Message.MessageType.GET_VALUE
                        response.key = key

                        # Create sender_signed_peer_record for the response
                        envelope_bytes, _ = env_to_send_in_RPC(self.host)
                        response.senderRecord = envelope_bytes

                        # Add closest peers to key
                        closest_peers = self.routing_table.find_local_closest_peers(
                            key, 20
                        )
                        logger.debug(
                            "No value found, "
                            f"including {len(closest_peers)} closest peers"
                        )

                        for peer in closest_peers:
                            # Skip if peer is the requester
                            if peer == peer_id:
                                continue

                            peer_proto = response.closerPeers.add()
                            peer_proto.id = peer.to_bytes()
                            peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                            # Add signed-records of closer-peers if cached
                            closer_peer_envelope = (
                                self.host.get_peerstore().get_peer_record(peer)
                            )

                            if closer_peer_envelope is not None:
                                peer_proto.signedRecord = (
                                    closer_peer_envelope.marshal_envelope()
                                )

                            # Add addresses if available
                            try:
                                addrs = self.host.get_peerstore().addrs(peer)
                                for addr in addrs:
                                    peer_proto.addrs.append(addr.to_bytes())
                            except Exception:
                                pass

                        # Serialize and send response
                        response_bytes = response.SerializeToString()
                        await stream.write(varint.encode(len(response_bytes)))
                        await stream.write(response_bytes)
                        logger.debug("Sent GET_VALUE response with closest peers")

                # Handle PUT_VALUE message
                elif message.type == Message.MessageType.PUT_VALUE and message.HasField(
                    "record"
                ):
                    # Process PUT_VALUE
                    key = message.record.key
                    value = message.record.value
                    success = False

                    # Consume the source signed_peer_record if sent
                    if not maybe_consume_signed_record(message, self.host, peer_id):
                        logger.error(
                            "Received an invalid-signed-record, dropping the stream"
                        )
                        await stream.close()
                        return

                    try:
                        if not (key and value):
                            raise ValueError(
                                "Missing key or value in PUT_VALUE message"
                            )

                        self.value_store.put(key, value)
                        logger.debug(f"Stored value {value.hex()} for key {key.hex()}")
                        success = True
                    except Exception as e:
                        logger.warning(
                            f"Failed to store value {value.hex()} for key "
                            f"{key.hex()}: {e}"
                        )
                    finally:
                        # Send acknowledgement
                        response = Message()
                        response.type = Message.MessageType.PUT_VALUE
                        if success:
                            response.key = key

                        # Create sender_signed_peer_record for the response
                        envelope_bytes, _ = env_to_send_in_RPC(self.host)
                        response.senderRecord = envelope_bytes

                        # Serialize and send response
                        response_bytes = response.SerializeToString()
                        await stream.write(varint.encode(len(response_bytes)))
                        await stream.write(response_bytes)
                        logger.debug("Sent PUT_VALUE acknowledgement")

            except Exception as proto_err:
                logger.warning(f"Failed to parse protobuf message: {proto_err}")

            await stream.close()
        except Exception as e:
            logger.error(f"Error handling DHT stream: {e}")
            await stream.close()

    async def refresh_routing_table(self) -> None:
        """Refresh the routing table."""
        logger.debug("Refreshing routing table")
        await self.peer_routing.refresh_routing_table()

    # Peer routing methods

    async def find_peer(self, peer_id: ID) -> PeerInfo | None:
        """
        Find a peer with the given ID.
        """
        logger.debug(f"Finding peer: {peer_id}")
        return await self.peer_routing.find_peer(peer_id)

    # Value storage and retrieval methods

    async def put_value(self, key: bytes, value: bytes) -> None:
        """
        Store a value in the DHT.
        """
        logger.debug(f"Storing value for key {key.hex()}")

        # 1. Store locally first
        self.value_store.put(key, value)
        try:
            decoded_value = value.decode("utf-8")
        except UnicodeDecodeError:
            decoded_value = value.hex()
        logger.debug(
            f"Stored value locally for key {key.hex()} with value {decoded_value}"
        )

        # 2. Get closest peers, excluding self
        closest_peers = [
            peer
            for peer in self.routing_table.find_local_closest_peers(key)
            if peer != self.local_peer_id
        ]
        logger.debug(f"Found {len(closest_peers)} peers to store value at")

        # 3. Store at remote peers in batches of ALPHA, in parallel
        stored_count = 0
        for i in range(0, len(closest_peers), ALPHA):
            batch = closest_peers[i : i + ALPHA]
            batch_results = [False] * len(batch)

            async def store_one(idx: int, peer: ID) -> None:
                try:
                    with trio.move_on_after(QUERY_TIMEOUT):
                        success = await self.value_store._store_at_peer(
                            peer, key, value
                        )
                        batch_results[idx] = success
                        if success:
                            logger.debug(f"Stored value at peer {peer}")
                        else:
                            logger.debug(f"Failed to store value at peer {peer}")
                except Exception as e:
                    logger.debug(f"Error storing value at peer {peer}: {e}")

            async with trio.open_nursery() as nursery:
                for idx, peer in enumerate(batch):
                    nursery.start_soon(store_one, idx, peer)

            stored_count += sum(batch_results)

        logger.info(f"Successfully stored value at {stored_count} peers")

    async def get_value(self, key: bytes) -> bytes | None:
        logger.debug(f"Getting value for key: {key.hex()}")

        # 1. Check local store first
        value = self.value_store.get(key)
        if value:
            logger.debug("Found value locally")
            return value

        # 2. Get closest peers, excluding self
        closest_peers = [
            peer
            for peer in self.routing_table.find_local_closest_peers(key)
            if peer != self.local_peer_id
        ]
        logger.debug(f"Searching {len(closest_peers)} peers for value")

        # 3. Query ALPHA peers at a time in parallel
        for i in range(0, len(closest_peers), ALPHA):
            batch = closest_peers[i : i + ALPHA]
            found_value = None

            async def query_one(peer: ID) -> None:
                nonlocal found_value
                try:
                    with trio.move_on_after(QUERY_TIMEOUT):
                        value = await self.value_store._get_from_peer(peer, key)
                        if value is not None and found_value is None:
                            found_value = value
                            logger.debug(f"Found value at peer {peer}")
                except Exception as e:
                    logger.debug(f"Error querying peer {peer}: {e}")

            async with trio.open_nursery() as nursery:
                for peer in batch:
                    nursery.start_soon(query_one, peer)

            if found_value is not None:
                self.value_store.put(key, found_value)
                logger.info("Successfully retrieved value from network")
                return found_value

        # 4. Not found
        logger.warning(f"Value not found for key {key.hex()}")
        return None

    # Utility methods

    async def add_peer(self, peer_id: ID) -> bool:
        """
        Add a peer to the routing table.

        params: peer_id: The peer ID to add.

        Returns
        -------
        bool
            True if peer was added or updated, False otherwise.

        """
        return await self.routing_table.add_peer(peer_id)

    async def provide(self, key: bytes) -> bool:
        """
        Reference to provider_store.provide for convenience.
        """
        return await self.provider_store.provide(key)

    async def find_providers(self, key: bytes, count: int = 20) -> list[PeerInfo]:
        """
        Reference to provider_store.find_providers for convenience.
        """
        return await self.provider_store.find_providers(key, count)

    def get_routing_table_size(self) -> int:
        """
        Get the number of peers in the routing table.

        Returns
        -------
        int
            Number of peers.

        """
        return self.routing_table.size()

    def get_value_store_size(self) -> int:
        """
        Get the number of items in the value store.

        Returns
        -------
        int
            Number of items.

        """
        return self.value_store.size()

    def is_random_walk_enabled(self) -> bool:
        """
        Check if random walk peer discovery is enabled.

        Returns
        -------
        bool
            True if random walk is enabled, False otherwise.

        """
        return self.enable_random_walk
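The varint length-prefix read loop above appears, nearly verbatim, in several handlers. A small helper capturing the pattern (a sketch only, assuming a stream object exposing the async read(n) used throughout this diff):

    import varint

    async def read_varint_prefixed(stream) -> bytes | None:
        # Read the varint length prefix one byte at a time; a clear high bit
        # marks the final byte of the varint.
        prefix = b""
        while True:
            byte = await stream.read(1)
            if not byte:
                return None  # stream closed mid-prefix
            prefix += byte
            if byte[0] & 0x80 == 0:
                break
        length = varint.decode_bytes(prefix)
        # Read exactly `length` payload bytes, tolerating short reads.
        payload = b""
        while len(payload) < length:
            chunk = await stream.read(length - len(payload))
            if not chunk:
                return None  # stream closed mid-payload
            payload += chunk
        return payload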
0  libp2p/kad_dht/pb/__init__.py  Normal file

41  libp2p/kad_dht/pb/kademlia.proto  Normal file
@@ -0,0 +1,41 @@
syntax = "proto3";

message Record {
  bytes key = 1;
  bytes value = 2;
  string timeReceived = 5;
};

message Message {
  enum MessageType {
    PUT_VALUE = 0;
    GET_VALUE = 1;
    ADD_PROVIDER = 2;
    GET_PROVIDERS = 3;
    FIND_NODE = 4;
    PING = 5;
  }

  enum ConnectionType {
    NOT_CONNECTED = 0;
    CONNECTED = 1;
    CAN_CONNECT = 2;
    CANNOT_CONNECT = 3;
  }

  message Peer {
    bytes id = 1;
    repeated bytes addrs = 2;
    ConnectionType connection = 3;
    optional bytes signedRecord = 4; // Envelope(PeerRecord) encoded
  }

  MessageType type = 1;
  int32 clusterLevelRaw = 10;
  bytes key = 2;
  Record record = 3;
  repeated Peer closerPeers = 8;
  repeated Peer providerPeers = 9;

  optional bytes senderRecord = 11; // Envelope(PeerRecord) encoded
}
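For reference, this is how the generated bindings are used to build and frame one of these messages elsewhere in the diff (the key bytes here are illustrative):

    import varint
    from libp2p.kad_dht.pb.kademlia_pb2 import Message

    msg = Message()
    msg.type = Message.MessageType.FIND_NODE
    msg.key = b"example-target-key"  # illustrative; real keys are multihash bytes
    wire = msg.SerializeToString()
    framed = varint.encode(len(wire)) + wire  # varint length prefix, as in the handlers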
34  libp2p/kad_dht/pb/kademlia_pb2.py  Normal file
@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: libp2p/kad_dht/pb/kademlia.proto
# Protobuf Python Version: 4.25.3
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()




DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n libp2p/kad_dht/pb/kademlia.proto\":\n\x06Record\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x14\n\x0ctimeReceived\x18\x05 \x01(\t\"\xa2\x04\n\x07Message\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.Message.MessageType\x12\x17\n\x0f\x63lusterLevelRaw\x18\n \x01(\x05\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x12\x17\n\x06record\x18\x03 \x01(\x0b\x32\x07.Record\x12\"\n\x0b\x63loserPeers\x18\x08 \x03(\x0b\x32\r.Message.Peer\x12$\n\rproviderPeers\x18\t \x03(\x0b\x32\r.Message.Peer\x12\x19\n\x0csenderRecord\x18\x0b \x01(\x0cH\x00\x88\x01\x01\x1az\n\x04Peer\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05\x61\x64\x64rs\x18\x02 \x03(\x0c\x12+\n\nconnection\x18\x03 \x01(\x0e\x32\x17.Message.ConnectionType\x12\x19\n\x0csignedRecord\x18\x04 \x01(\x0cH\x00\x88\x01\x01\x42\x0f\n\r_signedRecord\"i\n\x0bMessageType\x12\r\n\tPUT_VALUE\x10\x00\x12\r\n\tGET_VALUE\x10\x01\x12\x10\n\x0c\x41\x44\x44_PROVIDER\x10\x02\x12\x11\n\rGET_PROVIDERS\x10\x03\x12\r\n\tFIND_NODE\x10\x04\x12\x08\n\x04PING\x10\x05\"W\n\x0e\x43onnectionType\x12\x11\n\rNOT_CONNECTED\x10\x00\x12\r\n\tCONNECTED\x10\x01\x12\x0f\n\x0b\x43\x41N_CONNECT\x10\x02\x12\x12\n\x0e\x43\x41NNOT_CONNECT\x10\x03\x42\x0f\n\r_senderRecordb\x06proto3')

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.kad_dht.pb.kademlia_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  _globals['_RECORD']._serialized_start=36
  _globals['_RECORD']._serialized_end=94
  _globals['_MESSAGE']._serialized_start=97
  _globals['_MESSAGE']._serialized_end=643
  _globals['_MESSAGE_PEER']._serialized_start=308
  _globals['_MESSAGE_PEER']._serialized_end=430
  _globals['_MESSAGE_MESSAGETYPE']._serialized_start=432
  _globals['_MESSAGE_MESSAGETYPE']._serialized_end=537
  _globals['_MESSAGE_CONNECTIONTYPE']._serialized_start=539
  _globals['_MESSAGE_CONNECTIONTYPE']._serialized_end=626
# @@protoc_insertion_point(module_scope)
70  libp2p/kad_dht/pb/kademlia_pb2.pyi  Normal file
@@ -0,0 +1,70 @@
from google.protobuf.internal import containers as _containers
from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union

DESCRIPTOR: _descriptor.FileDescriptor

class Record(_message.Message):
    __slots__ = ("key", "value", "timeReceived")
    KEY_FIELD_NUMBER: _ClassVar[int]
    VALUE_FIELD_NUMBER: _ClassVar[int]
    TIMERECEIVED_FIELD_NUMBER: _ClassVar[int]
    key: bytes
    value: bytes
    timeReceived: str
    def __init__(self, key: _Optional[bytes] = ..., value: _Optional[bytes] = ..., timeReceived: _Optional[str] = ...) -> None: ...

class Message(_message.Message):
    __slots__ = ("type", "clusterLevelRaw", "key", "record", "closerPeers", "providerPeers", "senderRecord")
    class MessageType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
        __slots__ = ()
        PUT_VALUE: _ClassVar[Message.MessageType]
        GET_VALUE: _ClassVar[Message.MessageType]
        ADD_PROVIDER: _ClassVar[Message.MessageType]
        GET_PROVIDERS: _ClassVar[Message.MessageType]
        FIND_NODE: _ClassVar[Message.MessageType]
        PING: _ClassVar[Message.MessageType]
    PUT_VALUE: Message.MessageType
    GET_VALUE: Message.MessageType
    ADD_PROVIDER: Message.MessageType
    GET_PROVIDERS: Message.MessageType
    FIND_NODE: Message.MessageType
    PING: Message.MessageType
    class ConnectionType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
        __slots__ = ()
        NOT_CONNECTED: _ClassVar[Message.ConnectionType]
        CONNECTED: _ClassVar[Message.ConnectionType]
        CAN_CONNECT: _ClassVar[Message.ConnectionType]
        CANNOT_CONNECT: _ClassVar[Message.ConnectionType]
    NOT_CONNECTED: Message.ConnectionType
    CONNECTED: Message.ConnectionType
    CAN_CONNECT: Message.ConnectionType
    CANNOT_CONNECT: Message.ConnectionType
    class Peer(_message.Message):
        __slots__ = ("id", "addrs", "connection", "signedRecord")
        ID_FIELD_NUMBER: _ClassVar[int]
        ADDRS_FIELD_NUMBER: _ClassVar[int]
        CONNECTION_FIELD_NUMBER: _ClassVar[int]
        SIGNEDRECORD_FIELD_NUMBER: _ClassVar[int]
        id: bytes
        addrs: _containers.RepeatedScalarFieldContainer[bytes]
        connection: Message.ConnectionType
        signedRecord: bytes
        def __init__(self, id: _Optional[bytes] = ..., addrs: _Optional[_Iterable[bytes]] = ..., connection: _Optional[_Union[Message.ConnectionType, str]] = ..., signedRecord: _Optional[bytes] = ...) -> None: ...
    TYPE_FIELD_NUMBER: _ClassVar[int]
    CLUSTERLEVELRAW_FIELD_NUMBER: _ClassVar[int]
    KEY_FIELD_NUMBER: _ClassVar[int]
    RECORD_FIELD_NUMBER: _ClassVar[int]
    CLOSERPEERS_FIELD_NUMBER: _ClassVar[int]
    PROVIDERPEERS_FIELD_NUMBER: _ClassVar[int]
    SENDERRECORD_FIELD_NUMBER: _ClassVar[int]
    type: Message.MessageType
    clusterLevelRaw: int
    key: bytes
    record: Record
    closerPeers: _containers.RepeatedCompositeFieldContainer[Message.Peer]
    providerPeers: _containers.RepeatedCompositeFieldContainer[Message.Peer]
    senderRecord: bytes
    def __init__(self, type: _Optional[_Union[Message.MessageType, str]] = ..., clusterLevelRaw: _Optional[int] = ..., key: _Optional[bytes] = ..., record: _Optional[_Union[Record, _Mapping]] = ..., closerPeers: _Optional[_Iterable[_Union[Message.Peer, _Mapping]]] = ..., providerPeers: _Optional[_Iterable[_Union[Message.Peer, _Mapping]]] = ..., senderRecord: _Optional[bytes] = ...) -> None: ...  # type: ignore
460  libp2p/kad_dht/peer_routing.py  Normal file
@@ -0,0 +1,460 @@
"""
Peer routing implementation for Kademlia DHT.

This module implements the peer routing interface using Kademlia's algorithm
to efficiently locate peers in a distributed network.
"""

import logging

import trio
import varint

from libp2p.abc import (
    IHost,
    INetStream,
    IPeerRouting,
)
from libp2p.peer.envelope import Envelope
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerinfo import (
    PeerInfo,
)
from libp2p.peer.peerstore import env_to_send_in_RPC

from .common import (
    ALPHA,
    PROTOCOL_ID,
)
from .pb.kademlia_pb2 import (
    Message,
)
from .routing_table import (
    RoutingTable,
)
from .utils import (
    maybe_consume_signed_record,
    sort_peer_ids_by_distance,
)

# logger = logging.getLogger("libp2p.kademlia.peer_routing")
logger = logging.getLogger("kademlia-example.peer_routing")

MAX_PEER_LOOKUP_ROUNDS = 20  # Maximum number of rounds in peer lookup


class PeerRouting(IPeerRouting):
    """
    Implementation of peer routing using the Kademlia algorithm.

    This class provides methods to find peers in the DHT network
    and helps maintain the routing table.
    """

    def __init__(self, host: IHost, routing_table: RoutingTable):
        """
        Initialize the peer routing service.

        :param host: The libp2p host
        :param routing_table: The Kademlia routing table

        """
        self.host = host
        self.routing_table = routing_table

    async def find_peer(self, peer_id: ID) -> PeerInfo | None:
        """
        Find a peer with the given ID.

        :param peer_id: The ID of the peer to find

        Returns
        -------
        PeerInfo | None
            The peer information if found, None otherwise

        """
        # Check if this is actually our peer ID
        if peer_id == self.host.get_id():
            try:
                # Return our own peer info
                return PeerInfo(peer_id, self.host.get_addrs())
            except Exception:
                logger.exception("Error getting our own peer info")
                return None

        # First check if the peer is in our routing table
        peer_info = self.routing_table.get_peer_info(peer_id)
        if peer_info:
            logger.debug(f"Found peer {peer_id} in routing table")
            return peer_info

        # Then check if the peer is in our peerstore
        try:
            addrs = self.host.get_peerstore().addrs(peer_id)
            if addrs:
                logger.debug(f"Found peer {peer_id} in peerstore")
                return PeerInfo(peer_id, addrs)
        except Exception:
            pass

        # If not found locally, search the network
        try:
            closest_peers = await self.find_closest_peers_network(peer_id.to_bytes())
            logger.info(f"Closest peers found: {closest_peers}")

            # Check if we found the peer we're looking for
            for found_peer in closest_peers:
                if found_peer == peer_id:
                    try:
                        addrs = self.host.get_peerstore().addrs(found_peer)
                        if addrs:
                            return PeerInfo(found_peer, addrs)
                    except Exception:
                        pass

        except Exception as e:
            logger.error(f"Error searching for peer {peer_id}: {e}")

        # Not found
        logger.info(f"Peer {peer_id} not found")
        return None

    async def _query_single_peer_for_closest(
        self, peer: ID, target_key: bytes, new_peers: list[ID]
    ) -> None:
        """
        Query a single peer for closest peers and append results to the shared list.

        params: peer : ID
            The peer to query
        params: target_key : bytes
            The target key to find closest peers for
        params: new_peers : list[ID]
            Shared list to append results to

        """
        try:
            result = await self._query_peer_for_closest(peer, target_key)
            # Deduplicate so the shared list never holds the same peer twice
            for peer_id in result:
                if peer_id not in new_peers:
                    new_peers.append(peer_id)
            logger.debug(
                "Queried peer %s for closest peers, got %d results (%d unique)",
                peer,
                len(result),
                len([p for p in result if p not in new_peers[: -len(result)]]),
            )
        except Exception as e:
            logger.debug(f"Query to peer {peer} failed: {e}")

    async def find_closest_peers_network(
        self, target_key: bytes, count: int = 20
    ) -> list[ID]:
        """
        Find the closest peers to a target key in the entire network.

        Performs an iterative lookup by querying peers for their closest peers.

        Returns
        -------
        list[ID]
            Closest peer IDs

        """
        # Start with closest peers from our routing table
        closest_peers = self.routing_table.find_local_closest_peers(target_key, count)
        logger.debug("Local closest peers: %d found", len(closest_peers))
        queried_peers: set[ID] = set()
        rounds = 0

        # Return early if we have no peers to start with
        if not closest_peers:
            logger.debug("No local peers available for network lookup")
            return []

        # Iterative lookup until convergence
        while rounds < MAX_PEER_LOOKUP_ROUNDS:
            rounds += 1
            logger.debug(f"Lookup round {rounds}/{MAX_PEER_LOOKUP_ROUNDS}")

            # Find peers we haven't queried yet
            peers_to_query = [p for p in closest_peers if p not in queried_peers]
            if not peers_to_query:
                logger.debug("No more unqueried peers available, ending lookup")
                break  # No more peers to query

            # Query these peers for their closest peers to target
            peers_batch = peers_to_query[:ALPHA]  # Limit to ALPHA peers at a time

            # Mark these peers as queried before we actually query them
            for peer in peers_batch:
                queried_peers.add(peer)

            # Run queries in parallel for this batch using a trio nursery
            new_peers: list[ID] = []  # Shared list to collect all results

            async with trio.open_nursery() as nursery:
                for peer in peers_batch:
                    nursery.start_soon(
                        self._query_single_peer_for_closest, peer, target_key, new_peers
                    )

            # If we got no new peers, we're done
            if not new_peers:
                logger.debug("No new peers discovered in this round, ending lookup")
                break

            # Update our list of closest peers
            all_candidates = closest_peers + new_peers
            old_closest_peers = closest_peers[:]
            closest_peers = sort_peer_ids_by_distance(target_key, all_candidates)[
                :count
            ]
            logger.debug(f"Updated closest peers count: {len(closest_peers)}")

            # Check if we made any progress (found closer peers)
            if closest_peers == old_closest_peers:
                logger.debug("No improvement in closest peers, ending lookup")
                break

        logger.info(
            f"Network lookup completed after {rounds} rounds, "
            f"found {len(closest_peers)} peers"
        )
        return closest_peers

    async def _query_peer_for_closest(self, peer: ID, target_key: bytes) -> list[ID]:
        """
        Query a peer for their closest peers
        to the target key using a varint length prefix.
        """
        stream = None
        results = []
        try:
            # Add the peer to our routing table regardless of query outcome
            try:
                addrs = self.host.get_peerstore().addrs(peer)
                if addrs:
                    peer_info = PeerInfo(peer, addrs)
                    await self.routing_table.add_peer(peer_info)
            except Exception as e:
                logger.debug(f"Failed to add peer {peer} to routing table: {e}")

            # Open a stream to the peer using the Kademlia protocol
            logger.debug(f"Opening stream to {peer} for closest peers query")
            try:
                stream = await self.host.new_stream(peer, [PROTOCOL_ID])
                logger.debug(f"Stream opened to {peer}")
            except Exception as e:
                logger.warning(f"Failed to open stream to {peer}: {e}")
                return []

            # Create and send FIND_NODE request using protobuf
            find_node_msg = Message()
            find_node_msg.type = Message.MessageType.FIND_NODE
            find_node_msg.key = target_key  # Set target key directly as bytes

            # Create sender_signed_peer_record
            envelope_bytes, _ = env_to_send_in_RPC(self.host)
            find_node_msg.senderRecord = envelope_bytes

            # Serialize and send the protobuf message with varint length prefix
            proto_bytes = find_node_msg.SerializeToString()
            logger.debug(
                f"Sending FIND_NODE: {proto_bytes.hex()} (len={len(proto_bytes)})"
            )
            await stream.write(varint.encode(len(proto_bytes)))
            await stream.write(proto_bytes)

            # Read varint-prefixed response length
            length_bytes = b""
            while True:
                b = await stream.read(1)
                if not b:
                    logger.warning(
                        "Error reading varint length from stream: connection closed"
                    )
                    return []
                length_bytes += b
                if b[0] & 0x80 == 0:
                    break
            response_length = varint.decode_bytes(length_bytes)

            # Read response data
            response_bytes = b""
            remaining = response_length
            while remaining > 0:
                chunk = await stream.read(remaining)
                if not chunk:
                    logger.debug(f"Connection closed by peer {peer} while reading data")
                    return []
                response_bytes += chunk
                remaining -= len(chunk)

            # Parse the protobuf response
            response_msg = Message()
            response_msg.ParseFromString(response_bytes)
            logger.debug(
                "Received response from %s with %d peers",
                peer,
                len(response_msg.closerPeers),
            )

            # Process closest peers from response
            if response_msg.type == Message.MessageType.FIND_NODE:
                # Consume the sender_signed_peer_record
                if not maybe_consume_signed_record(response_msg, self.host, peer):
                    logger.error(
                        "Received an invalid-signed-record, ignoring the response"
                    )
                    return []

                for peer_data in response_msg.closerPeers:
                    # Consume the received closer_peers signed-records; the peer-id
                    # is sent with the peer-data
                    if not maybe_consume_signed_record(peer_data, self.host):
                        logger.error(
                            "Received an invalid-signed-record, ignoring the response"
                        )
                        return []

                    new_peer_id = ID(peer_data.id)
                    if new_peer_id not in results:
                        results.append(new_peer_id)
                    if peer_data.addrs:
                        from multiaddr import (
                            Multiaddr,
                        )

                        addrs = [Multiaddr(addr) for addr in peer_data.addrs]
                        self.host.get_peerstore().add_addrs(new_peer_id, addrs, 3600)

        except Exception as e:
            logger.debug(f"Error querying peer {peer} for closest: {e}")

        finally:
            if stream:
                await stream.close()
        return results

    async def _handle_kad_stream(self, stream: INetStream) -> None:
        """
        Handle incoming Kademlia protocol streams.

        params: stream: The incoming stream

        Returns
        -------
        None

        """
        try:
            # Read message length
            peer_id = stream.muxed_conn.peer_id
            length_bytes = await stream.read(4)
            if not length_bytes:
                return

            message_length = int.from_bytes(length_bytes, byteorder="big")

            # Read message
            message_bytes = await stream.read(message_length)
            if not message_bytes:
                return

            # Parse protobuf message
            kad_message = Message()
            closer_peer_envelope: Envelope | None = None
            try:
                kad_message.ParseFromString(message_bytes)

                if kad_message.type == Message.MessageType.FIND_NODE:
                    # Consume the sender's signed-peer-record if sent
                    if not maybe_consume_signed_record(kad_message, self.host, peer_id):
                        logger.error(
                            "Received an invalid-signed-record, dropping the stream"
                        )
                        return

                    # Get target key directly from protobuf message
                    target_key = kad_message.key

                    # Find closest peers to target
                    closest_peers = self.routing_table.find_local_closest_peers(
                        target_key, 20
                    )

                    # Create protobuf response
                    response = Message()
                    response.type = Message.MessageType.FIND_NODE

                    # Create sender_signed_peer_record for the response
                    envelope_bytes, _ = env_to_send_in_RPC(self.host)
                    response.senderRecord = envelope_bytes

                    # Add peer information to response (use a distinct loop name so
                    # we don't shadow the requester's peer_id above)
                    for close_peer_id in closest_peers:
                        peer_proto = response.closerPeers.add()
                        peer_proto.id = close_peer_id.to_bytes()
                        peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                        # Add the signed-records of closest_peers if cached
                        closer_peer_envelope = (
                            self.host.get_peerstore().get_peer_record(close_peer_id)
                        )

                        if isinstance(closer_peer_envelope, Envelope):
                            peer_proto.signedRecord = (
                                closer_peer_envelope.marshal_envelope()
                            )

                        # Add addresses if available
                        try:
                            addrs = self.host.get_peerstore().addrs(close_peer_id)
                            if addrs:
                                for addr in addrs:
                                    peer_proto.addrs.append(addr.to_bytes())
                        except Exception:
                            pass

                    # Send response
                    response_bytes = response.SerializeToString()
                    await stream.write(len(response_bytes).to_bytes(4, byteorder="big"))
                    await stream.write(response_bytes)

            except Exception as parse_err:
                logger.error(f"Failed to parse protocol buffer message: {parse_err}")

        except Exception as e:
            logger.debug(f"Error handling Kademlia stream: {e}")
        finally:
            await stream.close()

    async def refresh_routing_table(self) -> None:
        """
        Refresh the routing table by performing lookups for random keys.

        Returns
        -------
        None

        """
        logger.info("Refreshing routing table")

        # Perform a lookup for ourselves to populate the routing table
        local_id = self.host.get_id()
        closest_peers = await self.find_closest_peers_network(local_id.to_bytes())

        # Add discovered peers to routing table
        for peer_id in closest_peers:
            try:
                addrs = self.host.get_peerstore().addrs(peer_id)
                if addrs:
                    peer_info = PeerInfo(peer_id, addrs)
                    await self.routing_table.add_peer(peer_info)
            except Exception as e:
                logger.debug(f"Failed to add discovered peer {peer_id}: {e}")
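sort_peer_ids_by_distance above orders candidates by Kademlia's XOR metric. A minimal sketch of that ordering (assuming the distance is taken over raw key bytes; the actual helper in .utils may hash peer IDs first):

    def xor_distance(a: bytes, b: bytes) -> int:
        # Interpret both byte strings as big-endian integers and XOR them;
        # a smaller result means "closer" in Kademlia's keyspace.
        return int.from_bytes(a, "big") ^ int.from_bytes(b, "big")

    def sort_by_distance(target: bytes, candidates: list[bytes]) -> list[bytes]:
        return sorted(candidates, key=lambda k: xor_distance(target, k))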
612  libp2p/kad_dht/provider_store.py  Normal file
@@ -0,0 +1,612 @@
||||
"""
|
||||
Provider record storage for Kademlia DHT.
|
||||
|
||||
This module implements the storage for content provider records in the Kademlia DHT.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from typing import (
|
||||
Any,
|
||||
)
|
||||
|
||||
from multiaddr import (
|
||||
Multiaddr,
|
||||
)
|
||||
import trio
|
||||
import varint
|
||||
|
||||
from libp2p.abc import (
|
||||
IHost,
|
||||
)
|
||||
from libp2p.custom_types import (
|
||||
TProtocol,
|
||||
)
|
||||
from libp2p.kad_dht.utils import maybe_consume_signed_record
|
||||
from libp2p.peer.id import (
|
||||
ID,
|
||||
)
|
||||
from libp2p.peer.peerinfo import (
|
||||
PeerInfo,
|
||||
)
|
||||
from libp2p.peer.peerstore import env_to_send_in_RPC
|
||||
|
||||
from .common import (
|
||||
ALPHA,
|
||||
PROTOCOL_ID,
|
||||
QUERY_TIMEOUT,
|
||||
)
|
||||
from .pb.kademlia_pb2 import (
|
||||
Message,
|
||||
)
|
||||
|
||||
# logger = logging.getLogger("libp2p.kademlia.provider_store")
|
||||
logger = logging.getLogger("kademlia-example.provider_store")
|
||||
|
||||
# Constants for provider records (based on IPFS standards)
|
||||
PROVIDER_RECORD_REPUBLISH_INTERVAL = 22 * 60 * 60 # 22 hours in seconds
|
||||
PROVIDER_RECORD_EXPIRATION_INTERVAL = 48 * 60 * 60 # 48 hours in seconds
|
||||
PROVIDER_ADDRESS_TTL = 30 * 60 # 30 minutes in seconds
|
||||
|
||||
|
||||
class ProviderRecord:
|
||||
"""
|
||||
A record for a content provider in the DHT.
|
||||
|
||||
Contains the peer information and timestamp.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
provider_info: PeerInfo,
|
||||
timestamp: float | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize a new provider record.
|
||||
|
||||
:param provider_info: The provider's peer information
|
||||
:param timestamp: Time this record was created/updated
|
||||
(defaults to current time)
|
||||
|
||||
"""
|
||||
self.provider_info = provider_info
|
||||
self.timestamp = timestamp or time.time()
|
||||
|
||||
def is_expired(self) -> bool:
|
||||
"""
|
||||
Check if this provider record has expired.
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if the record has expired
|
||||
|
||||
"""
|
||||
current_time = time.time()
|
||||
return (current_time - self.timestamp) >= PROVIDER_RECORD_EXPIRATION_INTERVAL
|
||||
|
||||
def should_republish(self) -> bool:
|
||||
"""
|
||||
Check if this provider record should be republished.
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if the record should be republished
|
||||
|
||||
"""
|
||||
current_time = time.time()
|
||||
return (current_time - self.timestamp) >= PROVIDER_RECORD_REPUBLISH_INTERVAL
|
||||
|
||||
@property
|
||||
def peer_id(self) -> ID:
|
||||
"""Get the provider's peer ID."""
|
||||
return self.provider_info.peer_id
|
||||
|
||||
@property
|
||||
def addresses(self) -> list[Multiaddr]:
|
||||
"""Get the provider's addresses."""
|
||||
return self.provider_info.addrs
|
||||
|
||||
|
||||
class ProviderStore:
|
||||
"""
|
||||
Store for content provider records in the Kademlia DHT.
|
||||
|
||||
Maps content keys to provider records, with support for expiration.
|
||||
"""
|
||||
|
||||
def __init__(self, host: IHost, peer_routing: Any = None) -> None:
|
||||
"""
|
||||
Initialize a new provider store.
|
||||
|
||||
:param host: The libp2p host instance (optional)
|
||||
:param peer_routing: The peer routing instance (optional)
|
||||
"""
|
||||
# Maps content keys to a dict of provider records (peer_id -> record)
|
||||
self.providers: dict[bytes, dict[str, ProviderRecord]] = {}
|
||||
self.host = host
|
||||
self.peer_routing = peer_routing
|
||||
self.providing_keys: set[bytes] = set()
|
||||
self.local_peer_id = host.get_id()
|
||||
|
||||
async def _republish_provider_records(self) -> None:
|
||||
"""Republish all provider records for content this node is providing."""
|
||||
# First, republish keys we're actively providing
|
||||
for key in self.providing_keys:
|
||||
logger.debug(f"Republishing provider record for key {key.hex()}")
|
||||
await self.provide(key)
|
||||
|
||||
# Also check for any records that should be republished
|
||||
time.time()
|
||||
for key, providers in self.providers.items():
|
||||
for peer_id_str, record in providers.items():
|
||||
# Only republish records for our own peer
|
||||
if self.local_peer_id and str(self.local_peer_id) == peer_id_str:
|
||||
if record.should_republish():
|
||||
logger.debug(
|
||||
f"Republishing old provider record for key {key.hex()}"
|
||||
)
|
||||
await self.provide(key)
|
||||
|
||||
async def provide(self, key: bytes) -> bool:
|
||||
"""
|
||||
Advertise that this node can provide a piece of content.
|
||||
|
||||
Finds the k closest peers to the key and sends them ADD_PROVIDER messages.
|
||||
|
||||
:param key: The content key (multihash) to advertise
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if the advertisement was successful
|
||||
|
||||
"""
|
||||
if not self.host or not self.peer_routing:
|
||||
logger.error("Host or peer_routing not initialized, cannot provide content")
|
||||
return False
|
||||
|
||||
# Add to local provider store
|
||||
local_addrs = []
|
||||
for addr in self.host.get_addrs():
|
||||
local_addrs.append(addr)
|
||||
|
||||
local_peer_info = PeerInfo(self.host.get_id(), local_addrs)
|
||||
self.add_provider(key, local_peer_info)
|
||||
|
||||
# Track that we're providing this key
|
||||
self.providing_keys.add(key)
|
||||
|
||||
# Find the k closest peers to the key
|
||||
closest_peers = await self.peer_routing.find_closest_peers_network(key)
|
||||
logger.debug(
|
||||
"Found %d peers close to key %s for provider advertisement",
|
||||
len(closest_peers),
|
||||
key.hex(),
|
||||
)
|
||||
|
||||
# Send ADD_PROVIDER messages to these ALPHA peers in parallel.
|
||||
success_count = 0
|
||||
for i in range(0, len(closest_peers), ALPHA):
|
||||
batch = closest_peers[i : i + ALPHA]
|
||||
results: list[bool] = [False] * len(batch)
|
||||
|
||||
async def send_one(
|
||||
idx: int, peer_id: ID, results: list[bool] = results
|
||||
) -> None:
|
||||
if peer_id == self.local_peer_id:
|
||||
return
|
||||
try:
|
||||
with trio.move_on_after(QUERY_TIMEOUT):
|
||||
success = await self._send_add_provider(peer_id, key)
|
||||
results[idx] = success
|
||||
if not success:
|
||||
logger.warning(f"Failed to send ADD_PROVIDER to {peer_id}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Error sending ADD_PROVIDER to {peer_id}: {e}")
|
||||
|
||||
async with trio.open_nursery() as nursery:
|
||||
for idx, peer_id in enumerate(batch):
|
||||
nursery.start_soon(send_one, idx, peer_id, results)
|
||||
success_count += sum(results)
|
||||
|
||||
logger.info(f"Successfully advertised to {success_count} peers")
|
||||
return success_count > 0
|
||||
|
||||
    async def _send_add_provider(self, peer_id: ID, key: bytes) -> bool:
        """
        Send an ADD_PROVIDER message to a specific peer.

        :param peer_id: The peer to send the message to
        :param key: The content key being provided

        Returns
        -------
        bool
            True if the message was successfully sent and acknowledged

        """
        result = False
        stream = None
        try:
            # Open a stream to the peer
            stream = await self.host.new_stream(peer_id, [TProtocol(PROTOCOL_ID)])

            # Get our addresses to include in the message
            addrs = []
            for addr in self.host.get_addrs():
                addrs.append(addr.to_bytes())

            # Create the ADD_PROVIDER message
            message = Message()
            message.type = Message.MessageType.ADD_PROVIDER
            message.key = key

            # Create the sender's signed peer record
            envelope_bytes, _ = env_to_send_in_RPC(self.host)
            message.senderRecord = envelope_bytes

            # Add our provider info
            provider = message.providerPeers.add()
            provider.id = self.local_peer_id.to_bytes()
            provider.addrs.extend(addrs)

            # Add the provider's signed peer record
            provider.signedRecord = envelope_bytes

            # Serialize and send the message with a varint length prefix
            proto_bytes = message.SerializeToString()
            await stream.write(varint.encode(len(proto_bytes)))
            await stream.write(proto_bytes)
            logger.debug(f"Sent ADD_PROVIDER to {peer_id} for key {key.hex()}")

            # Read the varint response length prefix byte by byte; the high bit
            # of each byte marks whether more length bytes follow
            length_bytes = b""
            while True:
                logger.debug("Reading response length prefix in add provider")
                b = await stream.read(1)
                if not b:
                    return False
                length_bytes += b
                if b[0] & 0x80 == 0:
                    break

            response_length = varint.decode_bytes(length_bytes)
            # Read the response payload until all bytes have arrived
            response_bytes = b""
            remaining = response_length
            while remaining > 0:
                chunk = await stream.read(remaining)
                if not chunk:
                    return False
                response_bytes += chunk
                remaining -= len(chunk)

            # Parse the response
            response = Message()
            response.ParseFromString(response_bytes)

            if response.type == Message.MessageType.ADD_PROVIDER:
                # Consume the sender's signed peer record if one was sent
                if not maybe_consume_signed_record(response, self.host, peer_id):
                    logger.error(
                        "Received an invalid signed record, ignoring the response"
                    )
                    result = False
                else:
                    result = True

        except Exception as e:
            logger.warning(f"Error sending ADD_PROVIDER to {peer_id}: {e}")

        finally:
            if stream is not None:
                await stream.close()
        return result

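    # Note on framing (illustrative): messages are length-prefixed with an
    # unsigned varint, where the high bit of each byte marks continuation.
    # For example, a 300-byte payload carries the prefix b"\xac\x02", since
    # 300 = 0b10_0101100: the low 7 bits give 0x2C, which becomes 0xAC with the
    # continuation bit set, followed by the remaining bits 0b10 = 0x02.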
    async def find_providers(self, key: bytes, count: int = 20) -> list[PeerInfo]:
        """
        Find content providers for a given key.

        :param key: The content key to look for
        :param count: Maximum number of providers to return

        Returns
        -------
        List[PeerInfo]
            List of content providers

        """
        if not self.host or not self.peer_routing:
            logger.error("Host or peer_routing not initialized, cannot find providers")
            return []

        # Check the local provider store first
        local_providers = self.get_providers(key)
        logger.debug("Local providers are %s", local_providers)
        if local_providers:
            logger.debug(
                f"Found {len(local_providers)} providers locally for {key.hex()}"
            )
            return local_providers[:count]

        # Find the closest peers to the key
        closest_peers = await self.peer_routing.find_closest_peers_network(key)
        logger.debug(
            f"Searching {len(closest_peers)} peers for providers of {key.hex()}"
        )

        # Query these peers for providers in batches of ALPHA, in parallel,
        # with a per-query timeout
        all_providers = []
        for i in range(0, len(closest_peers), ALPHA):
            batch = closest_peers[i : i + ALPHA]
            batch_results: list[list[PeerInfo]] = [[] for _ in batch]

            async def get_one(
                idx: int,
                peer_id: ID,
                batch_results: list[list[PeerInfo]] = batch_results,
            ) -> None:
                if peer_id == self.local_peer_id:
                    return
                try:
                    with trio.move_on_after(QUERY_TIMEOUT):
                        providers = await self._get_providers_from_peer(peer_id, key)
                        if providers:
                            for provider in providers:
                                self.add_provider(key, provider)
                            batch_results[idx] = providers
                        else:
                            logger.debug(f"No providers found at peer {peer_id}")
                except Exception as e:
                    logger.warning(f"Failed to get providers from {peer_id}: {e}")

            async with trio.open_nursery() as nursery:
                for idx, peer_id in enumerate(batch):
                    nursery.start_soon(get_one, idx, peer_id, batch_results)

            for providers in batch_results:
                all_providers.extend(providers)
                if len(all_providers) >= count:
                    return all_providers[:count]

        return all_providers[:count]

    async def _get_providers_from_peer(self, peer_id: ID, key: bytes) -> list[PeerInfo]:
        """
        Get content providers from a specific peer.

        :param peer_id: The peer to query
        :param key: The content key to look for

        Returns
        -------
        List[PeerInfo]
            List of provider information

        """
        providers: list[PeerInfo] = []
        try:
            # Open a stream to the peer
            stream = await self.host.new_stream(peer_id, [TProtocol(PROTOCOL_ID)])

            try:
                # Create the GET_PROVIDERS message
                message = Message()
                message.type = Message.MessageType.GET_PROVIDERS
                message.key = key

                # Attach the sender's signed peer record
                envelope_bytes, _ = env_to_send_in_RPC(self.host)
                message.senderRecord = envelope_bytes

                # Serialize and send the message with a varint length prefix
                proto_bytes = message.SerializeToString()
                await stream.write(varint.encode(len(proto_bytes)))
                await stream.write(proto_bytes)

                # Read the varint response length prefix
                length_bytes = b""
                while True:
                    b = await stream.read(1)
                    if not b:
                        return []
                    length_bytes += b
                    if b[0] & 0x80 == 0:
                        break

                response_length = varint.decode_bytes(length_bytes)
                # Read the response payload
                response_bytes = b""
                remaining = response_length
                while remaining > 0:
                    chunk = await stream.read(remaining)
                    if not chunk:
                        return []
                    response_bytes += chunk
                    remaining -= len(chunk)

                # Parse the response
                response = Message()
                response.ParseFromString(response_bytes)

                # Check the response type
                if response.type != Message.MessageType.GET_PROVIDERS:
                    return []

                # Consume the sender's signed peer record if one was sent
                if not maybe_consume_signed_record(response, self.host, peer_id):
                    logger.error(
                        "Received an invalid signed record, ignoring the response"
                    )
                    return []

                # Extract provider information
                providers = []
                for provider_proto in response.providerPeers:
                    try:
                        # Consume the provider's signed peer record if one was
                        # sent; the peer ID arrives with the provider proto
                        if not maybe_consume_signed_record(provider_proto, self.host):
                            logger.error(
                                "Received an invalid signed record, "
                                "ignoring the response"
                            )
                            return []

                        # Create the peer ID from bytes
                        provider_id = ID(provider_proto.id)

                        # Convert addresses to Multiaddr
                        addrs = []
                        for addr_bytes in provider_proto.addrs:
                            try:
                                addrs.append(Multiaddr(addr_bytes))
                            except Exception:
                                pass  # Skip invalid addresses

                        # Create PeerInfo and add it to the result
                        providers.append(PeerInfo(provider_id, addrs))

                    except Exception as e:
                        logger.warning(f"Failed to parse provider info: {e}")

            finally:
                await stream.close()
            return providers

        except Exception as e:
            logger.warning(f"Error getting providers from {peer_id}: {e}")
            return []

    def add_provider(self, key: bytes, provider: PeerInfo) -> None:
        """
        Add a provider for a given content key.

        :param key: The content key
        :param provider: The provider's peer information

        Returns
        -------
        None

        """
        # Initialize the providers map for this key if needed
        if key not in self.providers:
            self.providers[key] = {}

        # Add or update the provider record
        # Use the string representation as the dict key
        peer_id_str = str(provider.peer_id)
        self.providers[key][peer_id_str] = ProviderRecord(
            provider_info=provider, timestamp=time.time()
        )
        logger.debug(f"Added provider {provider.peer_id} for key {key.hex()}")

    def get_providers(self, key: bytes) -> list[PeerInfo]:
        """
        Get all providers for a given content key.

        :param key: The content key

        Returns
        -------
        List[PeerInfo]
            List of providers for the key

        """
        if key not in self.providers:
            return []

        # Collect valid (non-expired) provider records
        result = []
        current_time = time.time()
        expired_peers = []

        for peer_id_str, record in self.providers[key].items():
            # Check if the record has expired
            if current_time - record.timestamp > PROVIDER_RECORD_EXPIRATION_INTERVAL:
                expired_peers.append(peer_id_str)
                continue

            # Use the addresses only if they haven't expired
            addresses = []
            if current_time - record.timestamp <= PROVIDER_ADDRESS_TTL:
                addresses = record.addresses

            # Create PeerInfo and add it to the results
            result.append(PeerInfo(record.peer_id, addresses))

        # Clean up expired records
        for peer_id in expired_peers:
            del self.providers[key][peer_id]

        # Remove the key if no providers are left
        if not self.providers[key]:
            del self.providers[key]

        return result

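    # Note (illustrative): PROVIDER_RECORD_EXPIRATION_INTERVAL bounds how long
    # a record is served at all, while the shorter PROVIDER_ADDRESS_TTL bounds
    # how long its cached addresses are trusted; in between, the record is
    # returned with an empty address list and the caller is expected to resolve
    # fresh addresses through peer routing.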
    def cleanup_expired(self) -> None:
        """Remove expired provider records."""
        current_time = time.time()
        expired_keys = []

        for key, providers in self.providers.items():
            expired_providers = []

            for peer_id_str, record in providers.items():
                if (
                    current_time - record.timestamp
                    > PROVIDER_RECORD_EXPIRATION_INTERVAL
                ):
                    expired_providers.append(peer_id_str)
                    logger.debug(
                        f"Removing expired provider {peer_id_str} for key {key.hex()}"
                    )

            # Remove the expired providers
            for peer_id in expired_providers:
                del providers[peer_id]

            # Track empty keys for removal
            if not providers:
                expired_keys.append(key)

        # Remove the empty keys
        for key in expired_keys:
            del self.providers[key]
            logger.debug(f"Removed key with no providers: {key.hex()}")

    def get_provided_keys(self, peer_id: ID) -> list[bytes]:
        """
        Get all content keys provided by a specific peer.

        :param peer_id: The peer ID to look for

        Returns
        -------
        List[bytes]
            List of content keys provided by the peer

        """
        peer_id_str = str(peer_id)
        result = []

        for key, providers in self.providers.items():
            if peer_id_str in providers:
                result.append(key)

        return result

    def size(self) -> int:
        """
        Get the total number of provider records in the store.

        Returns
        -------
        int
            Total number of provider records across all keys

        """
        total = 0
        for providers in self.providers.values():
            total += len(providers)
        return total
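A minimal end-to-end sketch of the provider flow above (illustrative only:
`store` stands for an instance of this provider-store class attached to a
started host and DHT, and `data` is arbitrary content bytes):

    key = create_key_from_binary(data)       # SHA-256 multihash digest of data
    ok = await store.provide(key)            # ADD_PROVIDER to the k closest peers
    infos = await store.find_providers(key)  # local store first, then the network
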
libp2p/kad_dht/routing_table.py (Normal file, 745 additions)
@@ -0,0 +1,745 @@
"""
Kademlia DHT routing table implementation.
"""

from collections import (
    OrderedDict,
)
import logging
import time

import multihash
import trio

from libp2p.abc import (
    IHost,
)
from libp2p.kad_dht.utils import (
    xor_distance,
)
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerinfo import (
    PeerInfo,
)

from .common import (
    PROTOCOL_ID,
)
from .pb.kademlia_pb2 import (
    Message,
)

logger = logging.getLogger("kademlia-example.routing_table")

# Default parameters
BUCKET_SIZE = 20  # k in the Kademlia paper
MAXIMUM_BUCKETS = 256  # Maximum number of buckets (for 256-bit keys)
PEER_REFRESH_INTERVAL = 60  # Interval to refresh peers, in seconds
STALE_PEER_THRESHOLD = 3600  # Time in seconds after which a peer is considered stale

def peer_id_to_key(peer_id: ID) -> bytes:
    """
    Convert a peer ID to a 256-bit key for routing table operations.
    This normalizes all peer IDs to exactly 256 bits by hashing them with SHA-256.

    :param peer_id: The peer ID to convert
    :return: 32-byte (256-bit) key for routing table operations
    """
    return multihash.digest(peer_id.to_bytes(), "sha2-256").digest


def key_to_int(key: bytes) -> int:
    """Convert a 256-bit key to an integer for range calculations."""
    return int.from_bytes(key, byteorder="big")

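# Worked example (illustrative): key_to_int(b"\xff" * 32) == 2**256 - 1, the
# top of the keyspace, while key_to_int(b"\x00" * 32) == 0.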
class KBucket:
    """
    A k-bucket implementation for the Kademlia DHT.

    Each k-bucket stores up to k (BUCKET_SIZE) peers, sorted by least-recently seen.
    """

    def __init__(
        self,
        host: IHost,
        bucket_size: int = BUCKET_SIZE,
        min_range: int = 0,
        max_range: int = 2**256,
    ):
        """
        Initialize a new k-bucket.

        :param host: The host this bucket belongs to
        :param bucket_size: Maximum number of peers to store in the bucket
        :param min_range: Lower boundary of the bucket's key range (inclusive)
        :param max_range: Upper boundary of the bucket's key range (exclusive)

        """
        self.bucket_size = bucket_size
        self.host = host
        self.min_range = min_range
        self.max_range = max_range
        # Store PeerInfo objects along with a last-seen timestamp
        self.peers: OrderedDict[ID, tuple[PeerInfo, float]] = OrderedDict()

    def peer_ids(self) -> list[ID]:
        """Get all peer IDs in the bucket."""
        return list(self.peers.keys())

    def peer_infos(self) -> list[PeerInfo]:
        """Get all PeerInfo objects in the bucket."""
        return [info for info, _ in self.peers.values()]

    def get_oldest_peer(self) -> ID | None:
        """Get the least-recently seen peer."""
        if not self.peers:
            return None
        return next(iter(self.peers.keys()))

    async def add_peer(self, peer_info: PeerInfo) -> bool:
        """
        Add a peer to the bucket. Returns True if the peer was added or updated,
        False if the bucket is full and its oldest peer is still responsive.
        """
        current_time = time.time()
        peer_id = peer_info.peer_id

        # If the peer is already in the bucket, mark it as most recently seen
        if peer_id in self.peers:
            self.refresh_peer_last_seen(peer_id)
            return True

        # If the bucket has space, add the peer
        if len(self.peers) < self.bucket_size:
            self.peers[peer_id] = (peer_info, current_time)
            return True

        # The bucket is full: ping the least-recently seen peer and replace it
        # only if it is unresponsive
        oldest_peer_id = self.get_oldest_peer()
        if oldest_peer_id is None:
            logger.warning("No oldest peer found when bucket is full")
            return False

        try:
            # Ping the oldest peer, not the new peer
            response = await self._ping_peer(oldest_peer_id)
        except Exception as e:
            logger.debug(
                "Ping to old peer %s failed: %s",
                oldest_peer_id,
                str(e),
            )
            response = False

        if response:
            # The old peer is still alive, so the new peer is not added
            logger.debug(
                "Old peer %s is still alive, cannot add new peer %s",
                oldest_peer_id,
                peer_id,
            )
            return False

        # The old peer is unresponsive (failed ping or raised), so replace it
        # with the new peer
        logger.debug(
            "Old peer %s is unresponsive, replacing with new peer %s",
            oldest_peer_id,
            peer_id,
        )
        self.peers.popitem(last=False)  # Remove the oldest peer
        self.peers[peer_id] = (peer_info, current_time)
        return True

    def remove_peer(self, peer_id: ID) -> bool:
        """
        Remove a peer from the bucket.
        Returns True if the peer was in the bucket, False otherwise.
        """
        if peer_id in self.peers:
            del self.peers[peer_id]
            return True
        return False

    def has_peer(self, peer_id: ID) -> bool:
        """Check if the peer is in the bucket."""
        return peer_id in self.peers

    def get_peer_info(self, peer_id: ID) -> PeerInfo | None:
        """Get the PeerInfo for a given peer ID if it exists in the bucket."""
        if peer_id in self.peers:
            return self.peers[peer_id][0]
        return None

    def size(self) -> int:
        """Get the number of peers in the bucket."""
        return len(self.peers)

    def get_stale_peers(self, stale_threshold_seconds: int = 3600) -> list[ID]:
        """
        Get peers that haven't been pinged recently.

        params: stale_threshold_seconds: Time in seconds after which a peer
            is considered stale

        Returns
        -------
        list[ID]
            List of peer IDs that need to be refreshed

        """
        current_time = time.time()
        stale_peers = []

        for peer_id, (_, last_seen) in self.peers.items():
            if current_time - last_seen > stale_threshold_seconds:
                stale_peers.append(peer_id)

        return stale_peers

    async def _periodic_peer_refresh(self) -> None:
        """Background task to periodically refresh peers."""
        try:
            while True:
                await trio.sleep(PEER_REFRESH_INTERVAL)  # Check every minute

                # Find stale peers (not seen within the stale threshold)
                stale_peers = self.get_stale_peers(
                    stale_threshold_seconds=STALE_PEER_THRESHOLD
                )
                if stale_peers:
                    logger.debug(f"Found {len(stale_peers)} stale peers to refresh")

                for peer_id in stale_peers:
                    try:
                        # Try to ping the peer
                        logger.debug("Pinging stale peer %s", peer_id)
                        response = await self._ping_peer(peer_id)
                        if response:
                            # Update the last-seen time
                            self.refresh_peer_last_seen(peer_id)
                            logger.debug(f"Refreshed peer {peer_id}")
                        else:
                            # If the ping fails, remove the peer
                            logger.debug(f"Failed to ping peer {peer_id}")
                            self.remove_peer(peer_id)
                            logger.info(f"Removed unresponsive peer {peer_id}")
                    except Exception as e:
                        # If the ping raises, remove the peer as well
                        logger.debug(
                            "Failed to ping peer %s: %s",
                            peer_id,
                            e,
                        )
                        self.remove_peer(peer_id)
                        logger.info(f"Removed unresponsive peer {peer_id}")
        except trio.Cancelled:
            logger.debug("Peer refresh task cancelled")
            raise  # Cancellation must propagate under trio
        except Exception as e:
            logger.error(f"Error in peer refresh task: {e}", exc_info=True)

    async def _ping_peer(self, peer_id: ID) -> bool:
        """
        Ping a peer using a protobuf message to check whether it is still
        alive, and report the result so the caller can update last-seen state.

        params: peer_id: The ID of the peer to ping

        Returns
        -------
        bool
            True if the ping succeeded, False otherwise

        """
        result = False
        # Get the peer info directly from the bucket
        peer_info = self.get_peer_info(peer_id)
        if not peer_info:
            raise ValueError(f"Peer {peer_id} not in bucket")

        try:
            # Open a stream to the peer with the DHT protocol
            stream = await self.host.new_stream(peer_id, [PROTOCOL_ID])

            try:
                # Create the ping protobuf message
                ping_msg = Message()
                ping_msg.type = Message.PING  # Use the correct enum

                # Serialize and send with a 4-byte big-endian length prefix
                msg_bytes = ping_msg.SerializeToString()
                logger.debug(
                    f"Sending PING message to {peer_id}, size: {len(msg_bytes)} bytes"
                )
                await stream.write(len(msg_bytes).to_bytes(4, byteorder="big"))
                await stream.write(msg_bytes)

                # Wait for the response with a timeout
                with trio.move_on_after(2):  # 2 second timeout
                    # Read the response length (4 bytes)
                    length_bytes = await stream.read(4)
                    if not length_bytes or len(length_bytes) < 4:
                        logger.warning(f"Peer {peer_id} disconnected during ping")
                        return False

                    msg_len = int.from_bytes(length_bytes, byteorder="big")
                    if (
                        msg_len <= 0 or msg_len > 1024 * 1024
                    ):  # Sanity check on message size
                        logger.warning(
                            f"Invalid message length from {peer_id}: {msg_len}"
                        )
                        return False

                    logger.debug(
                        f"Receiving response from {peer_id}, size: {msg_len} bytes"
                    )

                    # Read the full message
                    response_bytes = await stream.read(msg_len)
                    if not response_bytes:
                        logger.warning(f"Failed to read response from {peer_id}")
                        return False

                    # Parse the protobuf response
                    response = Message()
                    try:
                        response.ParseFromString(response_bytes)
                    except Exception as e:
                        logger.warning(
                            f"Failed to parse protobuf response from {peer_id}: {e}"
                        )
                        return False

                    if response.type == Message.PING:
                        logger.debug(f"Successfully pinged peer {peer_id}")
                        result = True
                        return result
                    else:
                        logger.warning(
                            f"Unexpected response type from {peer_id}: {response.type}"
                        )
                        return False

                # If we get here, the ping timed out
                logger.warning(f"Ping to peer {peer_id} timed out")
                return False

            finally:
                await stream.close()

        except Exception as e:
            logger.error(f"Error pinging peer {peer_id}: {str(e)}")
            return False

    def refresh_peer_last_seen(self, peer_id: ID) -> bool:
        """
        Update the last-seen timestamp for a peer in the bucket.

        params: peer_id: The ID of the peer to refresh

        Returns
        -------
        bool
            True if the peer was found and refreshed, False otherwise

        """
        if peer_id in self.peers:
            # Get the current peer info and update the timestamp
            peer_info, _ = self.peers[peer_id]
            current_time = time.time()
            self.peers[peer_id] = (peer_info, current_time)
            # Move to the end of the ordered dict to mark as most recently seen
            self.peers.move_to_end(peer_id)
            return True

        return False

    def key_in_range(self, key: bytes) -> bool:
        """
        Check if a key is in the range of this bucket.

        params: key: The key to check (bytes)

        Returns
        -------
        bool
            True if the key is in range, False otherwise

        """
        key_int = key_to_int(key)
        return self.min_range <= key_int < self.max_range

    def peer_id_in_range(self, peer_id: ID) -> bool:
        """
        Check if a peer ID is in the range of this bucket.

        params: peer_id: The peer ID to check

        Returns
        -------
        bool
            True if the peer ID is in range, False otherwise

        """
        key = peer_id_to_key(peer_id)
        return self.key_in_range(key)

    def split(self) -> tuple["KBucket", "KBucket"]:
        """
        Split the bucket into two buckets.

        Returns
        -------
        tuple
            (lower_bucket, upper_bucket)

        """
        midpoint = (self.min_range + self.max_range) // 2
        lower_bucket = KBucket(self.host, self.bucket_size, self.min_range, midpoint)
        upper_bucket = KBucket(self.host, self.bucket_size, midpoint, self.max_range)

        # Redistribute peers between the two halves of the key range
        for peer_id, (peer_info, timestamp) in self.peers.items():
            peer_key = peer_id_to_key(peer_id)
            peer_key_int = key_to_int(peer_key)
            if peer_key_int < midpoint:
                lower_bucket.peers[peer_id] = (peer_info, timestamp)
            else:
                upper_bucket.peers[peer_id] = (peer_info, timestamp)

        return lower_bucket, upper_bucket

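# Illustrative note: the initial bucket covers [0, 2**256), so its first split
# is at midpoint 2**255; peers whose SHA-256-derived key has a leading 0 bit
# land in the lower bucket, and those with a leading 1 bit in the upper bucket.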
class RoutingTable:
    """
    The Kademlia routing table maintains information on which peers to contact for any
    given peer ID in the network.
    """

    def __init__(self, local_id: ID, host: IHost) -> None:
        """
        Initialize the routing table.

        :param local_id: The ID of the local node.
        :param host: The host this routing table belongs to.

        """
        self.local_id = local_id
        self.host = host
        self.buckets = [KBucket(host, BUCKET_SIZE)]

    async def add_peer(self, peer_obj: PeerInfo | ID) -> bool:
        """
        Add a peer to the routing table.

        :param peer_obj: Either a PeerInfo object or a peer ID to add

        Returns
        -------
        bool: True if the peer was added or updated, False otherwise

        """
        peer_id = None
        peer_info = None

        try:
            # Handle the different types of input
            if isinstance(peer_obj, PeerInfo):
                # We already have a PeerInfo object
                peer_info = peer_obj
                peer_id = peer_obj.peer_id
            else:
                # Assume it's a peer ID
                peer_id = peer_obj
                # Try to get addresses from the peerstore if available
                try:
                    addrs = self.host.get_peerstore().addrs(peer_id)
                    if addrs:
                        # Create a PeerInfo object
                        peer_info = PeerInfo(peer_id, addrs)
                    else:
                        logger.debug(
                            "No addresses found for peer %s in peerstore, skipping",
                            peer_id,
                        )
                        return False
                except Exception as peerstore_error:
                    # Handle the case where the peer is not in the peerstore yet
                    logger.debug(
                        "Peer %s not found in peerstore: %s, skipping",
                        peer_id,
                        str(peerstore_error),
                    )
                    return False

            # Don't add ourselves
            if peer_id == self.local_id:
                return False

            # Find the right bucket for this peer
            bucket = self.find_bucket(peer_id)

            # Try to add to the bucket
            success = await bucket.add_peer(peer_info)
            if success:
                logger.debug(f"Successfully added peer {peer_id} to routing table")
                return True

            # If the bucket is full and the peer couldn't be added, try
            # splitting the bucket. Only split if the bucket contains our own
            # peer ID.
            if self._should_split_bucket(bucket):
                logger.debug(
                    f"Bucket is full, attempting to split bucket for peer {peer_id}"
                )
                split_success = self._split_bucket(bucket)
                if split_success:
                    # After splitting, find the appropriate bucket for the peer
                    # and try to add it again
                    target_bucket = self.find_bucket(peer_info.peer_id)
                    success = await target_bucket.add_peer(peer_info)
                    if success:
                        logger.debug(
                            f"Successfully added peer {peer_id} after bucket split"
                        )
                        return True
                    else:
                        logger.debug(
                            f"Failed to add peer {peer_id} even after bucket split"
                        )
                        return False
                else:
                    logger.debug(f"Failed to split bucket for peer {peer_id}")
                    return False
            else:
                logger.debug(
                    f"Bucket is full and cannot be split, peer {peer_id} not added"
                )
                return False

        except Exception as e:
            logger.debug(f"Error adding peer {peer_obj} to routing table: {e}")
            return False

    def remove_peer(self, peer_id: ID) -> bool:
        """
        Remove a peer from the routing table.

        :param peer_id: The ID of the peer to remove

        Returns
        -------
        bool: True if the peer was removed, False otherwise

        """
        bucket = self.find_bucket(peer_id)
        return bucket.remove_peer(peer_id)

    def find_bucket(self, peer_id: ID) -> KBucket:
        """
        Find the bucket that would contain the given peer ID.

        :param peer_id: The peer ID to find a bucket for

        Returns
        -------
        KBucket: The bucket for this peer

        """
        for bucket in self.buckets:
            if bucket.peer_id_in_range(peer_id):
                return bucket

        return self.buckets[0]

    def find_local_closest_peers(self, key: bytes, count: int = 20) -> list[ID]:
        """
        Find the closest peers to a given key.

        :param key: The key to find closest peers to (bytes)
        :param count: Maximum number of peers to return

        Returns
        -------
        List[ID]: List of peer IDs closest to the key

        """
        # Get all peers from all buckets
        all_peers = []
        for bucket in self.buckets:
            all_peers.extend(bucket.peer_ids())

        # Sort by XOR distance to the key
        def distance_to_key(peer_id: ID) -> int:
            peer_key = peer_id_to_key(peer_id)
            return xor_distance(peer_key, key)

        all_peers.sort(key=distance_to_key)

        return all_peers[:count]

    def get_peer_ids(self) -> list[ID]:
        """
        Get all peer IDs in the routing table.

        Returns
        -------
        List[ID]: List of all peer IDs

        """
        peers = []
        for bucket in self.buckets:
            peers.extend(bucket.peer_ids())
        return peers

    def get_peer_info(self, peer_id: ID) -> PeerInfo | None:
        """
        Get the peer info for a specific peer.

        :param peer_id: The ID of the peer to get info for

        Returns
        -------
        PeerInfo: The peer info, or None if not found

        """
        bucket = self.find_bucket(peer_id)
        return bucket.get_peer_info(peer_id)

    def peer_in_table(self, peer_id: ID) -> bool:
        """
        Check if a peer is in the routing table.

        :param peer_id: The ID of the peer to check

        Returns
        -------
        bool: True if the peer is in the routing table, False otherwise

        """
        bucket = self.find_bucket(peer_id)
        return bucket.has_peer(peer_id)

    def size(self) -> int:
        """
        Get the number of peers in the routing table.

        Returns
        -------
        int: Number of peers

        """
        count = 0
        for bucket in self.buckets:
            count += bucket.size()
        return count

    def get_stale_peers(self, stale_threshold_seconds: int = 3600) -> list[ID]:
        """
        Get all stale peers from all buckets.

        params: stale_threshold_seconds: Time in seconds after which a peer
            is considered stale

        Returns
        -------
        list[ID]
            List of stale peer IDs

        """
        stale_peers = []
        for bucket in self.buckets:
            stale_peers.extend(bucket.get_stale_peers(stale_threshold_seconds))
        return stale_peers

    def get_peer_infos(self) -> list[PeerInfo]:
        """
        Get all PeerInfo objects in the routing table.

        Returns
        -------
        List[PeerInfo]: List of all PeerInfo objects

        """
        peer_infos = []
        for bucket in self.buckets:
            peer_infos.extend(bucket.peer_infos())
        return peer_infos

    def cleanup_routing_table(self) -> None:
        """
        Clean up the routing table by removing all data.
        This is useful for resetting the routing table during tests or reinitialization.
        """
        self.buckets = [KBucket(self.host, BUCKET_SIZE)]
        logger.info("Routing table cleaned up, all data removed.")

    def _should_split_bucket(self, bucket: KBucket) -> bool:
        """
        Check if a bucket should be split according to the Kademlia rules.

        :param bucket: The bucket to check
        :return: True if the bucket should be split
        """
        # Check if we've exceeded the maximum number of buckets
        if len(self.buckets) >= MAXIMUM_BUCKETS:
            logger.debug("Maximum number of buckets reached, cannot split")
            return False

        # Check if the bucket contains our local ID
        local_key = peer_id_to_key(self.local_id)
        local_key_int = key_to_int(local_key)
        contains_local_id = bucket.min_range <= local_key_int < bucket.max_range

        logger.debug(
            f"Bucket range: {bucket.min_range} - {bucket.max_range}, "
            f"local_key_int: {local_key_int}, contains_local: {contains_local_id}"
        )

        return contains_local_id

    def _split_bucket(self, bucket: KBucket) -> bool:
        """
        Split a bucket into two buckets.

        :param bucket: The bucket to split
        :return: True if the bucket was successfully split
        """
        try:
            # Find the bucket index
            bucket_index = self.buckets.index(bucket)
            logger.debug(f"Splitting bucket at index {bucket_index}")

            # Split the bucket
            lower_bucket, upper_bucket = bucket.split()

            # Replace the original bucket with the two new buckets
            self.buckets[bucket_index] = lower_bucket
            self.buckets.insert(bucket_index + 1, upper_bucket)

            logger.debug(
                f"Bucket split successful. New bucket count: {len(self.buckets)}"
            )
            logger.debug(
                f"Lower bucket range: "
                f"{lower_bucket.min_range} - {lower_bucket.max_range}, "
                f"peers: {lower_bucket.size()}"
            )
            logger.debug(
                f"Upper bucket range: "
                f"{upper_bucket.min_range} - {upper_bucket.max_range}, "
                f"peers: {upper_bucket.size()}"
            )

            return True

        except Exception as e:
            logger.error(f"Error splitting bucket: {e}")
            return False
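A minimal usage sketch for the routing table (illustrative only; `host` and the
peer objects are assumed to come from an already-started libp2p node, and
`some_key` is any 32-byte key):

    table = RoutingTable(host.get_id(), host)
    for info in discovered_peer_infos:      # hypothetical list of PeerInfo
        await table.add_peer(info)          # buckets split as needed
    closest = table.find_local_closest_peers(some_key, count=BUCKET_SIZE)
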
libp2p/kad_dht/utils.py (Normal file, 197 additions)
@@ -0,0 +1,197 @@
"""
Utility functions for the Kademlia DHT implementation.
"""

import logging

import base58
import multihash

from libp2p.abc import IHost
from libp2p.peer.envelope import consume_envelope
from libp2p.peer.id import (
    ID,
)

from .pb.kademlia_pb2 import (
    Message,
)

logger = logging.getLogger("kademlia-example.utils")

def maybe_consume_signed_record(
    msg: Message | Message.Peer, host: IHost, peer_id: ID | None = None
) -> bool:
    """
    Attempt to parse and store a signed peer record (Envelope) received during
    DHT communication. If the record is invalid, the peer ID does not match, or
    updating the peerstore fails, the function logs an error and returns False.

    Parameters
    ----------
    msg : Message | Message.Peer
        The protobuf message received during DHT communication. Can either be a
        top-level `Message` containing `senderRecord` or a `Message.Peer`
        containing `signedRecord`.
    host : IHost
        The local host instance, providing access to the peerstore for storing
        verified peer records.
    peer_id : ID | None, optional
        The expected peer ID for record validation. If provided, the peer ID
        inside the record must match this value.

    Returns
    -------
    bool
        True if a valid signed peer record was successfully consumed and stored,
        False otherwise.

    """
    if isinstance(msg, Message):
        if msg.HasField("senderRecord"):
            try:
                # Convert the signed peer record (Envelope) from protobuf bytes
                envelope, record = consume_envelope(
                    msg.senderRecord,
                    "libp2p-peer-record",
                )
                if not (isinstance(peer_id, ID) and record.peer_id == peer_id):
                    return False
                # Use the default TTL of 2 hours (7200 seconds)
                if not host.get_peerstore().consume_peer_record(envelope, 7200):
                    logger.error("Failed to update the Certified-Addr-Book")
                    return False
            except Exception as e:
                logger.error("Failed to update the Certified-Addr-Book: %s", e)
                return False
    else:
        if msg.HasField("signedRecord"):
            try:
                # Convert the signed peer record (Envelope) from protobuf bytes
                envelope, record = consume_envelope(
                    msg.signedRecord,
                    "libp2p-peer-record",
                )
                if not record.peer_id.to_bytes() == msg.id:
                    return False
                # Use the default TTL of 2 hours (7200 seconds)
                if not host.get_peerstore().consume_peer_record(envelope, 7200):
                    logger.error("Failed to update the Certified-Addr-Book")
                    return False
            except Exception as e:
                logger.error(
                    "Failed to update the Certified-Addr-Book: %s",
                    e,
                )
                return False
    return True

def create_key_from_binary(binary_data: bytes) -> bytes:
    """
    Create a key for the DHT by hashing binary data with SHA-256.

    params: binary_data: The binary data to hash.

    Returns
    -------
    bytes: The resulting key.

    """
    return multihash.digest(binary_data, "sha2-256").digest

def xor_distance(key1: bytes, key2: bytes) -> int:
    """
    Calculate the XOR distance between two keys.

    params: key1: First key (bytes)
    params: key2: Second key (bytes)

    Returns
    -------
    int: The XOR distance between the keys

    """
    # Ensure the inputs are bytes
    if not isinstance(key1, bytes) or not isinstance(key2, bytes):
        raise TypeError("Both key1 and key2 must be bytes objects")

    # Convert to integers
    k1 = int.from_bytes(key1, byteorder="big")
    k2 = int.from_bytes(key2, byteorder="big")

    # Calculate the XOR distance
    return k1 ^ k2

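# Worked example (illustrative): xor_distance(b"\x05", b"\x03") == 6, since
# 0b101 ^ 0b011 == 0b110; smaller values mean closer keys in Kademlia.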
def bytes_to_base58(data: bytes) -> str:
    """
    Convert bytes to a base58-encoded string.

    params: data: Input bytes

    Returns
    -------
    str: Base58-encoded string

    """
    return base58.b58encode(data).decode("utf-8")

def sort_peer_ids_by_distance(target_key: bytes, peer_ids: list[ID]) -> list[ID]:
    """
    Sort a list of peer IDs by their distance to the target key.

    params: target_key: The target key to measure distance from
    params: peer_ids: List of peer IDs to sort

    Returns
    -------
    List[ID]: Sorted list of peer IDs from closest to furthest

    """

    def get_distance(peer_id: ID) -> int:
        # Hash the peer ID bytes to get a key for distance calculation
        peer_hash = multihash.digest(peer_id.to_bytes(), "sha2-256").digest
        return xor_distance(target_key, peer_hash)

    return sorted(peer_ids, key=get_distance)

def shared_prefix_len(first: bytes, second: bytes) -> int:
    """
    Calculate the number of prefix bits shared by two byte sequences.

    params: first: First byte sequence
    params: second: Second byte sequence

    Returns
    -------
    int: Number of shared prefix bits

    """
    # Compare each byte to find the first bit difference
    common_length = 0
    for i in range(min(len(first), len(second))):
        byte_first = first[i]
        byte_second = second[i]

        if byte_first == byte_second:
            common_length += 8
        else:
            # Find the specific bit where they differ
            xor = byte_first ^ byte_second
            # Count the leading zeros in the xor result
            for j in range(7, -1, -1):
                if (xor >> j) & 1 == 1:
                    return common_length + (7 - j)

            # This shouldn't be reached if xor != 0
            return common_length + 8

    return common_length
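# Worked example (illustrative): shared_prefix_len(b"\xf0", b"\xf8") == 4,
# since 0b11110000 and 0b11111000 share their first four bits and differ at
# bit index 4 (counting from the most significant bit).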
Some files were not shown because too many files have changed in this diff.