Mirror of https://github.com/varun-r-mallya/py-libp2p.git (synced 2026-02-12 16:10:57 +00:00)

Compare commits: 10 commits (chore01 ... 278d6206ab)
| SHA1 |
| --- |
| 278d6206ab |
| 82cda6818c |
| 81259f7912 |
| f3a3a10251 |
| 356192d793 |
| 58b33ba2e8 |
| 00ba846f7b |
| 007527ef75 |
| 98438916ad |
| 966cef58de |
.github/workflows/tox.yml (vendored) — 42 changed lines
@@ -36,48 +36,10 @@ jobs:

      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python }}

      - name: Install Nim for interop testing
        if: matrix.toxenv == 'interop'
        run: |
          echo "Installing Nim for nim-libp2p interop testing..."
          curl -sSf https://nim-lang.org/choosenim/init.sh | sh -s -- -y --firstInstall
          echo "$HOME/.nimble/bin" >> $GITHUB_PATH
          echo "$HOME/.choosenim/toolchains/nim-stable/bin" >> $GITHUB_PATH

      - name: Cache nimble packages
        if: matrix.toxenv == 'interop'
        uses: actions/cache@v4
        with:
          path: |
            ~/.nimble
            ~/.choosenim/toolchains/*/lib
          key: ${{ runner.os }}-nimble-${{ hashFiles('**/nim_echo_server.nim') }}
          restore-keys: |
            ${{ runner.os }}-nimble-

      - name: Build nim interop binaries
        if: matrix.toxenv == 'interop'
        run: |
          export PATH="$HOME/.nimble/bin:$HOME/.choosenim/toolchains/nim-stable/bin:$PATH"
          cd tests/interop/nim_libp2p
          ./scripts/setup_nim_echo.sh

      - run: |
          python -m pip install --upgrade pip
          python -m pip install tox

      - name: Run Tests or Generate Docs
        run: |
          if [[ "${{ matrix.toxenv }}" == 'docs' ]]; then
            export TOXENV=docs
          else
            export TOXENV=py${{ matrix.python }}-${{ matrix.toxenv }}
          fi
          # Set PATH for nim commands during tox
          if [[ "${{ matrix.toxenv }}" == 'interop' ]]; then
            export PATH="$HOME/.nimble/bin:$HOME/.choosenim/toolchains/nim-stable/bin:$PATH"
          fi
      - run: |
          python -m tox run -r

  windows:

@@ -103,5 +65,5 @@ jobs:

          if [[ "${{ matrix.toxenv }}" == "wheel" ]]; then
            python -m tox run -e windows-wheel
          else
            python -m tox run -e py${{ matrix.python-version }}-${{ matrix.toxenv }}
            python -m tox run -e py311-${{ matrix.toxenv }}
          fi
.gitignore (vendored) — 7 changed lines
@@ -178,10 +178,3 @@ env.bak/

#lockfiles
uv.lock
poetry.lock
tests/interop/js_libp2p/js_node/node_modules/
tests/interop/js_libp2p/js_node/package-lock.json
tests/interop/js_libp2p/js_node/src/node_modules/
tests/interop/js_libp2p/js_node/src/package-lock.json

# Sphinx documentation build
_build/
README.md — 12 changed lines
@@ -61,12 +61,12 @@

### Discovery

Before:

| **Discovery** | **Status** | **Source** |
| -------------------- | :--------: | :---: |
| **`bootstrap`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/discovery/bootstrap) |
| **`random-walk`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/discovery/random_walk) |
| **`mdns-discovery`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/discovery/mdns) |
| **`rendezvous`** | 🌱 | |

After:

| **Discovery** | **Status** | **Source** |
| -------------------- | :--------: | :---: |
| **`bootstrap`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/discovery/bootstrap) |
| **`random-walk`** | 🌱 | |
| **`mdns-discovery`** | ✅ | [source](https://github.com/libp2p/py-libp2p/tree/main/libp2p/discovery/mdns) |
| **`rendezvous`** | 🌱 | |

______________________________________________________________________
@@ -36,14 +36,12 @@ Create a file named ``relay_node.py`` with the following content:

    from libp2p.relay.circuit_v2.transport import CircuitV2Transport
    from libp2p.relay.circuit_v2.config import RelayConfig
    from libp2p.tools.async_service import background_trio_service
    from libp2p.utils import get_wildcard_address

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("relay_node")

    async def run_relay():
        # Use wildcard address to listen on all interfaces
        listen_addr = get_wildcard_address(9000)
        listen_addr = multiaddr.Multiaddr("/ip4/0.0.0.0/tcp/9000")
        host = new_host()

        config = RelayConfig(

@@ -109,7 +107,6 @@ Create a file named ``destination_node.py`` with the following content:

    from libp2p.relay.circuit_v2.config import RelayConfig
    from libp2p.peer.peerinfo import info_from_p2p_addr
    from libp2p.tools.async_service import background_trio_service
    from libp2p.utils import get_wildcard_address

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("destination_node")

@@ -142,8 +139,7 @@ Create a file named ``destination_node.py`` with the following content:

        Run a simple destination node that accepts connections.
        This is a simplified version that doesn't use the relay functionality.
        """
        # Create a libp2p host - use wildcard address to listen on all interfaces
        listen_addr = get_wildcard_address(9001)
        listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/9001")
        host = new_host()

        # Configure as a relay receiver (stop)

@@ -256,15 +252,14 @@ Create a file named ``source_node.py`` with the following content:

    from libp2p.peer.peerinfo import info_from_p2p_addr
    from libp2p.tools.async_service import background_trio_service
    from libp2p.relay.circuit_v2.discovery import RelayInfo
    from libp2p.utils import get_wildcard_address

    # Configure logging
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("source_node")

    async def run_source(relay_peer_id=None, destination_peer_id=None):
        # Create a libp2p host - use wildcard address to listen on all interfaces
        listen_addr = get_wildcard_address(9002)
        # Create a libp2p host
        listen_addr = multiaddr.Multiaddr("/ip4/0.0.0.0/tcp/9002")
        host = new_host()

        # Configure as a relay client

@@ -433,7 +428,7 @@ Running the Example

    Relay node multiaddr: /ip4/127.0.0.1/tcp/9000/p2p/QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx
    ==================================================

    Listening on: [<Multiaddr /ip4/127.0.0.1/tcp/9000/p2p/QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx>]
    Listening on: [<Multiaddr /ip4/0.0.0.0/tcp/9000/p2p/QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx>]
    Protocol service started
    Relay service started successfully
    Relay limits: RelayLimits(duration=3600, data=10485760, max_circuit_conns=8, max_reservations=4)

@@ -452,7 +447,7 @@ Running the Example

    Use this ID in the source node: QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s
    ==================================================

    Listening on: [<Multiaddr /ip4/127.0.0.1/tcp/9001/p2p/QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s>]
    Listening on: [<Multiaddr /ip4/0.0.0.0/tcp/9001/p2p/QmPBr38KeQG2ibyL4fxq6yJWpfoVNCqJMHBdNyn1Qe4h5s>]
    Registered echo protocol handler
    Protocol service started
    Transport created

@@ -474,7 +469,7 @@ Running the Example

    $ python source_node.py
    Source node started with ID: QmPyM56cgmFoHTgvMgGfDWRdVRQznmxCDDDg2dJ8ygVXj3
    Listening on: [<Multiaddr /ip4/127.0.0.1/tcp/9002/p2p/QmPyM56cgmFoHTgvMgGfDWRdVRQznmxCDDDg2dJ8ygVXj3>]
    Listening on: [<Multiaddr /ip4/0.0.0.0/tcp/9002/p2p/QmPyM56cgmFoHTgvMgGfDWRdVRQznmxCDDDg2dJ8ygVXj3>]
    Protocol service started
    No relay peer ID provided. Please enter the relay's peer ID:
    Enter relay peer ID: QmaUigQJ9nJERa6GaZuyfaiX91QjYwoQJ46JS3k7ys7SLx
@@ -1,43 +0,0 @@

QUIC Echo Demo
==============

This example demonstrates a simple ``echo`` protocol using **QUIC transport**.

QUIC provides built-in TLS security and stream multiplexing over UDP, making it an excellent transport choice for libp2p applications.

.. code-block:: console

    $ python -m pip install libp2p
    Collecting libp2p
    ...
    Successfully installed libp2p-x.x.x
    $ echo-quic-demo
    Run this from the same folder in another console:

    echo-quic-demo -d /ip4/127.0.0.1/udp/8000/quic-v1/p2p/16Uiu2HAmAsbxRR1HiGJRNVPQLNMeNsBCsXT3rDjoYBQzgzNpM5mJ

    Waiting for incoming connection...

Copy the line that starts with ``echo-quic-demo -p 8001``, open a new terminal in the same folder and paste it in:

.. code-block:: console

    $ echo-quic-demo -d /ip4/127.0.0.1/udp/8000/quic-v1/p2p/16Uiu2HAmE3N7KauPTmHddYPsbMcBp2C6XAmprELX3YcFEN9iXiBu

    I am 16Uiu2HAmE3N7KauPTmHddYPsbMcBp2C6XAmprELX3YcFEN9iXiBu
    STARTING CLIENT CONNECTION PROCESS
    CLIENT CONNECTED TO SERVER
    Sent: hi, there!
    Got: ECHO: hi, there!

**Key differences from TCP Echo:**

- Uses UDP instead of TCP: ``/udp/8000`` instead of ``/tcp/8000``
- Includes the QUIC protocol identifier ``/quic-v1`` in the multiaddr
- Built-in TLS security (no separate security transport needed)
- Native stream multiplexing over a single QUIC connection
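For orientation, here is a minimal sketch of the listener side of such a demo. It assumes the ``enable_quic=True`` host flag and the ``/udp/.../quic-v1`` address format that appear elsewhere in this changeset; treat it as an illustration rather than the demo's exact source:

.. code-block:: python

    import secrets

    import multiaddr
    import trio

    from libp2p import new_host
    from libp2p.crypto.secp256k1 import create_new_key_pair


    async def main():
        # QUIC runs over UDP, so the multiaddr uses /udp/... plus /quic-v1
        listen_addr = multiaddr.Multiaddr("/ip4/0.0.0.0/udp/8000/quic-v1")

        # enable_quic=True switches the host onto the QUIC transport;
        # no separate security transport is needed (TLS is built in).
        key_pair = create_new_key_pair(secrets.token_bytes(32))
        host = new_host(key_pair=key_pair, enable_quic=True)

        async with host.run(listen_addrs=[listen_addr]):
            print("Listening on:", host.get_addrs())
            await trio.sleep_forever()


    trio.run(main)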
.. literalinclude:: ../examples/echo/echo_quic.py
    :language: python
    :linenos:
@@ -12,7 +12,7 @@ This example demonstrates how to use the libp2p ``identify`` protocol.

    $ identify-demo
    First host listening. Run this from another console:

    identify-demo -p 8889 -d /ip4/127.0.0.1/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM
    identify-demo -p 8889 -d /ip4/0.0.0.0/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM

    Waiting for incoming identify request...

@@ -21,13 +21,13 @@ folder and paste it in:

.. code-block:: console

    $ identify-demo -p 8889 -d /ip4/127.0.0.1/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM
    dialer (host_b) listening on /ip4/127.0.0.1/tcp/8889
    $ identify-demo -p 8889 -d /ip4/0.0.0.0/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM
    dialer (host_b) listening on /ip4/0.0.0.0/tcp/8889
    Second host connecting to peer: QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM
    Starting identify protocol...
    Identify response:
    Public Key (Base64): CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDC6c/oNPP9X13NDQ3Xrlp3zOj+ErXIWb/A4JGwWchiDBwMhMslEX3ct8CqI0BqUYKuwdFjowqqopOJ3cS2MlqtGaiP6Dg9bvGqSDoD37BpNaRVNcebRxtB0nam9SQy3PYLbHAmz0vR4ToSiL9OLRORnGOxCtHBuR8ZZ5vS0JEni8eQMpNa7IuXwyStnuty/QjugOZudBNgYSr8+9gH722KTjput5IRL7BrpIdd4HNXGVRm4b9BjNowvHu404x3a/ifeNblpy/FbYyFJEW0looygKF7hpRHhRbRKIDZt2BqOfT1sFkbqsHE85oY859+VMzP61YELgvGwai2r7KcjkW/AgMBAAE=
    Listen Addresses: ['/ip4/127.0.0.1/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM']
    Listen Addresses: ['/ip4/0.0.0.0/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM']
    Protocols: ['/ipfs/id/1.0.0', '/ipfs/ping/1.0.0']
    Observed Address: ['/ip4/127.0.0.1/tcp/38082']
    Protocol Version: ipfs/0.1.0
@@ -34,11 +34,11 @@ There is also a more interactive version of the example which runs as separate l

    ==== Starting Identify-Push Listener on port 8888 ====

    Listener host ready!
    Listening on: /ip4/127.0.0.1/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM
    Listening on: /ip4/0.0.0.0/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM
    Peer ID: QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM

    Run dialer with command:
    identify-push-listener-dialer-demo -d /ip4/127.0.0.1/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM
    identify-push-listener-dialer-demo -d /ip4/0.0.0.0/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM

    Waiting for incoming connections... (Ctrl+C to exit)

@@ -47,12 +47,12 @@ folder and paste it in:

.. code-block:: console

    $ identify-push-listener-dialer-demo -d /ip4/127.0.0.1/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM
    $ identify-push-listener-dialer-demo -d /ip4/0.0.0.0/tcp/8888/p2p/QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM

    ==== Starting Identify-Push Dialer on port 8889 ====

    Dialer host ready!
    Listening on: /ip4/127.0.0.1/tcp/8889/p2p/QmZyXwVuTaBcDeRsSkJpOpWrSt
    Listening on: /ip4/0.0.0.0/tcp/8889/p2p/QmZyXwVuTaBcDeRsSkJpOpWrSt

    Connecting to peer: QmUiN4R3fNrCoQugGgmmb3v35neMEjKFNrsbNGVDsRHWpM
    Successfully connected to listener!
@@ -1,194 +0,0 @@

Multiple Connections Per Peer
=============================

This example demonstrates how to use the multiple connections per peer feature in py-libp2p.

Overview
--------

The multiple connections per peer feature allows a libp2p node to maintain multiple network connections to the same peer. This provides several benefits:

- **Improved reliability**: If one connection fails, others remain available
- **Better performance**: Load can be distributed across multiple connections
- **Enhanced throughput**: Multiple streams can be created in parallel
- **Fault tolerance**: Redundant connections provide backup paths

Configuration
-------------

The feature is configured through the `ConnectionConfig` class:

.. code-block:: python

    from libp2p.network.swarm import ConnectionConfig

    # Default configuration
    config = ConnectionConfig()
    print(f"Max connections per peer: {config.max_connections_per_peer}")
    print(f"Load balancing strategy: {config.load_balancing_strategy}")

    # Custom configuration
    custom_config = ConnectionConfig(
        max_connections_per_peer=5,
        connection_timeout=60.0,
        load_balancing_strategy="least_loaded"
    )

Load Balancing Strategies
-------------------------

Two load balancing strategies are available; a selection sketch follows this list.

**Round Robin** (default)
    Cycles through connections in order, distributing load evenly.

**Least Loaded**
    Selects the connection with the fewest active streams.
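The two strategies reduce to a simple selection rule over the open connections. The sketch below is illustrative only (the ``Conn`` class and its stream counter are stand-ins, not the library's types):

.. code-block:: python

    from itertools import cycle


    class Conn:
        """Stand-in for a connection that knows its active stream count."""

        def __init__(self, name: str, active_streams: int) -> None:
            self.name = name
            self.active_streams = active_streams


    conns = [Conn("a", 4), Conn("b", 1), Conn("c", 2)]

    # Round robin: hand out connections in a fixed rotation.
    rotation = cycle(conns)
    print(next(rotation).name)  # "a", then "b", then "c", ...

    # Least loaded: pick the connection with the fewest active streams.
    print(min(conns, key=lambda c: c.active_streams).name)  # "b"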
API Usage
---------

The new API provides direct access to multiple connections:

.. code-block:: python

    from libp2p import new_swarm

    # Create swarm with multiple connections support
    swarm = new_swarm()

    # Dial a peer - returns a list of connections
    connections = await swarm.dial_peer(peer_id)
    print(f"Established {len(connections)} connections")

    # Get all connections to a peer
    peer_connections = swarm.get_connections(peer_id)

    # Get all connections (across all peers)
    all_connections = swarm.get_connections()

    # Get the complete connections map
    connections_map = swarm.get_connections_map()

    # Backward compatibility - get a single connection
    single_conn = swarm.get_connection(peer_id)

Backward Compatibility
----------------------

Existing code continues to work through backward compatibility features:

.. code-block:: python

    # Legacy 1:1 mapping (returns the first connection for each peer)
    legacy_connections = swarm.connections_legacy

    # Single connection access (returns the first available connection)
    conn = swarm.get_connection(peer_id)

Example
-------

A complete working example is available in the `examples/doc-examples/multiple_connections_example.py` file.

Production Configuration
------------------------

For production use, consider these settings:

**RetryConfig Parameters**

The `RetryConfig` class controls connection retry behavior with exponential backoff:

- **max_retries**: Maximum number of retry attempts before giving up (default: 3)
- **initial_delay**: Initial delay in seconds before the first retry (default: 0.1s)
- **max_delay**: Maximum delay cap to prevent excessive wait times (default: 30.0s)
- **backoff_multiplier**: Exponential backoff multiplier - each retry multiplies the delay by this factor (default: 2.0)
- **jitter_factor**: Random jitter (0.0-1.0) to prevent synchronized retries (default: 0.1)

**ConnectionConfig Parameters**

The `ConnectionConfig` class manages multi-connection behavior:

- **max_connections_per_peer**: Maximum connections allowed to a single peer (default: 3)
- **connection_timeout**: Timeout for establishing new connections in seconds (default: 30.0s)
- **load_balancing_strategy**: Strategy for distributing streams ("round_robin" or "least_loaded")

**Load Balancing Strategies Explained**

- **round_robin**: Cycles through connections in order, distributing load evenly. Simple and predictable.
- **least_loaded**: Selects the connection with the fewest active streams. Better for performance but more complex.

.. code-block:: python

    from libp2p.network.swarm import ConnectionConfig, RetryConfig

    # Production-ready configuration
    retry_config = RetryConfig(
        max_retries=3,           # Maximum retry attempts before giving up
        initial_delay=0.1,       # Start with a 100ms delay
        max_delay=30.0,          # Cap exponential backoff at 30 seconds
        backoff_multiplier=2.0,  # Double the delay each retry (100ms -> 200ms -> 400ms)
        jitter_factor=0.1        # Add 10% random jitter to prevent a thundering herd
    )

    connection_config = ConnectionConfig(
        max_connections_per_peer=3,            # Allow up to 3 connections per peer
        connection_timeout=30.0,               # 30 second timeout for new connections
        load_balancing_strategy="round_robin"  # Simple, predictable load distribution
    )

    swarm = new_swarm(
        retry_config=retry_config,
        connection_config=connection_config
    )

**How RetryConfig Works in Practice**

With the delay parameters above, retries follow the pattern below. (Note: the full 12-step schedule assumes a retry budget large enough to reach the delay cap; with ``max_retries=3`` as configured, the attempt would fail after step 4.)

1. **Attempt 1**: Immediate connection attempt
2. **Attempt 2**: Wait 100ms ± 10ms jitter, then retry
3. **Attempt 3**: Wait 200ms ± 20ms jitter, then retry
4. **Attempt 4**: Wait 400ms ± 40ms jitter, then retry
5. **Attempt 5**: Wait 800ms ± 80ms jitter, then retry
6. **Attempt 6**: Wait 1.6s ± 160ms jitter, then retry
7. **Attempt 7**: Wait 3.2s ± 320ms jitter, then retry
8. **Attempt 8**: Wait 6.4s ± 640ms jitter, then retry
9. **Attempt 9**: Wait 12.8s ± 1.28s jitter, then retry
10. **Attempt 10**: Wait 25.6s ± 2.56s jitter, then retry
11. **Attempt 11**: Wait 30.0s (capped) ± 3.0s jitter, then retry
12. **Attempt 12**: Wait 30.0s (capped) ± 3.0s jitter, then retry
13. **Give up**: Once the configured retry budget is exhausted, the connection attempt fails

The jitter prevents multiple clients from retrying simultaneously, reducing server load.
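The schedule above is plain exponential backoff with a cap and jitter. Here is a minimal sketch of the delay computation, written to mirror the parameter names documented here (not the library's internal code):

.. code-block:: python

    import random


    def retry_delay(
        retry: int,  # 1-based retry number
        initial_delay: float = 0.1,
        backoff_multiplier: float = 2.0,
        max_delay: float = 30.0,
        jitter_factor: float = 0.1,
    ) -> float:
        """Seconds to wait before the given retry."""
        # Exponential growth: 0.1s, 0.2s, 0.4s, ... capped at max_delay.
        base = min(initial_delay * backoff_multiplier ** (retry - 1), max_delay)
        # Spread retries out with +/- jitter_factor of random noise.
        return base * (1 + random.uniform(-jitter_factor, jitter_factor))


    for n in range(1, 13):
        print(f"retry {n}: ~{retry_delay(n):.2f}s")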
**Parameter Tuning Guidelines**

**For Development/Testing:**

- Use a lower `max_retries` (1-2) and shorter delays for faster feedback
- Example: `RetryConfig(max_retries=2, initial_delay=0.01, max_delay=0.1)`

**For Production:**

- Use a moderate `max_retries` (3-5) with reasonable delays for reliability
- Example: `RetryConfig(max_retries=5, initial_delay=0.1, max_delay=60.0)`

**For High-Latency Networks:**

- Use a higher `max_retries` (5-10) with longer delays
- Example: `RetryConfig(max_retries=8, initial_delay=0.5, max_delay=120.0)`

**For Load Balancing:**

- Use `round_robin` for simple, predictable behavior
- Use `least_loaded` when you need optimal performance and can handle the extra complexity

Architecture
------------

The implementation follows the same architectural patterns as the Go and JavaScript reference implementations:

- **Core data structure**: `dict[ID, list[INetConn]]` for a 1:many mapping
- **API consistency**: Methods like `get_connections()` match the reference implementations
- **Load balancing**: Integrated at the API level for optimal performance
- **Backward compatibility**: Maintains existing interfaces for gradual migration

This design ensures consistency across libp2p implementations while providing the benefits of multiple connections per peer.
@@ -15,7 +15,7 @@ This example demonstrates how to create a chat application using libp2p's PubSub

    2025-04-06 23:59:17,471 - pubsub-demo - INFO - Your selected topic is: pubsub-chat
    2025-04-06 23:59:17,472 - pubsub-demo - INFO - Using random available port: 33269
    2025-04-06 23:59:17,490 - pubsub-demo - INFO - Node started with peer ID: QmcJnocH1d1tz3Zp4MotVDjNfNFawXHw2dpB9tMYGTXJp7
    2025-04-06 23:59:17,490 - pubsub-demo - INFO - Listening on: /ip4/127.0.0.1/tcp/33269
    2025-04-06 23:59:17,490 - pubsub-demo - INFO - Listening on: /ip4/0.0.0.0/tcp/33269
    2025-04-06 23:59:17,490 - pubsub-demo - INFO - Initializing PubSub and GossipSub...
    2025-04-06 23:59:17,491 - pubsub-demo - INFO - Pubsub and GossipSub services started.
    2025-04-06 23:59:17,491 - pubsub-demo - INFO - Pubsub ready.

@@ -35,7 +35,7 @@ Copy the line that starts with ``pubsub-demo -d``, open a new terminal and paste it in:

    2025-04-07 00:00:59,846 - pubsub-demo - INFO - Your selected topic is: pubsub-chat
    2025-04-07 00:00:59,846 - pubsub-demo - INFO - Using random available port: 51977
    2025-04-07 00:00:59,864 - pubsub-demo - INFO - Node started with peer ID: QmYQKCm95Ut1aXsjHmWVYqdaVbno1eKTYC8KbEVjqUaKaQ
    2025-04-07 00:00:59,864 - pubsub-demo - INFO - Listening on: /ip4/127.0.0.1/tcp/51977
    2025-04-07 00:00:59,864 - pubsub-demo - INFO - Listening on: /ip4/0.0.0.0/tcp/51977
    2025-04-07 00:00:59,864 - pubsub-demo - INFO - Initializing PubSub and GossipSub...
    2025-04-07 00:00:59,864 - pubsub-demo - INFO - Pubsub and GossipSub services started.
    2025-04-07 00:00:59,865 - pubsub-demo - INFO - Pubsub ready.
@@ -1,131 +0,0 @@

Random Walk Example
===================

This example demonstrates the Random Walk module's peer discovery capabilities using real libp2p hosts and Kademlia DHT.
It shows how the Random Walk module automatically discovers new peers and maintains routing table health.

The Random Walk implementation performs the following key operations:

* **Automatic Peer Discovery**: Generates random peer IDs and queries the DHT network to discover new peers
* **Routing Table Maintenance**: Periodically refreshes the routing table to maintain network connectivity
* **Connection Management**: Maintains optimal connections to healthy peers in the network
* **Real-time Statistics**: Displays routing table size, connected peers, and peerstore statistics

.. code-block:: console

    $ python -m pip install libp2p
    Collecting libp2p
    ...
    Successfully installed libp2p-x.x.x
    $ cd examples/random_walk
    $ python random_walk.py --mode server
    2025-08-12 19:51:25,424 - random-walk-example - INFO - === Random Walk Example for py-libp2p ===
    2025-08-12 19:51:25,424 - random-walk-example - INFO - Mode: server, Port: 0 Demo interval: 30s
    2025-08-12 19:51:25,426 - random-walk-example - INFO - Starting server node on port 45123
    2025-08-12 19:51:25,426 - random-walk-example - INFO - Node peer ID: 16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef
    2025-08-12 19:51:25,426 - random-walk-example - INFO - Node address: /ip4/127.0.0.1/tcp/45123/p2p/16Uiu2HAm7EsNv5vvjPAehGAVfChjYjD63ZHyWogQRdzntSbAg9ef
    2025-08-12 19:51:25,427 - random-walk-example - INFO - Initial routing table size: 0
    2025-08-12 19:51:25,427 - random-walk-example - INFO - DHT service started in SERVER mode
    2025-08-12 19:51:25,430 - libp2p.discovery.random_walk.rt_refresh_manager - INFO - RT Refresh Manager started
    2025-08-12 19:51:55,432 - random-walk-example - INFO - --- Iteration 1 ---
    2025-08-12 19:51:55,432 - random-walk-example - INFO - Routing table size: 15
    2025-08-12 19:51:55,432 - random-walk-example - INFO - Connected peers: 8
    2025-08-12 19:51:55,432 - random-walk-example - INFO - Peerstore size: 42

You can also run the example in client mode:

.. code-block:: console

    $ python random_walk.py --mode client
    2025-08-12 19:52:15,424 - random-walk-example - INFO - === Random Walk Example for py-libp2p ===
    2025-08-12 19:52:15,424 - random-walk-example - INFO - Mode: client, Port: 0 Demo interval: 30s
    2025-08-12 19:52:15,426 - random-walk-example - INFO - Starting client node on port 51234
    2025-08-12 19:52:15,426 - random-walk-example - INFO - Node peer ID: 16Uiu2HAmAbc123xyz...
    2025-08-12 19:52:15,427 - random-walk-example - INFO - DHT service started in CLIENT mode
    2025-08-12 19:52:45,432 - random-walk-example - INFO - --- Iteration 1 ---
    2025-08-12 19:52:45,432 - random-walk-example - INFO - Routing table size: 8
    2025-08-12 19:52:45,432 - random-walk-example - INFO - Connected peers: 5
    2025-08-12 19:52:45,432 - random-walk-example - INFO - Peerstore size: 25

Command Line Options
--------------------

The example supports several command-line options:

.. code-block:: console

    $ python random_walk.py --help
    usage: random_walk.py [-h] [--mode {server,client}] [--port PORT]
                          [--demo-interval DEMO_INTERVAL] [--verbose]

    Random Walk Example for py-libp2p Kademlia DHT

    optional arguments:
      -h, --help            show this help message and exit
      --mode {server,client}
                            Node mode: server (DHT server), or client (DHT client)
      --port PORT           Port to listen on (0 for random)
      --demo-interval DEMO_INTERVAL
                            Interval between random walk demonstrations in seconds
      --verbose             Enable verbose logging

Key Features Demonstrated
-------------------------

**Automatic Random Walk Discovery**
The example shows how the Random Walk module automatically:

* Generates random 256-bit peer IDs for discovery queries (see the sketch after this list)
* Performs concurrent random walks to maximize peer discovery
* Validates discovered peers and adds them to the routing table
* Maintains routing table health through periodic refreshes
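As a sketch of that first bullet (illustrative only; the DHT lookup call itself is omitted), a random-walk target is just a uniformly random 256-bit key:

.. code-block:: python

    import secrets

    # A random 256-bit (32-byte) lookup target for one walk.
    target = secrets.token_bytes(32)

    # The DHT is then queried for the peers closest to this target;
    # peers met along the lookup path are candidates for the routing table.
    print(target.hex())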
**Real-time Network Statistics**
The example displays live statistics every 30 seconds (configurable):

* **Routing Table Size**: Number of peers in the Kademlia routing table
* **Connected Peers**: Number of actively connected peers
* **Peerstore Size**: Total number of known peers with addresses

**Connection Management**
The example includes sophisticated connection management:

* Automatically maintains connections to healthy peers
* Filters for compatible peers (TCP + IPv4 addresses)
* Reconnects to maintain optimal network connectivity
* Handles connection failures gracefully

**DHT Integration**
Shows seamless integration between Random Walk and Kademlia DHT:

* The RT Refresh Manager coordinates with the DHT routing table
* Peer discovery feeds directly into DHT operations
* Both SERVER and CLIENT modes are supported
* Bootstrap connectivity to public IPFS nodes

Understanding the Output
------------------------

When you run the example, you'll see periodic statistics that show how the Random Walk module is working:

* **Initial Phase**: The routing table starts empty and quickly discovers peers
* **Growth Phase**: The routing table size increases as more peers are discovered
* **Maintenance Phase**: The routing table size stabilizes as the system maintains optimal peer connections

The Random Walk module runs automatically in the background, performing peer discovery queries every few minutes to ensure the routing table remains populated with fresh, reachable peers.

Configuration
-------------

The Random Walk module can be configured through the following parameters in ``libp2p.discovery.random_walk.config`` (a usage sketch follows this list):

* ``RANDOM_WALK_ENABLED``: Enable/disable automatic random walks (default: True)
* ``REFRESH_INTERVAL``: Time between automatic refreshes in seconds (default: 300)
* ``RANDOM_WALK_CONCURRENCY``: Number of concurrent random walks (default: 3)
* ``MIN_RT_REFRESH_THRESHOLD``: Minimum routing table size before triggering a refresh (default: 4)
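Assuming these names are module-level constants in ``libp2p.discovery.random_walk.config`` (a plausible reading of the module path above, not a confirmed API), overriding them before the host starts might look like:

.. code-block:: python

    from libp2p.discovery.random_walk import config

    # Hypothetical overrides: refresh twice as often as the default 300s
    # and run five walks concurrently instead of three.
    config.REFRESH_INTERVAL = 150
    config.RANDOM_WALK_CONCURRENCY = 5

    print(config.RANDOM_WALK_ENABLED)  # True by default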
See Also
--------

* :doc:`examples.kademlia` - Kademlia DHT value storage and content routing
* :doc:`libp2p.discovery.random_walk` - Random Walk module API documentation
@@ -9,11 +9,8 @@ Examples

    examples.identify_push
    examples.chat
    examples.echo
    examples.echo_quic
    examples.ping
    examples.pubsub
    examples.circuit_relay
    examples.kademlia
    examples.mDNS
    examples.random_walk
    examples.multiple_connections
@@ -28,11 +28,6 @@ For Python, the most common transport is TCP. Here's how to set up a basic TCP transport:

.. literalinclude:: ../examples/doc-examples/example_transport.py
    :language: python

QUIC is a modern transport protocol that provides built-in TLS security and stream multiplexing over UDP:

.. literalinclude:: ../examples/doc-examples/example_quic_transport.py
    :language: python

Connection Encryption
^^^^^^^^^^^^^^^^^^^^^
@@ -1,48 +0,0 @@

libp2p.discovery.random_walk package
====================================

The Random Walk module implements a peer discovery mechanism.
It performs random walks through the DHT network to discover new peers and maintain routing table health through periodic refreshes.

Submodules
----------

libp2p.discovery.random_walk.config module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: libp2p.discovery.random_walk.config
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.discovery.random_walk.exceptions module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: libp2p.discovery.random_walk.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.discovery.random_walk.random_walk module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: libp2p.discovery.random_walk.random_walk
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.discovery.random_walk.rt_refresh_manager module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. automodule:: libp2p.discovery.random_walk.rt_refresh_manager
    :members:
    :undoc-members:
    :show-inheritance:

Module contents
---------------

.. automodule:: libp2p.discovery.random_walk
    :members:
    :undoc-members:
    :show-inheritance:
@@ -10,7 +10,6 @@ Subpackages

    libp2p.discovery.bootstrap
    libp2p.discovery.events
    libp2p.discovery.mdns
    libp2p.discovery.random_walk

Submodules
----------
@@ -1,77 +0,0 @@

libp2p.transport.quic package
=============================

Submodules
----------

libp2p.transport.quic.config module
-----------------------------------

.. automodule:: libp2p.transport.quic.config
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.quic.connection module
---------------------------------------

.. automodule:: libp2p.transport.quic.connection
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.quic.exceptions module
---------------------------------------

.. automodule:: libp2p.transport.quic.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.quic.listener module
-------------------------------------

.. automodule:: libp2p.transport.quic.listener
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.quic.security module
-------------------------------------

.. automodule:: libp2p.transport.quic.security
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.quic.stream module
-----------------------------------

.. automodule:: libp2p.transport.quic.stream
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.quic.transport module
--------------------------------------

.. automodule:: libp2p.transport.quic.transport
    :members:
    :undoc-members:
    :show-inheritance:

libp2p.transport.quic.utils module
----------------------------------

.. automodule:: libp2p.transport.quic.utils
    :members:
    :undoc-members:
    :show-inheritance:

Module contents
---------------

.. automodule:: libp2p.transport.quic
    :members:
    :undoc-members:
    :show-inheritance:
@@ -9,11 +9,6 @@ Subpackages

    libp2p.transport.tcp

.. toctree::
    :maxdepth: 4

    libp2p.transport.quic

Submodules
----------
@@ -1,90 +0,0 @@

"""
Advanced demonstration of Thin Waist address handling.

Run:
    python -m examples.advanced.network_discovery
"""

from __future__ import annotations

from multiaddr import Multiaddr

try:
    from libp2p.utils.address_validation import (
        expand_wildcard_address,
        get_available_interfaces,
        get_optimal_binding_address,
        get_wildcard_address,
    )
except ImportError:
    # Fallbacks if utilities are missing - use minimal network discovery
    import socket

    def get_available_interfaces(port: int, protocol: str = "tcp"):
        # Try to get local network interfaces, fallback to loopback
        addrs = []
        try:
            # Get hostname IP (better than hardcoded localhost)
            hostname = socket.gethostname()
            local_ip = socket.gethostbyname(hostname)
            if local_ip != "127.0.0.1":
                addrs.append(Multiaddr(f"/ip4/{local_ip}/{protocol}/{port}"))
        except Exception:
            pass
        # Always include loopback as fallback
        addrs.append(Multiaddr(f"/ip4/127.0.0.1/{protocol}/{port}"))
        return addrs

    def expand_wildcard_address(addr: Multiaddr, port: int | None = None):
        if port is None:
            return [addr]
        addr_str = str(addr).rsplit("/", 1)[0]
        return [Multiaddr(addr_str + f"/{port}")]

    def get_optimal_binding_address(port: int, protocol: str = "tcp"):
        # Try to get a non-loopback address first
        interfaces = get_available_interfaces(port, protocol)
        for addr in interfaces:
            if "127.0.0.1" not in str(addr):
                return addr
        # Fallback to loopback if no other interfaces found
        return Multiaddr(f"/ip4/127.0.0.1/{protocol}/{port}")

    def get_wildcard_address(port: int, protocol: str = "tcp"):
        return Multiaddr(f"/ip4/0.0.0.0/{protocol}/{port}")


def main() -> None:
    port = 8080
    interfaces = get_available_interfaces(port)
    print(f"Discovered interfaces for port {port}:")
    for a in interfaces:
        print(f"  - {a}")

    # Demonstrate wildcard address as a feature
    wildcard_v4 = get_wildcard_address(port)
    print(f"\nWildcard address (feature): {wildcard_v4}")

    expanded_v4 = expand_wildcard_address(wildcard_v4)
    print("\nExpanded IPv4 wildcard:")
    for a in expanded_v4:
        print(f"  - {a}")

    wildcard_v6 = Multiaddr(f"/ip6/::/tcp/{port}")
    expanded_v6 = expand_wildcard_address(wildcard_v6)
    print("\nExpanded IPv6 wildcard:")
    for a in expanded_v6:
        print(f"  - {a}")

    print("\nOptimal binding address heuristic result:")
    print(f"  -> {get_optimal_binding_address(port)}")

    override_port = 9000
    overridden = expand_wildcard_address(wildcard_v4, port=override_port)
    print(f"\nPort override expansion to {override_port}:")
    for a in overridden:
        print(f"  - {a}")


if __name__ == "__main__":
    main()
@@ -2,6 +2,7 @@ import argparse

import logging
import secrets

import multiaddr
import trio

from libp2p import new_host

@@ -53,26 +54,18 @@ BOOTSTRAP_PEERS = [

async def run(port: int, bootstrap_addrs: list[str]) -> None:
    """Run the bootstrap discovery example."""
    from libp2p.utils.address_validation import (
        find_free_port,
        get_available_interfaces,
        get_optimal_binding_address,
    )

    if port <= 0:
        port = find_free_port()

    # Generate key pair
    secret = secrets.token_bytes(32)
    key_pair = create_new_key_pair(secret)

    # Create listen addresses for all available interfaces
    listen_addrs = get_available_interfaces(port)
    # Create listen address
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Register peer discovery handler
    peerDiscovery.register_peer_discovered_handler(on_peer_discovery)

    logger.info("🚀 Starting Bootstrap Discovery Example")
    logger.info(f"📍 Listening on: {listen_addr}")
    logger.info(f"🌐 Bootstrap peers: {len(bootstrap_addrs)}")

    print("\n" + "=" * 60)

@@ -87,22 +80,7 @@ async def run(port: int, bootstrap_addrs: list[str]) -> None:

    host = new_host(key_pair=key_pair, bootstrap=bootstrap_addrs)

    try:
        async with host.run(listen_addrs=listen_addrs):
            # Get all available addresses with peer ID
            all_addrs = host.get_addrs()

            logger.info("Listener ready, listening on:")
            print("Listener ready, listening on:")
            for addr in all_addrs:
                logger.info(f"{addr}")
                print(f"{addr}")

            # Display optimal address for reference
            optimal_addr = get_optimal_binding_address(port)
            optimal_addr_with_peer = f"{optimal_addr}/p2p/{host.get_id().to_string()}"
            logger.info(f"Optimal address: {optimal_addr_with_peer}")
            print(f"Optimal address: {optimal_addr_with_peer}")

        async with host.run(listen_addrs=[listen_addr]):
            # Keep running and log peer discovery events
            await trio.sleep_forever()
    except KeyboardInterrupt:

@@ -120,7 +98,7 @@ def main() -> None:

    Usage:
        python bootstrap.py -p 8000
        python bootstrap.py -p 8001 --custom-bootstrap \\
            "/ip4/[HOST_IP]/tcp/8000/p2p/QmYourPeerID"
            "/ip4/127.0.0.1/tcp/8000/p2p/QmYourPeerID"
    """

    parser = argparse.ArgumentParser(
@@ -1,5 +1,4 @@

import argparse
import logging
import sys

import multiaddr

@@ -18,11 +17,6 @@ from libp2p.peer.peerinfo import (

    info_from_p2p_addr,
)

# Configure minimal logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger("multiaddr").setLevel(logging.WARNING)
logging.getLogger("libp2p").setLevel(logging.WARNING)

PROTOCOL_ID = TProtocol("/chat/1.0.0")
MAX_READ_LEN = 2**32 - 1

@@ -46,18 +40,9 @@ async def write_data(stream: INetStream) -> None:


async def run(port: int, destination: str) -> None:
    from libp2p.utils.address_validation import (
        find_free_port,
        get_available_interfaces,
        get_optimal_binding_address,
    )

    if port <= 0:
        port = find_free_port()

    listen_addrs = get_available_interfaces(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
    host = new_host()
    async with host.run(listen_addrs=listen_addrs), trio.open_nursery() as nursery:
    async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
        # Start the peer-store cleanup task
        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

@@ -69,19 +54,10 @@ async def run(port: int, destination: str) -> None:

    host.set_stream_handler(PROTOCOL_ID, stream_handler)

    # Get all available addresses with peer ID
    all_addrs = host.get_addrs()

    print("Listener ready, listening on:\n")
    for addr in all_addrs:
        print(f"{addr}")

    # Use optimal address for the client command
    optimal_addr = get_optimal_binding_address(port)
    optimal_addr_with_peer = f"{optimal_addr}/p2p/{host.get_id().to_string()}"
    print(
        f"\nRun this from the same folder in another console:\n\n"
        f"chat-demo -d {optimal_addr_with_peer}\n"
        "Run this from the same folder in another console:\n\n"
        f"chat-demo "
        f"-d {host.get_addrs()[0]}\n"
    )
    print("Waiting for incoming connection...")

@@ -110,7 +86,7 @@ def main() -> None:

    where <DESTINATION> is the multiaddress of the previous listener host.
    """
    example_maddr = (
        "/ip4/[HOST_IP]/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
        "/ip4/127.0.0.1/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
@@ -1,5 +1,6 @@

import secrets

import multiaddr
import trio

from libp2p import (

@@ -8,10 +9,6 @@ from libp2p import (

from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.security.insecure.transport import PLAINTEXT_PROTOCOL_ID, InsecureTransport
from libp2p.utils.address_validation import (
    get_available_interfaces,
    get_optimal_binding_address,
from libp2p.security.insecure.transport import (
    PLAINTEXT_PROTOCOL_ID,
    InsecureTransport,
)

@@ -38,19 +38,17 @@ async def main():

    # Create a host with the key pair and insecure transport
    host = new_host(key_pair=key_pair, sec_opt=security_options)

    # Configure the listening address using the new paradigm
    # Configure the listening address
    port = 8000
    listen_addrs = get_available_interfaces(port)
    optimal_addr = get_optimal_binding_address(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Start the host
    async with host.run(listen_addrs=listen_addrs):
    async with host.run(listen_addrs=[listen_addr]):
        print(
            "libp2p has started with insecure transport "
            "(not recommended for production)"
        )
        print("libp2p is listening on:", host.get_addrs())
        print(f"Optimal address: {optimal_addr}")
        # Keep the host running
        await trio.sleep_forever()
@@ -1,5 +1,6 @@

import secrets

import multiaddr
import trio

from libp2p import (

@@ -12,10 +13,6 @@ from libp2p.security.noise.transport import (

    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)
from libp2p.utils.address_validation import (
    get_available_interfaces,
    get_optimal_binding_address,
)


async def main():

@@ -42,16 +39,14 @@ async def main():

    # Create a host with the key pair and Noise security
    host = new_host(key_pair=key_pair, sec_opt=security_options)

    # Configure the listening address using the new paradigm
    # Configure the listening address
    port = 8000
    listen_addrs = get_available_interfaces(port)
    optimal_addr = get_optimal_binding_address(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Start the host
    async with host.run(listen_addrs=listen_addrs):
    async with host.run(listen_addrs=[listen_addr]):
        print("libp2p has started with Noise encryption")
        print("libp2p is listening on:", host.get_addrs())
        print(f"Optimal address: {optimal_addr}")
        # Keep the host running
        await trio.sleep_forever()
@@ -1,5 +1,6 @@

import secrets

import multiaddr
import trio

from libp2p import (

@@ -12,10 +13,6 @@ from libp2p.security.secio.transport import (

    ID as SECIO_PROTOCOL_ID,
    Transport as SecioTransport,
)
from libp2p.utils.address_validation import (
    get_available_interfaces,
    get_optimal_binding_address,
)


async def main():

@@ -35,16 +32,14 @@ async def main():

    # Create a host with the key pair and SECIO security
    host = new_host(key_pair=key_pair, sec_opt=security_options)

    # Configure the listening address using the new paradigm
    # Configure the listening address
    port = 8000
    listen_addrs = get_available_interfaces(port)
    optimal_addr = get_optimal_binding_address(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Start the host
    async with host.run(listen_addrs=listen_addrs):
    async with host.run(listen_addrs=[listen_addr]):
        print("libp2p has started with SECIO encryption")
        print("libp2p is listening on:", host.get_addrs())
        print(f"Optimal address: {optimal_addr}")
        # Keep the host running
        await trio.sleep_forever()
@@ -1,5 +1,6 @@

import secrets

import multiaddr
import trio

from libp2p import (

@@ -12,10 +13,6 @@ from libp2p.security.noise.transport import (

    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)
from libp2p.utils.address_validation import (
    get_available_interfaces,
    get_optimal_binding_address,
)


async def main():

@@ -42,16 +39,14 @@ async def main():

    # Create a host with the key pair, Noise security, and mplex multiplexer
    host = new_host(key_pair=key_pair, sec_opt=security_options)

    # Configure the listening address using the new paradigm
    # Configure the listening address
    port = 8000
    listen_addrs = get_available_interfaces(port)
    optimal_addr = get_optimal_binding_address(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Start the host
    async with host.run(listen_addrs=listen_addrs):
    async with host.run(listen_addrs=[listen_addr]):
        print("libp2p has started with Noise encryption and mplex multiplexing")
        print("libp2p is listening on:", host.get_addrs())
        print(f"Optimal address: {optimal_addr}")
        # Keep the host running
        await trio.sleep_forever()
@@ -38,10 +38,6 @@ from libp2p.network.stream.net_stream import (

from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)
from libp2p.utils.address_validation import (
    get_available_interfaces,
    get_optimal_binding_address,
)

PROTOCOL_ID = TProtocol("/echo/1.0.0")

@@ -177,9 +173,7 @@ async def run_enhanced_demo(

    """
    Run enhanced echo demo with NetStream state management.
    """
    # Use the new address paradigm
    listen_addrs = get_available_interfaces(port)
    optimal_addr = get_optimal_binding_address(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Generate or use provided key
    if seed:

@@ -191,7 +185,7 @@

    host = new_host(key_pair=create_new_key_pair(secret))

    async with host.run(listen_addrs=listen_addrs):
    async with host.run(listen_addrs=[listen_addr]):
        print(f"Host ID: {host.get_id().to_string()}")
        print("=" * 60)

@@ -202,12 +196,10 @@

        # type: ignore: Stream is type of NetStream
        host.set_stream_handler(PROTOCOL_ID, enhanced_echo_handler)

        # Use optimal address for client command
        optimal_addr_with_peer = f"{optimal_addr}/p2p/{host.get_id().to_string()}"
        print(
            "Run client from another console:\n"
            f"python3 example_net_stream.py "
            f"-d {optimal_addr_with_peer}\n"
            f"-d {host.get_addrs()[0]}\n"
        )
        print("Waiting for connections...")
        print("Press Ctrl+C to stop server")

@@ -234,7 +226,7 @@

def main() -> None:
    example_maddr = (
        "/ip4/[HOST_IP]/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
        "/ip4/127.0.0.1/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )

    parser = argparse.ArgumentParser(
@@ -1,6 +1,6 @@

import secrets

from multiaddr import Multiaddr
import multiaddr
import trio

from libp2p import (

@@ -16,10 +16,6 @@ from libp2p.security.noise.transport import (

    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)
from libp2p.utils.address_validation import (
    get_available_interfaces,
    get_optimal_binding_address,
)


async def main():

@@ -46,16 +42,14 @@ async def main():

    # Create a host with the key pair, Noise security, and mplex multiplexer
    host = new_host(key_pair=key_pair, sec_opt=security_options)

    # Configure the listening address using the new paradigm
    # Configure the listening address
    port = 8000
    listen_addrs = get_available_interfaces(port)
    optimal_addr = get_optimal_binding_address(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Start the host
    async with host.run(listen_addrs=listen_addrs):
    async with host.run(listen_addrs=[listen_addr]):
        print("libp2p has started")
        print("libp2p is listening on:", host.get_addrs())
        print(f"Optimal address: {optimal_addr}")

        # Connect to bootstrap peers manually
        bootstrap_list = [

@@ -67,7 +61,7 @@ async def main():

    for addr in bootstrap_list:
        try:
            peer_info = info_from_p2p_addr(Multiaddr(addr))
            peer_info = info_from_p2p_addr(multiaddr.Multiaddr(addr))
            await host.connect(peer_info)
            print(f"Connected to {peer_info.peer_id.to_string()}")
        except Exception as e:
@@ -1,49 +0,0 @@

import secrets

import trio

from libp2p import (
    new_host,
)
from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.utils.address_validation import (
    get_available_interfaces,
    get_optimal_binding_address,
)


async def main():
    # Create a key pair for the host
    secret = secrets.token_bytes(32)
    key_pair = create_new_key_pair(secret)

    # Create a host with the key pair
    host = new_host(key_pair=key_pair, enable_quic=True)

    # Configure the listening address using the new paradigm
    port = 8000
    listen_addrs = get_available_interfaces(port, protocol="udp")
    # Convert TCP addresses to QUIC-v1 addresses
    quic_addrs = []
    for addr in listen_addrs:
        addr_str = str(addr).replace("/tcp/", "/udp/") + "/quic-v1"
        from multiaddr import Multiaddr

        quic_addrs.append(Multiaddr(addr_str))

    optimal_addr = get_optimal_binding_address(port, protocol="udp")
    optimal_quic_str = str(optimal_addr).replace("/tcp/", "/udp/") + "/quic-v1"

    # Start the host
    async with host.run(listen_addrs=quic_addrs):
        print("libp2p has started with QUIC transport")
        print("libp2p is listening on:", host.get_addrs())
        print(f"Optimal address: {optimal_quic_str}")
        # Keep the host running
        await trio.sleep_forever()


# Run the async function
trio.run(main)
@@ -1,5 +1,6 @@

import secrets

import multiaddr
import trio

from libp2p import (

@@ -12,10 +13,6 @@ from libp2p.security.noise.transport import (

    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)
from libp2p.utils.address_validation import (
    get_available_interfaces,
    get_optimal_binding_address,
)


async def main():

@@ -42,16 +39,14 @@ async def main():

    # Create a host with the key pair, Noise security, and mplex multiplexer
    host = new_host(key_pair=key_pair, sec_opt=security_options)

    # Configure the listening address using the new paradigm
    # Configure the listening address
    port = 8000
    listen_addrs = get_available_interfaces(port)
    optimal_addr = get_optimal_binding_address(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Start the host
    async with host.run(listen_addrs=listen_addrs):
    async with host.run(listen_addrs=[listen_addr]):
        print("libp2p has started")
        print("libp2p is listening on:", host.get_addrs())
        print(f"Optimal address: {optimal_addr}")
        # Keep the host running
        await trio.sleep_forever()
@@ -1,5 +1,6 @@

import secrets

import multiaddr
import trio

from libp2p import (

@@ -8,10 +9,6 @@ from libp2p import (

from libp2p.crypto.secp256k1 import (
    create_new_key_pair,
)
from libp2p.utils.address_validation import (
    get_available_interfaces,
    get_optimal_binding_address,
)


async def main():

@@ -22,16 +19,14 @@ async def main():

    # Create a host with the key pair
    host = new_host(key_pair=key_pair)

    # Configure the listening address using the new paradigm
    # Configure the listening address
    port = 8000
    listen_addrs = get_available_interfaces(port)
    optimal_addr = get_optimal_binding_address(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Start the host
    async with host.run(listen_addrs=listen_addrs):
    async with host.run(listen_addrs=[listen_addr]):
        print("libp2p has started with TCP transport")
        print("libp2p is listening on:", host.get_addrs())
        print(f"Optimal address: {optimal_addr}")
        # Keep the host running
        await trio.sleep_forever()
@ -1,210 +0,0 @@
#!/usr/bin/env python3
"""
Example demonstrating multiple connections per peer support in libp2p.

This example shows how to:
1. Configure multiple connections per peer
2. Use different load balancing strategies
3. Access multiple connections through the new API
4. Maintain backward compatibility
5. Use the new address paradigm for network configuration
"""

import logging

import trio

from libp2p import new_swarm
from libp2p.network.swarm import ConnectionConfig, RetryConfig
from libp2p.utils import get_available_interfaces, get_optimal_binding_address

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def example_basic_multiple_connections() -> None:
    """Example of basic multiple connections per peer usage."""
    logger.info("Creating swarm with multiple connections support...")

    # Create swarm with default configuration
    swarm = new_swarm()
    default_connection = ConnectionConfig()

    logger.info(f"Swarm created with peer ID: {swarm.get_peer_id()}")
    logger.info(
        f"Connection config: max_connections_per_peer="
        f"{default_connection.max_connections_per_peer}"
    )

    await swarm.close()
    logger.info("Basic multiple connections example completed")


async def example_custom_connection_config() -> None:
    """Example of custom connection configuration."""
    logger.info("Creating swarm with custom connection configuration...")

    # Custom connection configuration for high-performance scenarios
    connection_config = ConnectionConfig(
        max_connections_per_peer=5,  # More connections per peer
        connection_timeout=60.0,  # Longer timeout
        load_balancing_strategy="least_loaded",  # Use least loaded strategy
    )

    # Create swarm with custom connection config
    swarm = new_swarm(connection_config=connection_config)

    logger.info("Custom connection config applied:")
    logger.info(
        f"  Max connections per peer: {connection_config.max_connections_per_peer}"
    )
    logger.info(f"  Connection timeout: {connection_config.connection_timeout}s")
    logger.info(
        f"  Load balancing strategy: {connection_config.load_balancing_strategy}"
    )

    await swarm.close()
    logger.info("Custom connection config example completed")


async def example_multiple_connections_api() -> None:
    """Example of using the new multiple connections API."""
    logger.info("Demonstrating multiple connections API...")

    connection_config = ConnectionConfig(
        max_connections_per_peer=3, load_balancing_strategy="round_robin"
    )

    swarm = new_swarm(connection_config=connection_config)

    logger.info("Multiple connections API features:")
    logger.info("  - dial_peer() returns list[INetConn]")
    logger.info("  - get_connections(peer_id) returns list[INetConn]")
    logger.info("  - get_connections_map() returns dict[ID, list[INetConn]]")
    logger.info(
        "  - get_connection(peer_id) returns INetConn | None (backward compatibility)"
    )

    await swarm.close()
    logger.info("Multiple connections API example completed")


async def example_backward_compatibility() -> None:
    """Example of backward compatibility features."""
    logger.info("Demonstrating backward compatibility...")

    swarm = new_swarm()

    logger.info("Backward compatibility features:")
    logger.info("  - connections_legacy property provides 1:1 mapping")
    logger.info("  - get_connection() method for single connection access")
    logger.info("  - Existing code continues to work")

    await swarm.close()
    logger.info("Backward compatibility example completed")


async def example_network_address_paradigm() -> None:
    """Example of using the new address paradigm with multiple connections."""
    logger.info("Demonstrating network address paradigm...")

    # Get available interfaces using the new paradigm
    port = 8000  # Example port
    available_interfaces = get_available_interfaces(port)
    logger.info(f"Available interfaces: {available_interfaces}")

    # Get optimal binding address
    optimal_address = get_optimal_binding_address(port)
    logger.info(f"Optimal binding address: {optimal_address}")

    # Create connection config for multiple connections with network awareness
    connection_config = ConnectionConfig(
        max_connections_per_peer=3, load_balancing_strategy="round_robin"
    )

    # Create swarm with address paradigm
    swarm = new_swarm(connection_config=connection_config)

    logger.info("Network address paradigm features:")
    logger.info("  - get_available_interfaces() for interface discovery")
    logger.info("  - get_optimal_binding_address() for smart address selection")
    logger.info("  - Multiple connections with proper network binding")

    await swarm.close()
    logger.info("Network address paradigm example completed")


async def example_production_ready_config() -> None:
    """Example of production-ready configuration."""
    logger.info("Creating swarm with production-ready configuration...")

    # Get optimal network configuration using the new paradigm
    port = 8001  # Example port
    optimal_address = get_optimal_binding_address(port)
    logger.info(f"Using optimal binding address: {optimal_address}")

    # Production-ready retry configuration
    retry_config = RetryConfig(
        max_retries=3,  # Reasonable retry limit
        initial_delay=0.1,  # Quick initial retry
        max_delay=30.0,  # Cap exponential backoff
        backoff_multiplier=2.0,  # Standard exponential backoff
        jitter_factor=0.1,  # Small jitter to prevent thundering herd
    )

    # Production-ready connection configuration
    connection_config = ConnectionConfig(
        max_connections_per_peer=3,  # Balance between performance and resource usage
        connection_timeout=30.0,  # Reasonable timeout
        load_balancing_strategy="round_robin",  # Simple, predictable strategy
    )

    # Create swarm with production config
    swarm = new_swarm(retry_config=retry_config, connection_config=connection_config)

    logger.info("Production-ready configuration applied:")
    logger.info(
        f"  Retry: {retry_config.max_retries} retries, "
        f"{retry_config.max_delay}s max delay"
    )
    logger.info(f"  Connections: {connection_config.max_connections_per_peer} per peer")
    logger.info(f"  Load balancing: {connection_config.load_balancing_strategy}")

    await swarm.close()
    logger.info("Production-ready configuration example completed")


async def main() -> None:
    """Run all examples."""
    logger.info("Multiple Connections Per Peer Examples")
    logger.info("=" * 50)

    try:
        await example_basic_multiple_connections()
        logger.info("-" * 30)

        await example_custom_connection_config()
        logger.info("-" * 30)

        await example_multiple_connections_api()
        logger.info("-" * 30)

        await example_backward_compatibility()
        logger.info("-" * 30)

        await example_network_address_paradigm()
        logger.info("-" * 30)

        await example_production_ready_config()
        logger.info("-" * 30)

        logger.info("All examples completed successfully!")

    except Exception as e:
        logger.error(f"Example failed: {e}")
        raise


if __name__ == "__main__":
    trio.run(main)
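The "round_robin" and "least_loaded" strategies the removed example names can be pictured with a toy selector. This sketch is illustrative only and is not py-libp2p's internal implementation:

from itertools import count

class RoundRobinSelector:
    """Toy round-robin pick over a peer's open connections."""

    def __init__(self) -> None:
        self._counter = count()

    def pick(self, conns: list):
        # Cycle through the connections in order, one per call.
        return conns[next(self._counter) % len(conns)]

def pick_least_loaded(conns: list, load_of):
    # Choose the connection with the fewest active streams, where
    # `load_of` maps a connection to its current stream count.
    return min(conns, key=load_of)

Round-robin spreads streams evenly regardless of load; least-loaded reacts to load at the cost of tracking per-connection stream counts.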
@ -1,7 +1,4 @@
import argparse
import logging
import random
import secrets

import multiaddr
import trio
@ -15,60 +12,40 @@ from libp2p.crypto.secp256k1 import (
from libp2p.custom_types import (
    TProtocol,
)
from libp2p.network.stream.exceptions import (
    StreamEOF,
)
from libp2p.network.stream.net_stream import (
    INetStream,
)
from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)
from libp2p.utils.address_validation import (
    find_free_port,
    get_available_interfaces,
    get_optimal_binding_address,
)

# Configure minimal logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger("multiaddr").setLevel(logging.WARNING)
logging.getLogger("libp2p").setLevel(logging.WARNING)

PROTOCOL_ID = TProtocol("/echo/1.0.0")
MAX_READ_LEN = 2**32 - 1


async def _echo_stream_handler(stream: INetStream) -> None:
    try:
        peer_id = stream.muxed_conn.peer_id
        print(f"Received connection from {peer_id}")
        # Wait until EOF
        msg = await stream.read(MAX_READ_LEN)
        print(f"Echoing message: {msg.decode('utf-8')}")
        await stream.write(msg)
    except StreamEOF:
        print("Stream closed by remote peer.")
    except Exception as e:
        print(f"Error in echo handler: {e}")
    finally:
        await stream.close()
    # Wait until EOF
    msg = await stream.read(MAX_READ_LEN)
    await stream.write(msg)
    await stream.close()


async def run(port: int, destination: str, seed: int | None = None) -> None:
    if port <= 0:
        port = find_free_port()
    listen_addr = get_available_interfaces(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    if seed:
        import random

        random.seed(seed)
        secret_number = random.getrandbits(32 * 8)
        secret = secret_number.to_bytes(length=32, byteorder="big")
    else:
        import secrets

        secret = secrets.token_bytes(32)

    host = new_host(key_pair=create_new_key_pair(secret))
    async with host.run(listen_addrs=listen_addr), trio.open_nursery() as nursery:
    async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
        # Start the peer-store cleanup task
        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

@ -77,19 +54,10 @@ async def run(port: int, destination: str, seed: int | None = None) -> None:
        if not destination:  # it's the server
            host.set_stream_handler(PROTOCOL_ID, _echo_stream_handler)

            # Print all listen addresses with peer ID (JS parity)
            print("Listener ready, listening on:\n")
            peer_id = host.get_id().to_string()
            for addr in listen_addr:
                print(f"{addr}/p2p/{peer_id}")

            # Get optimal address for display
            optimal_addr = get_optimal_binding_address(port)
            optimal_addr_with_peer = f"{optimal_addr}/p2p/{peer_id}"

            print(
                "\nRun this from the same folder in another console:\n\n"
                f"echo-demo -d {optimal_addr_with_peer}\n"
                "Run this from the same folder in another console:\n\n"
                f"echo-demo "
                f"-d {host.get_addrs()[0]}\n"
            )
            print("Waiting for incoming connections...")
            await trio.sleep_forever()
@ -125,7 +93,7 @@ def main() -> None:
    where <DESTINATION> is the multiaddress of the previous listener host.
    """
    example_maddr = (
        "/ip4/[HOST_IP]/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
        "/ip4/127.0.0.1/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-p", "--port", default=0, type=int, help="source port number")

@ -1,207 +0,0 @@
#!/usr/bin/env python3
"""
QUIC Echo Example - Fixed version with proper client/server separation

This program demonstrates a simple echo protocol using QUIC transport where a peer
listens for connections and copies back any input received on a stream.

Fixed to properly separate client and server modes - clients don't start listeners.
"""

import argparse
import logging

from multiaddr import Multiaddr
import trio

from libp2p import new_host
from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.custom_types import TProtocol
from libp2p.network.stream.net_stream import INetStream
from libp2p.peer.peerinfo import info_from_p2p_addr

# Configure minimal logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger("multiaddr").setLevel(logging.WARNING)
logging.getLogger("libp2p").setLevel(logging.WARNING)

PROTOCOL_ID = TProtocol("/echo/1.0.0")


async def _echo_stream_handler(stream: INetStream) -> None:
    try:
        msg = await stream.read()
        await stream.write(msg)
        await stream.close()
    except Exception as e:
        print(f"Echo handler error: {e}")
        try:
            await stream.close()
        except:  # noqa: E722
            pass


async def run_server(port: int, seed: int | None = None) -> None:
    """Run echo server with QUIC transport."""
    from libp2p.utils.address_validation import (
        find_free_port,
        get_available_interfaces,
        get_optimal_binding_address,
    )

    if port <= 0:
        port = find_free_port()

    # For QUIC, we need UDP addresses - use the new address paradigm
    tcp_addrs = get_available_interfaces(port)
    # Convert TCP addresses to QUIC addresses
    quic_addrs = []
    for addr in tcp_addrs:
        addr_str = str(addr).replace("/tcp/", "/udp/") + "/quic"
        quic_addrs.append(Multiaddr(addr_str))

    if seed:
        import random

        random.seed(seed)
        secret_number = random.getrandbits(32 * 8)
        secret = secret_number.to_bytes(length=32, byteorder="big")
    else:
        import secrets

        secret = secrets.token_bytes(32)

    # Create host with QUIC transport
    host = new_host(
        enable_quic=True,
        key_pair=create_new_key_pair(secret),
    )

    # Server mode: start listener
    async with host.run(listen_addrs=quic_addrs):
        try:
            print(f"I am {host.get_id().to_string()}")
            host.set_stream_handler(PROTOCOL_ID, _echo_stream_handler)

            # Get all available addresses with peer ID
            all_addrs = host.get_addrs()

            print("Listener ready, listening on:")
            for addr in all_addrs:
                print(f"{addr}")

            # Use optimal address for the client command
            optimal_tcp = get_optimal_binding_address(port)
            optimal_quic_str = str(optimal_tcp).replace("/tcp/", "/udp/") + "/quic"
            peer_id = host.get_id().to_string()
            optimal_quic_with_peer = f"{optimal_quic_str}/p2p/{peer_id}"
            print(
                f"\nRun this from the same folder in another console:\n\n"
                f"python3 ./examples/echo/echo_quic.py -d {optimal_quic_with_peer}\n"
            )
            print("Waiting for incoming QUIC connections...")
            await trio.sleep_forever()
        except KeyboardInterrupt:
            print("Closing server gracefully...")
            await host.close()
            return


async def run_client(destination: str, seed: int | None = None) -> None:
    """Run echo client with QUIC transport."""
    if seed:
        import random

        random.seed(seed)
        secret_number = random.getrandbits(32 * 8)
        secret = secret_number.to_bytes(length=32, byteorder="big")
    else:
        import secrets

        secret = secrets.token_bytes(32)

    # Create host with QUIC transport
    host = new_host(
        enable_quic=True,
        key_pair=create_new_key_pair(secret),
    )

    # Client mode: NO listener, just connect
    async with host.run(listen_addrs=[]):  # Empty listen_addrs for client
        print(f"I am {host.get_id().to_string()}")

        maddr = Multiaddr(destination)
        info = info_from_p2p_addr(maddr)

        # Connect to server
        print("STARTING CLIENT CONNECTION PROCESS")
        await host.connect(info)
        print("CLIENT CONNECTED TO SERVER")

        # Start a stream with the destination
        stream = await host.new_stream(info.peer_id, [PROTOCOL_ID])

        msg = b"hi, there!\n"

        await stream.write(msg)
        response = await stream.read()

        print(f"Sent: {msg.decode('utf-8')}")
        print(f"Got: {response.decode('utf-8')}")
        await stream.close()
        await host.disconnect(info.peer_id)


async def run(port: int, destination: str, seed: int | None = None) -> None:
    """
    Run echo server or client with QUIC transport.

    Fixed version that properly separates client and server modes.
    """
    if not destination:  # Server mode
        await run_server(port, seed)
    else:  # Client mode
        await run_client(destination, seed)


def main() -> None:
    """Main function - help text updated for QUIC."""
    description = """
    This program demonstrates a simple echo protocol using QUIC
    transport where a peer listens for connections and copies back
    any input received on a stream.

    QUIC provides built-in TLS security and stream multiplexing over UDP.

    To use it, first run 'echo-quic-demo -p <PORT>', where <PORT> is
    the UDP port number. Then, run another host with
    'echo-quic-demo -d <DESTINATION>',
    where <DESTINATION> is the QUIC multiaddress of the previous listener host.
    """

    example_maddr = "/ip4/[HOST_IP]/udp/8000/quic/p2p/QmQn4SwGkDZKkUEpBRBv"

    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-p", "--port", default=0, type=int, help="UDP port number")
    parser.add_argument(
        "-d",
        "--destination",
        type=str,
        help=f"destination multiaddr string, e.g. {example_maddr}",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        help="provide a seed to the random number generator",
    )
    args = parser.parse_args()

    try:
        trio.run(run, args.port, args.destination, args.seed)
    except KeyboardInterrupt:
        pass


if __name__ == "__main__":
    main()
@ -20,11 +20,6 @@ from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)

# Configure minimal logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger("multiaddr").setLevel(logging.WARNING)
logging.getLogger("libp2p").setLevel(logging.WARNING)

logger = logging.getLogger("libp2p.identity.identify-example")


@ -63,19 +58,11 @@ def print_identify_response(identify_response: Identify):


async def run(port: int, destination: str, use_varint_format: bool = True) -> None:
    from libp2p.utils.address_validation import (
        get_available_interfaces,
        get_optimal_binding_address,
    )
    localhost_ip = "0.0.0.0"

    if not destination:
        # Create first host (listener)
        if port <= 0:
            from libp2p.utils.address_validation import find_free_port

            port = find_free_port()

        listen_addrs = get_available_interfaces(port)
        listen_addr = multiaddr.Multiaddr(f"/ip4/{localhost_ip}/tcp/{port}")
        host_a = new_host()

        # Set up identify handler with specified format
@ -86,49 +73,25 @@ async def run(port: int, destination: str, use_varint_format: bool = True) -> No
        host_a.set_stream_handler(IDENTIFY_PROTOCOL_ID, identify_handler)

        async with (
            host_a.run(listen_addrs=listen_addrs),
            host_a.run(listen_addrs=[listen_addr]),
            trio.open_nursery() as nursery,
        ):
            # Start the peer-store cleanup task
            nursery.start_soon(host_a.get_peerstore().start_cleanup_task, 60)

            # Get all available addresses with peer ID
            all_addrs = host_a.get_addrs()
            # Get the actual address and replace 0.0.0.0 with 127.0.0.1 for client
            # connections
            server_addr = str(host_a.get_addrs()[0])
            client_addr = server_addr.replace("/ip4/0.0.0.0/", "/ip4/127.0.0.1/")

            if use_varint_format:
                format_name = "length-prefixed"
                print(f"First host listening (using {format_name} format).")
                print("Listener ready, listening on:\n")
                for addr in all_addrs:
                    print(f"{addr}")

                # Use optimal address for the client command
                optimal_addr = get_optimal_binding_address(port)
                optimal_addr_with_peer = (
                    f"{optimal_addr}/p2p/{host_a.get_id().to_string()}"
                )
                print(
                    f"\nRun this from the same folder in another console:\n\n"
                    f"identify-demo -d {optimal_addr_with_peer}\n"
                )
                print("Waiting for incoming identify request...")
            else:
                format_name = "raw protobuf"
                print(f"First host listening (using {format_name} format).")
                print("Listener ready, listening on:\n")
                for addr in all_addrs:
                    print(f"{addr}")

                # Use optimal address for the client command
                optimal_addr = get_optimal_binding_address(port)
                optimal_addr_with_peer = (
                    f"{optimal_addr}/p2p/{host_a.get_id().to_string()}"
                )
                print(
                    f"\nRun this from the same folder in another console:\n\n"
                    f"identify-demo -d {optimal_addr_with_peer}\n"
                )
                print("Waiting for incoming identify request...")
            format_name = "length-prefixed" if use_varint_format else "raw protobuf"
            format_flag = "--raw-format" if not use_varint_format else ""
            print(
                f"First host listening (using {format_name} format). "
                f"Run this from another console:\n\n"
                f"identify-demo {format_flag} -d {client_addr}\n"
            )
            print("Waiting for incoming identify request...")

            # Add a custom handler to show connection events
            async def custom_identify_handler(stream):
@ -171,20 +134,11 @@ async def run(port: int, destination: str, use_varint_format: bool = True) -> No

    else:
        # Create second host (dialer)
        from libp2p.utils.address_validation import (
            find_free_port,
            get_available_interfaces,
            get_optimal_binding_address,
        )

        if port <= 0:
            port = find_free_port()

        listen_addrs = get_available_interfaces(port)
        listen_addr = multiaddr.Multiaddr(f"/ip4/{localhost_ip}/tcp/{port}")
        host_b = new_host()

        async with (
            host_b.run(listen_addrs=listen_addrs),
            host_b.run(listen_addrs=[listen_addr]),
            trio.open_nursery() as nursery,
        ):
            # Start the peer-store cleanup task
@ -280,7 +234,7 @@ def main() -> None:
    """

    example_maddr = (
        "/ip4/[HOST_IP]/tcp/8888/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
        "/ip4/127.0.0.1/tcp/8888/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )

    parser = argparse.ArgumentParser(description=description)
@ -304,7 +258,7 @@ def main() -> None:

    # Determine format: use varint (length-prefixed) if --raw-format is specified,
    # otherwise use raw protobuf format (old format)
    use_varint_format = not args.raw_format
    use_varint_format = args.raw_format

    try:
        if args.destination:

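The "length-prefixed" versus "raw protobuf" distinction in the identify example comes down to whether the serialized message is preceded by its length as an unsigned varint. A minimal sketch of that framing (illustrative helper names; standard LEB128-style varint):

def encode_uvarint(n: int) -> bytes:
    """Encode a non-negative integer as an unsigned varint."""
    out = bytearray()
    while True:
        byte = n & 0x7F
        n >>= 7
        out.append(byte | (0x80 if n else 0))
        if not n:
            return bytes(out)

def frame_length_prefixed(payload: bytes) -> bytes:
    # Length-prefixed format: varint(len(payload)) || payload.
    # The raw format sends the payload bytes as-is.
    return encode_uvarint(len(payload)) + payload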
@ -36,9 +36,6 @@ from libp2p.identity.identify_push import (
from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)
from libp2p.utils.address_validation import (
    get_available_interfaces,
)

# Configure logging
logger = logging.getLogger(__name__)
@ -210,13 +207,13 @@ async def main() -> None:
        ID_PUSH, create_custom_identify_push_handler(host_2, "Host 2")
    )

    # Start listening on available interfaces using random ports
    listen_addrs_1 = get_available_interfaces(0)  # 0 for random port
    listen_addrs_2 = get_available_interfaces(0)  # 0 for random port
    # Start listening on random ports using the run context manager
    listen_addr_1 = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")
    listen_addr_2 = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")

    async with (
        host_1.run(listen_addrs_1),
        host_2.run(listen_addrs_2),
        host_1.run([listen_addr_1]),
        host_2.run([listen_addr_2]),
        trio.open_nursery() as nursery,
    ):
        # Start the peer-store cleanup task

@ -14,7 +14,7 @@ Usage:
    python identify_push_listener_dialer.py

    # Then in another console, run as a dialer (default port 8889):
    python identify_push_listener_dialer.py -d /ip4/[HOST_IP]/tcp/8888/p2p/PEER_ID
    python identify_push_listener_dialer.py -d /ip4/127.0.0.1/tcp/8888/p2p/PEER_ID
    (where PEER_ID is the peer ID displayed by the listener)
"""

@ -56,11 +56,6 @@ from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)

# Configure minimal logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger("multiaddr").setLevel(logging.WARNING)
logging.getLogger("libp2p").setLevel(logging.WARNING)

# Configure logging
logger = logging.getLogger("libp2p.identity.identify-push-example")

@ -199,11 +194,6 @@ async def run_listener(
    port: int, use_varint_format: bool = True, raw_format_flag: bool = False
) -> None:
    """Run a host in listener mode."""
    from libp2p.utils.address_validation import find_free_port, get_available_interfaces

    if port <= 0:
        port = find_free_port()

    format_name = "length-prefixed" if use_varint_format else "raw protobuf"
    print(
        f"\n==== Starting Identify-Push Listener on port {port} "
@ -225,33 +215,26 @@ async def run_listener(
        custom_identify_push_handler_for(host, use_varint_format=use_varint_format),
    )

    # Start listening on all available interfaces
    listen_addrs = get_available_interfaces(port)
    # Start listening
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    try:
        async with host.run(listen_addrs):
            all_addrs = host.get_addrs()
        async with host.run([listen_addr]):
            addr = host.get_addrs()[0]
            logger.info("Listener host ready!")
            print("Listener host ready!")

            logger.info("Listener ready, listening on:")
            print("Listener ready, listening on:")
            for addr in all_addrs:
                logger.info(f"{addr}")
                print(f"{addr}")
            logger.info(f"Listening on: {addr}")
            print(f"Listening on: {addr}")

            logger.info(f"Peer ID: {host.get_id().pretty()}")
            print(f"Peer ID: {host.get_id().pretty()}")

            # Use the first address as the default for the dialer command
            default_addr = all_addrs[0]
            print("\nRun this from the same folder in another console:")
            print("\nRun dialer with command:")
            if raw_format_flag:
                print(
                    f"identify-push-listener-dialer-demo -d {default_addr} --raw-format"
                )
                print(f"identify-push-listener-dialer-demo -d {addr} --raw-format")
            else:
                print(f"identify-push-listener-dialer-demo -d {default_addr}")
                print(f"identify-push-listener-dialer-demo -d {addr}")
            print("\nWaiting for incoming identify/push requests... (Ctrl+C to exit)")

            # Keep running until interrupted
@ -291,12 +274,10 @@ async def run_dialer(
        identify_push_handler_for(host, use_varint_format=use_varint_format),
    )

    # Start listening on available interfaces
    from libp2p.utils.address_validation import get_available_interfaces
    # Start listening on a different port
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    listen_addrs = get_available_interfaces(port)

    async with host.run(listen_addrs):
    async with host.run([listen_addr]):
        logger.info("Dialer host ready!")
        print("Dialer host ready!")

@ -41,7 +41,6 @@ from libp2p.tools.async_service import (
from libp2p.tools.utils import (
    info_from_p2p_addr,
)
from libp2p.utils.paths import get_script_dir, join_paths

# Configure logging
logging.basicConfig(
@ -54,8 +53,8 @@ logger = logging.getLogger("kademlia-example")
# Configure DHT module loggers to inherit from the parent logger
# This ensures all kademlia-example.* loggers use the same configuration
# Get the directory where this script is located
SCRIPT_DIR = get_script_dir(__file__)
SERVER_ADDR_LOG = join_paths(SCRIPT_DIR, "server_node_addr.txt")
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SERVER_ADDR_LOG = os.path.join(SCRIPT_DIR, "server_node_addr.txt")

# Set the level for all child loggers
for module in [
@ -150,43 +149,26 @@ async def run_node(

    key_pair = create_new_key_pair(secrets.token_bytes(32))
    host = new_host(key_pair=key_pair)
    listen_addr = Multiaddr(f"/ip4/127.0.0.1/tcp/{port}")

    from libp2p.utils.address_validation import (
        get_available_interfaces,
        get_optimal_binding_address,
    )

    listen_addrs = get_available_interfaces(port)

    async with host.run(listen_addrs=listen_addrs), trio.open_nursery() as nursery:
    async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
        # Start the peer-store cleanup task
        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

        peer_id = host.get_id().pretty()

        # Get all available addresses with peer ID
        all_addrs = host.get_addrs()

        logger.info("Listener ready, listening on:")
        for addr in all_addrs:
            logger.info(f"{addr}")

        # Use optimal address for the bootstrap command
        optimal_addr = get_optimal_binding_address(port)
        optimal_addr_with_peer = f"{optimal_addr}/p2p/{host.get_id().to_string()}"
        bootstrap_cmd = f"--bootstrap {optimal_addr_with_peer}"
        logger.info("To connect to this node, use: %s", bootstrap_cmd)

        addr_str = f"/ip4/127.0.0.1/tcp/{port}/p2p/{peer_id}"
        await connect_to_bootstrap_nodes(host, bootstrap_nodes)
        dht = KadDHT(host, dht_mode)
        # take all peer ids from the host and add them to the dht
        for peer_id in host.get_peerstore().peer_ids():
            await dht.routing_table.add_peer(peer_id)
        logger.info(f"Connected to bootstrap nodes: {host.get_connected_peers()}")
        bootstrap_cmd = f"--bootstrap {addr_str}"
        logger.info("To connect to this node, use: %s", bootstrap_cmd)

        # Save server address in server mode
        if dht_mode == DHTMode.SERVER:
            save_server_addr(str(optimal_addr_with_peer))
            save_server_addr(addr_str)

        # Start the DHT service
        async with background_trio_service(dht):
@ -245,7 +227,7 @@ async def run_node(

        # Keep the node running
        while True:
            logger.info(
            logger.debug(
                "Status - Connected peers: %d,"
                "Peers in store: %d, Values in store: %d",
                len(dht.host.get_connected_peers()),

@ -2,6 +2,7 @@ import argparse
import logging
import secrets

import multiaddr
import trio

from libp2p import (
@ -13,11 +14,6 @@ from libp2p.crypto.secp256k1 import (
)
from libp2p.discovery.events.peerDiscovery import peerDiscovery

# Configure minimal logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger("multiaddr").setLevel(logging.WARNING)
logging.getLogger("libp2p").setLevel(logging.WARNING)

logger = logging.getLogger("libp2p.discovery.mdns")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
@ -26,43 +22,34 @@ handler.setFormatter(
)
logger.addHandler(handler)

# Set root logger to DEBUG to capture all logs from dependencies
logging.getLogger().setLevel(logging.DEBUG)


def onPeerDiscovery(peerinfo: PeerInfo):
    logger.info(f"Discovered: {peerinfo.peer_id}")


async def run(port: int) -> None:
    from libp2p.utils.address_validation import find_free_port, get_available_interfaces

    if port <= 0:
        port = find_free_port()

    secret = secrets.token_bytes(32)
    key_pair = create_new_key_pair(secret)
    listen_addrs = get_available_interfaces(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    peerDiscovery.register_peer_discovered_handler(onPeerDiscovery)

    print(
        "Run this from the same folder in another console to "
        "start another peer on a different port:\n\n"
        "mdns-demo -p <ANOTHER_PORT>\n"
    )
    print("Waiting for mDNS peer discovery events...\n")

    logger.info("Starting peer Discovery")
    host = new_host(key_pair=key_pair, enable_mDNS=True)
    async with host.run(listen_addrs=listen_addrs), trio.open_nursery() as nursery:
    async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
        # Start the peer-store cleanup task
        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

        # Get all available addresses with peer ID
        all_addrs = host.get_addrs()

        print("Listener ready, listening on:")
        for addr in all_addrs:
            print(f"{addr}")

        print(
            "\nRun this from the same folder in another console to "
            "start another peer on a different port:\n\n"
            "mdns-demo -p <ANOTHER_PORT>\n"
        )
        print("Waiting for mDNS peer discovery events...\n")

        await trio.sleep_forever()

@ -1,5 +1,4 @@
import argparse
import logging

import multiaddr
import trio
@ -17,11 +16,6 @@ from libp2p.peer.peerinfo import (
    info_from_p2p_addr,
)

# Configure minimal logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger("multiaddr").setLevel(logging.WARNING)
logging.getLogger("libp2p").setLevel(logging.WARNING)

PING_PROTOCOL_ID = TProtocol("/ipfs/ping/1.0.0")
PING_LENGTH = 32
RESP_TIMEOUT = 60
@ -61,38 +55,20 @@ async def send_ping(stream: INetStream) -> None:


async def run(port: int, destination: str) -> None:
    from libp2p.utils.address_validation import (
        find_free_port,
        get_available_interfaces,
        get_optimal_binding_address,
    )
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")
    host = new_host(listen_addrs=[listen_addr])

    if port <= 0:
        port = find_free_port()

    listen_addrs = get_available_interfaces(port)
    host = new_host(listen_addrs=listen_addrs)

    async with host.run(listen_addrs=listen_addrs), trio.open_nursery() as nursery:
    async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
        # Start the peer-store cleanup task
        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

        if not destination:
            host.set_stream_handler(PING_PROTOCOL_ID, handle_ping)

            # Get all available addresses with peer ID
            all_addrs = host.get_addrs()

            print("Listener ready, listening on:\n")
            for addr in all_addrs:
                print(f"{addr}")

            # Use optimal address for the client command
            optimal_addr = get_optimal_binding_address(port)
            optimal_addr_with_peer = f"{optimal_addr}/p2p/{host.get_id().to_string()}"
            print(
                f"\nRun this from the same folder in another console:\n\n"
                f"ping-demo -d {optimal_addr_with_peer}\n"
                "Run this from the same folder in another console:\n\n"
                f"ping-demo "
                f"-d {host.get_addrs()[0]}\n"
            )
            print("Waiting for incoming connection...")

@ -118,7 +94,7 @@ def main() -> None:
    """

    example_maddr = (
        "/ip4/[HOST_IP]/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
        "/ip4/127.0.0.1/tcp/8000/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )

    parser = argparse.ArgumentParser(description=description)

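The ping example's protocol is symmetric and tiny: the dialer writes PING_LENGTH (32) random bytes and the listener echoes the same bytes back. A minimal sketch of the dialer side of that exchange (the stream object is assumed to expose the read/write API used throughout these examples):

import secrets

PING_LENGTH = 32

async def ping_once(stream) -> bool:
    """Send one ping payload and verify the echo matches."""
    payload = secrets.token_bytes(PING_LENGTH)
    await stream.write(payload)
    echoed = await stream.read(PING_LENGTH)
    return echoed == payload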
@ -1,5 +1,6 @@
import argparse
import logging
import socket

import base58
import multiaddr
@ -30,9 +31,6 @@ from libp2p.stream_muxer.mplex.mplex import (
from libp2p.tools.async_service.trio_service import (
    background_trio_service,
)
from libp2p.utils.address_validation import (
    find_free_port,
)

# Configure logging
logging.basicConfig(
@ -79,6 +77,13 @@ async def publish_loop(pubsub, topic, termination_event):
            await trio.sleep(1)  # Avoid tight loop on error


def find_free_port():
    """Find a free port on localhost."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))  # Bind to a free port provided by the OS
        return s.getsockname()[1]


async def monitor_peer_topics(pubsub, nursery, termination_event):
    """
    Monitor for new topics that peers are subscribed to and
@ -102,16 +107,14 @@ async def monitor_peer_topics(pubsub, nursery, termination_event):


async def run(topic: str, destination: str | None, port: int | None) -> None:
    from libp2p.utils.address_validation import (
        get_available_interfaces,
        get_optimal_binding_address,
    )
    # Initialize network settings
    localhost_ip = "127.0.0.1"

    if port is None or port == 0:
        port = find_free_port()
        logger.info(f"Using random available port: {port}")

    listen_addrs = get_available_interfaces(port)
    listen_addr = multiaddr.Multiaddr(f"/ip4/0.0.0.0/tcp/{port}")

    # Create a new libp2p host
    host = new_host(
@ -140,11 +143,12 @@ async def run(topic: str, destination: str | None, port: int | None) -> None:

    pubsub = Pubsub(host, gossipsub)
    termination_event = trio.Event()  # Event to signal termination
    async with host.run(listen_addrs=listen_addrs), trio.open_nursery() as nursery:
    async with host.run(listen_addrs=[listen_addr]), trio.open_nursery() as nursery:
        # Start the peer-store cleanup task
        nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

        logger.info(f"Node started with peer ID: {host.get_id()}")
        logger.info(f"Listening on: {listen_addr}")
        logger.info("Initializing PubSub and GossipSub...")
        async with background_trio_service(pubsub):
            async with background_trio_service(gossipsub):
@ -158,21 +162,10 @@ async def run(topic: str, destination: str | None, port: int | None) -> None:

                if not destination:
                    # Server mode
                    # Get all available addresses with peer ID
                    all_addrs = host.get_addrs()

                    logger.info("Listener ready, listening on:")
                    for addr in all_addrs:
                        logger.info(f"{addr}")

                    # Use optimal address for the client command
                    optimal_addr = get_optimal_binding_address(port)
                    optimal_addr_with_peer = (
                        f"{optimal_addr}/p2p/{host.get_id().to_string()}"
                    )
                    logger.info(
                        f"\nRun this from the same folder in another console:\n\n"
                        f"pubsub-demo -d {optimal_addr_with_peer}\n"
                        "Run this script in another console with:\n"
                        f"pubsub-demo "
                        f"-d /ip4/{localhost_ip}/tcp/{port}/p2p/{host.get_id()}\n"
                    )
                    logger.info("Waiting for peers...")

@ -194,6 +187,11 @@ async def run(topic: str, destination: str | None, port: int | None) -> None:
                        f"Connecting to peer: {info.peer_id} "
                        f"using protocols: {protocols_in_maddr}"
                    )
                    logger.info(
                        "Run this script in another console with:\n"
                        f"pubsub-demo "
                        f"-d /ip4/{localhost_ip}/tcp/{port}/p2p/{host.get_id()}\n"
                    )
                    try:
                        await host.connect(info)
                        logger.info(f"Connected to peer: {info.peer_id}")

@ -1,228 +0,0 @@
"""
Random Walk Example for py-libp2p Kademlia DHT

This example demonstrates the Random Walk module's peer discovery capabilities
using real libp2p hosts and Kademlia DHT. It shows how the Random Walk module
automatically discovers new peers and maintains routing table health.

Usage:
    # Start server nodes (they will discover peers via random walk)
    python3 random_walk.py --mode server
"""

import argparse
import logging
import random
import secrets
import sys

import trio

from libp2p import new_host
from libp2p.abc import IHost
from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.kad_dht.kad_dht import DHTMode, KadDHT
from libp2p.tools.async_service import background_trio_service


# Simple logging configuration
def setup_logging(verbose: bool = False):
    """Setup unified logging configuration."""
    level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[logging.StreamHandler()],
    )

    # Configure key module loggers
    for module in ["libp2p.discovery.random_walk", "libp2p.kad_dht"]:
        logging.getLogger(module).setLevel(level)

    # Suppress noisy logs
    logging.getLogger("multiaddr").setLevel(logging.WARNING)


logger = logging.getLogger("random-walk-example")

# Default bootstrap nodes
DEFAULT_BOOTSTRAP_NODES = [
    "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
]


def filter_compatible_peer_info(peer_info) -> bool:
    """Filter peer info to check if it has compatible addresses (TCP + IPv4)."""
    if not hasattr(peer_info, "addrs") or not peer_info.addrs:
        return False

    for addr in peer_info.addrs:
        addr_str = str(addr)
        if "/tcp/" in addr_str and "/ip4/" in addr_str and "/quic" not in addr_str:
            return True
    return False


async def maintain_connections(host: IHost) -> None:
    """Maintain connections to ensure the host remains connected to healthy peers."""
    while True:
        try:
            connected_peers = host.get_connected_peers()
            list_peers = host.get_peerstore().peers_with_addrs()

            if len(connected_peers) < 20:
                logger.debug("Reconnecting to maintain peer connections...")

                # Find compatible peers
                compatible_peers = []
                for peer_id in list_peers:
                    try:
                        peer_info = host.get_peerstore().peer_info(peer_id)
                        if filter_compatible_peer_info(peer_info):
                            compatible_peers.append(peer_id)
                    except Exception:
                        continue

                # Connect to random subset of compatible peers
                if compatible_peers:
                    random_peers = random.sample(
                        compatible_peers, min(50, len(compatible_peers))
                    )
                    for peer_id in random_peers:
                        if peer_id not in connected_peers:
                            try:
                                with trio.move_on_after(5):
                                    peer_info = host.get_peerstore().peer_info(peer_id)
                                    await host.connect(peer_info)
                                    logger.debug(f"Connected to peer: {peer_id}")
                            except Exception as e:
                                logger.debug(f"Failed to connect to {peer_id}: {e}")

            await trio.sleep(15)
        except Exception as e:
            logger.error(f"Error maintaining connections: {e}")


async def demonstrate_random_walk_discovery(dht: KadDHT, interval: int = 30) -> None:
    """Demonstrate Random Walk peer discovery with periodic statistics."""
    iteration = 0
    while True:
        iteration += 1
        logger.info(f"--- Iteration {iteration} ---")
        logger.info(f"Routing table size: {dht.get_routing_table_size()}")
        logger.info(f"Connected peers: {len(dht.host.get_connected_peers())}")
        logger.info(f"Peerstore size: {len(dht.host.get_peerstore().peer_ids())}")
        await trio.sleep(interval)


async def run_node(port: int, mode: str, demo_interval: int = 30) -> None:
    """Run a node that demonstrates Random Walk peer discovery."""
    try:
        if port <= 0:
            port = random.randint(10000, 60000)

        logger.info(f"Starting {mode} node on port {port}")

        # Determine DHT mode
        dht_mode = DHTMode.SERVER if mode == "server" else DHTMode.CLIENT

        # Create host and DHT
        key_pair = create_new_key_pair(secrets.token_bytes(32))
        host = new_host(key_pair=key_pair, bootstrap=DEFAULT_BOOTSTRAP_NODES)

        from libp2p.utils.address_validation import get_available_interfaces

        listen_addrs = get_available_interfaces(port)

        async with host.run(listen_addrs=listen_addrs), trio.open_nursery() as nursery:
            # Start maintenance tasks
            nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)
            nursery.start_soon(maintain_connections, host)

            peer_id = host.get_id().pretty()
            logger.info(f"Node peer ID: {peer_id}")

            # Get all available addresses with peer ID
            all_addrs = host.get_addrs()
            logger.info("Listener ready, listening on:")
            for addr in all_addrs:
                logger.info(f"{addr}")

            # Create and start DHT with Random Walk enabled
            dht = KadDHT(host, dht_mode, enable_random_walk=True)
            logger.info(f"Initial routing table size: {dht.get_routing_table_size()}")

            async with background_trio_service(dht):
                logger.info(f"DHT service started in {dht_mode.value} mode")
                logger.info(f"Random Walk enabled: {dht.is_random_walk_enabled()}")

                async with trio.open_nursery() as task_nursery:
                    # Start demonstration and status reporting
                    task_nursery.start_soon(
                        demonstrate_random_walk_discovery, dht, demo_interval
                    )

                    # Periodic status updates
                    async def status_reporter():
                        while True:
                            await trio.sleep(30)
                            logger.debug(
                                f"Connected: {len(dht.host.get_connected_peers())}, "
                                f"Routing table: {dht.get_routing_table_size()}, "
                                f"Peerstore: {len(dht.host.get_peerstore().peer_ids())}"
                            )

                    task_nursery.start_soon(status_reporter)
                    await trio.sleep_forever()

    except Exception as e:
        logger.error(f"Node error: {e}", exc_info=True)
        sys.exit(1)


def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Random Walk Example for py-libp2p Kademlia DHT",
    )
    parser.add_argument(
        "--mode",
        choices=["server", "client"],
        default="server",
        help="Node mode: server (DHT server), or client (DHT client)",
    )
    parser.add_argument(
        "--port", type=int, default=0, help="Port to listen on (0 for random)"
    )
    parser.add_argument(
        "--demo-interval",
        type=int,
        default=30,
        help="Interval between random walk demonstrations in seconds",
    )
    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
    return parser.parse_args()


def main():
    """Main entry point for the random walk example."""
    try:
        args = parse_args()
        setup_logging(args.verbose)

        logger.info("=== Random Walk Example for py-libp2p ===")
        logger.info(
            f"Mode: {args.mode}, Port: {args.port} Demo interval: {args.demo_interval}s"
        )

        trio.run(run_node, args.port, args.mode, args.demo_interval)

    except KeyboardInterrupt:
        logger.info("Received interrupt signal, shutting down...")
    except Exception as e:
        logger.critical(f"Example failed: {e}", exc_info=True)
        sys.exit(1)


if __name__ == "__main__":
    main()
@ -1,446 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
TCP P2P Data Transfer Test
|
||||
|
||||
This test proves that TCP peer-to-peer data transfer works correctly in libp2p.
|
||||
This serves as a baseline to compare with WebSocket tests.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from multiaddr import Multiaddr
|
||||
import trio
|
||||
|
||||
from libp2p import create_yamux_muxer_option, new_host
|
||||
from libp2p.crypto.secp256k1 import create_new_key_pair
|
||||
from libp2p.custom_types import TProtocol
|
||||
from libp2p.peer.peerinfo import info_from_p2p_addr
|
||||
from libp2p.security.insecure.transport import PLAINTEXT_PROTOCOL_ID, InsecureTransport
|
||||
|
||||
# Test protocol for data exchange
|
||||
TCP_DATA_PROTOCOL = TProtocol("/test/tcp-data-exchange/1.0.0")
|
||||
|
||||
|
||||
async def create_tcp_host_pair():
|
||||
"""Create a pair of hosts configured for TCP communication."""
|
||||
# Create key pairs
|
||||
key_pair_a = create_new_key_pair()
|
||||
key_pair_b = create_new_key_pair()
|
||||
|
||||
# Create security options (using plaintext for simplicity)
|
||||
def security_options(kp):
|
||||
return {
|
||||
PLAINTEXT_PROTOCOL_ID: InsecureTransport(
|
||||
local_key_pair=kp, secure_bytes_provider=None, peerstore=None
|
||||
)
|
||||
}
|
||||
|
||||
# Host A (listener) - TCP transport (default)
|
||||
host_a = new_host(
|
||||
key_pair=key_pair_a,
|
||||
sec_opt=security_options(key_pair_a),
|
||||
muxer_opt=create_yamux_muxer_option(),
|
||||
listen_addrs=[Multiaddr("/ip4/127.0.0.1/tcp/0")],
|
||||
)
|
||||
|
||||
# Host B (dialer) - TCP transport (default)
|
||||
host_b = new_host(
|
||||
key_pair=key_pair_b,
|
||||
sec_opt=security_options(key_pair_b),
|
||||
muxer_opt=create_yamux_muxer_option(),
|
||||
listen_addrs=[Multiaddr("/ip4/127.0.0.1/tcp/0")],
|
||||
)
|
||||
|
||||
return host_a, host_b
|
||||
|
||||
|
||||
@pytest.mark.trio
|
||||
async def test_tcp_basic_connection():
|
||||
"""Test basic TCP connection establishment."""
|
||||
host_a, host_b = await create_tcp_host_pair()
|
||||
|
||||
connection_established = False
|
||||
|
||||
async def connection_handler(stream):
|
||||
nonlocal connection_established
|
||||
connection_established = True
|
||||
await stream.close()
|
||||
|
||||
host_a.set_stream_handler(TCP_DATA_PROTOCOL, connection_handler)
|
||||
|
||||
async with (
|
||||
host_a.run(listen_addrs=[Multiaddr("/ip4/127.0.0.1/tcp/0")]),
|
||||
host_b.run(listen_addrs=[]),
|
||||
):
|
||||
# Get host A's listen address
|
||||
listen_addrs = host_a.get_addrs()
|
||||
assert listen_addrs, "Host A should have listen addresses"
|
||||
|
||||
# Extract TCP address
|
||||
tcp_addr = None
|
||||
for addr in listen_addrs:
|
||||
if "/tcp/" in str(addr) and "/ws" not in str(addr):
|
||||
tcp_addr = addr
|
||||
break
|
||||
|
||||
assert tcp_addr, f"No TCP address found in {listen_addrs}"
|
||||
print(f"🔗 Host A listening on: {tcp_addr}")
|
||||
|
||||
# Create peer info for host A
|
||||
peer_info = info_from_p2p_addr(tcp_addr)
|
||||
|
||||
# Host B connects to host A
|
||||
await host_b.connect(peer_info)
|
||||
print("✅ TCP connection established")
|
||||
|
||||
# Open a stream to test the connection
|
||||
stream = await host_b.new_stream(peer_info.peer_id, [TCP_DATA_PROTOCOL])
|
||||
await stream.close()
|
||||
|
||||
# Wait a bit for the handler to be called
|
||||
await trio.sleep(0.1)
|
||||
|
||||
assert connection_established, "TCP connection handler should have been called"
|
||||
print("✅ TCP basic connection test successful!")


@pytest.mark.trio
async def test_tcp_data_transfer():
    """Test TCP peer-to-peer data transfer."""
    host_a, host_b = await create_tcp_host_pair()

    # Test data
    test_data = b"Hello TCP P2P Data Transfer! This is a test message."
    received_data = None
    transfer_complete = trio.Event()

    async def data_handler(stream):
        nonlocal received_data
        try:
            # Read the incoming data
            received_data = await stream.read(len(test_data))
            # Echo it back to confirm successful transfer
            await stream.write(received_data)
            await stream.close()
            transfer_complete.set()
        except Exception as e:
            print(f"Handler error: {e}")
            transfer_complete.set()

    host_a.set_stream_handler(TCP_DATA_PROTOCOL, data_handler)

    async with (
        host_a.run(listen_addrs=[Multiaddr("/ip4/127.0.0.1/tcp/0")]),
        host_b.run(listen_addrs=[]),
    ):
        # Get host A's listen address
        listen_addrs = host_a.get_addrs()
        assert listen_addrs, "Host A should have listen addresses"

        # Extract TCP address
        tcp_addr = None
        for addr in listen_addrs:
            if "/tcp/" in str(addr) and "/ws" not in str(addr):
                tcp_addr = addr
                break

        assert tcp_addr, f"No TCP address found in {listen_addrs}"
        print(f"🔗 Host A listening on: {tcp_addr}")

        # Create peer info for host A
        peer_info = info_from_p2p_addr(tcp_addr)

        # Host B connects to host A
        await host_b.connect(peer_info)
        print("✅ TCP connection established")

        # Open a stream for data transfer
        stream = await host_b.new_stream(peer_info.peer_id, [TCP_DATA_PROTOCOL])
        print("✅ TCP stream opened")

        # Send test data
        await stream.write(test_data)
        print(f"📤 Sent data: {test_data}")

        # Read echoed data back
        echoed_data = await stream.read(len(test_data))
        print(f"📥 Received echo: {echoed_data}")

        await stream.close()

        # Wait for transfer to complete
        with trio.fail_after(5.0):  # 5 second timeout
            await transfer_complete.wait()

        # Verify data transfer
        assert received_data == test_data, (
            f"Data mismatch: {received_data} != {test_data}"
        )
        assert echoed_data == test_data, f"Echo mismatch: {echoed_data} != {test_data}"

        print("✅ TCP P2P data transfer successful!")
        print(f"   Original: {test_data}")
        print(f"   Received: {received_data}")
        print(f"   Echoed: {echoed_data}")


@pytest.mark.trio
async def test_tcp_large_data_transfer():
    """Test TCP with larger data payloads."""
    host_a, host_b = await create_tcp_host_pair()

    # Large test data (10KB)
    test_data = b"TCP Large Data Test! " * 500  # ~10KB
    received_data = None
    transfer_complete = trio.Event()

    async def large_data_handler(stream):
        nonlocal received_data
        try:
            # Read data in chunks
            chunks = []
            total_received = 0
            expected_size = len(test_data)

            while total_received < expected_size:
                chunk = await stream.read(min(1024, expected_size - total_received))
                if not chunk:
                    break
                chunks.append(chunk)
                total_received += len(chunk)

            received_data = b"".join(chunks)

            # Send back confirmation
            await stream.write(b"RECEIVED_OK")
            await stream.close()
            transfer_complete.set()
        except Exception as e:
            print(f"Large data handler error: {e}")
            transfer_complete.set()

    host_a.set_stream_handler(TCP_DATA_PROTOCOL, large_data_handler)

    async with (
        host_a.run(listen_addrs=[Multiaddr("/ip4/127.0.0.1/tcp/0")]),
        host_b.run(listen_addrs=[]),
    ):
        # Get host A's listen address
        listen_addrs = host_a.get_addrs()
        assert listen_addrs, "Host A should have listen addresses"

        # Extract TCP address
        tcp_addr = None
        for addr in listen_addrs:
            if "/tcp/" in str(addr) and "/ws" not in str(addr):
                tcp_addr = addr
                break

        assert tcp_addr, f"No TCP address found in {listen_addrs}"
        print(f"🔗 Host A listening on: {tcp_addr}")
        print(f"📊 Test data size: {len(test_data)} bytes")

        # Create peer info for host A
        peer_info = info_from_p2p_addr(tcp_addr)

        # Host B connects to host A
        await host_b.connect(peer_info)
        print("✅ TCP connection established")

        # Open a stream for data transfer
        stream = await host_b.new_stream(peer_info.peer_id, [TCP_DATA_PROTOCOL])
        print("✅ TCP stream opened")

        # Send large test data in chunks
        chunk_size = 1024
        sent_bytes = 0
        for i in range(0, len(test_data), chunk_size):
            chunk = test_data[i : i + chunk_size]
            await stream.write(chunk)
            sent_bytes += len(chunk)
            if sent_bytes % (chunk_size * 4) == 0:  # Progress every 4KB
                print(f"📤 Sent {sent_bytes}/{len(test_data)} bytes")

        print(f"📤 Sent all {len(test_data)} bytes")

        # Read confirmation
        confirmation = await stream.read(1024)
        print(f"📥 Received confirmation: {confirmation}")

        await stream.close()

        # Wait for transfer to complete
        with trio.fail_after(10.0):  # 10 second timeout for large data
            await transfer_complete.wait()

        # Verify data transfer
        assert received_data is not None, "No data was received"
        assert received_data == test_data, (
            "Large data transfer failed:"
            + f" sizes {len(received_data)} != {len(test_data)}"
        )
        assert confirmation == b"RECEIVED_OK", f"Confirmation failed: {confirmation}"

        print("✅ TCP large data transfer successful!")
        print(f"   Data size: {len(test_data)} bytes")
        print(f"   Received: {len(received_data)} bytes")
        print(f"   Match: {received_data == test_data}")


@pytest.mark.trio
async def test_tcp_bidirectional_transfer():
    """Test bidirectional data transfer over TCP."""
    host_a, host_b = await create_tcp_host_pair()

    # Test data
    data_a_to_b = b"Message from Host A to Host B via TCP"
    data_b_to_a = b"Response from Host B to Host A via TCP"

    received_on_a = None
    received_on_b = None
    transfer_complete_a = trio.Event()
    transfer_complete_b = trio.Event()

    async def handler_a(stream):
        nonlocal received_on_a
        try:
            # Read data from B
            received_on_a = await stream.read(len(data_b_to_a))
            print(f"🅰️ Host A received: {received_on_a}")
            await stream.close()
            transfer_complete_a.set()
        except Exception as e:
            print(f"Handler A error: {e}")
            transfer_complete_a.set()

    async def handler_b(stream):
        nonlocal received_on_b
        try:
            # Read data from A
            received_on_b = await stream.read(len(data_a_to_b))
            print(f"🅱️ Host B received: {received_on_b}")
            await stream.close()
            transfer_complete_b.set()
        except Exception as e:
            print(f"Handler B error: {e}")
            transfer_complete_b.set()

    # Set up handlers on both hosts
    protocol_a_to_b = TProtocol("/test/tcp-a-to-b/1.0.0")
    protocol_b_to_a = TProtocol("/test/tcp-b-to-a/1.0.0")

    host_a.set_stream_handler(protocol_b_to_a, handler_a)
    host_b.set_stream_handler(protocol_a_to_b, handler_b)

    async with (
        host_a.run(listen_addrs=[Multiaddr("/ip4/127.0.0.1/tcp/0")]),
        host_b.run(listen_addrs=[Multiaddr("/ip4/127.0.0.1/tcp/0")]),
    ):
        # Get addresses
        addrs_a = host_a.get_addrs()
        addrs_b = host_b.get_addrs()

        assert addrs_a and addrs_b, "Both hosts should have addresses"

        # Extract TCP addresses
        tcp_addr_a = next(
            (
                addr
                for addr in addrs_a
                if "/tcp/" in str(addr) and "/ws" not in str(addr)
            ),
            None,
        )
        tcp_addr_b = next(
            (
                addr
                for addr in addrs_b
                if "/tcp/" in str(addr) and "/ws" not in str(addr)
            ),
            None,
        )

        assert tcp_addr_a and tcp_addr_b, (
            f"TCP addresses not found: A={addrs_a}, B={addrs_b}"
        )
        print(f"🔗 Host A listening on: {tcp_addr_a}")
        print(f"🔗 Host B listening on: {tcp_addr_b}")

        # Create peer infos
        peer_info_a = info_from_p2p_addr(tcp_addr_a)
        peer_info_b = info_from_p2p_addr(tcp_addr_b)

        # Establish connections
        await host_b.connect(peer_info_a)
        await host_a.connect(peer_info_b)
        print("✅ Bidirectional TCP connections established")

        # Send data A -> B
        stream_a_to_b = await host_a.new_stream(peer_info_b.peer_id, [protocol_a_to_b])
        await stream_a_to_b.write(data_a_to_b)
        print(f"📤 A->B: {data_a_to_b}")
        await stream_a_to_b.close()

        # Send data B -> A
        stream_b_to_a = await host_b.new_stream(peer_info_a.peer_id, [protocol_b_to_a])
        await stream_b_to_a.write(data_b_to_a)
        print(f"📤 B->A: {data_b_to_a}")
        await stream_b_to_a.close()

        # Wait for both transfers to complete
        with trio.fail_after(5.0):
            await transfer_complete_a.wait()
            await transfer_complete_b.wait()

        # Verify bidirectional transfer
        assert received_on_a == data_b_to_a, f"A received wrong data: {received_on_a}"
        assert received_on_b == data_a_to_b, f"B received wrong data: {received_on_b}"

        print("✅ TCP bidirectional data transfer successful!")
        print(f"   A->B: {data_a_to_b}")
        print(f"   B->A: {data_b_to_a}")
        print(f"   ✓ A got: {received_on_a}")
        print(f"   ✓ B got: {received_on_b}")


if __name__ == "__main__":
    # Run tests directly
    import logging

    logging.basicConfig(level=logging.INFO)

    print("🧪 Running TCP P2P Data Transfer Tests")
    print("=" * 50)

    async def run_all_tcp_tests():
        try:
            print("\n1. Testing basic TCP connection...")
            await test_tcp_basic_connection()
        except Exception as e:
            print(f"❌ Basic TCP connection test failed: {e}")
            return

        try:
            print("\n2. Testing TCP data transfer...")
            await test_tcp_data_transfer()
        except Exception as e:
            print(f"❌ TCP data transfer test failed: {e}")
            return

        try:
            print("\n3. Testing TCP large data transfer...")
            await test_tcp_large_data_transfer()
        except Exception as e:
            print(f"❌ TCP large data transfer test failed: {e}")
            return

        try:
            print("\n4. Testing TCP bidirectional transfer...")
            await test_tcp_bidirectional_transfer()
        except Exception as e:
            print(f"❌ TCP bidirectional transfer test failed: {e}")
            return

        print("\n" + "=" * 50)
        print("🏁 TCP P2P Tests Complete - All Tests PASSED!")

    trio.run(run_all_tcp_tests)
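The tests above rely on a create_tcp_host_pair() helper defined earlier in this file. For readers landing mid-diff, a minimal sketch of what such a helper could look like (hypothetical; it assumes the top-level libp2p.new_host() factory, which defaults to the TCP transport):

import libp2p

async def create_tcp_host_pair():
    # Two independent hosts; each test starts its listeners via host.run(...).
    return libp2p.new_host(), libp2p.new_host()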
@@ -1,210 +0,0 @@
#!/usr/bin/env python3
"""
Demo script showing the new transport integration capabilities in py-libp2p.

This script demonstrates:
1. How to use the transport registry
2. How to create transports dynamically based on multiaddrs
3. How to register custom transports
4. How the new system automatically selects the right transport
"""

import asyncio
import logging
from pathlib import Path
import sys

# Add the libp2p directory to the path so we can import it
sys.path.insert(0, str(Path(__file__).parent.parent))

import multiaddr

from libp2p.transport import (
    create_transport,
    create_transport_for_multiaddr,
    get_supported_transport_protocols,
    get_transport_registry,
    register_transport,
)
from libp2p.transport.tcp.tcp import TCP
from libp2p.transport.upgrader import TransportUpgrader

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def demo_transport_registry():
    """Demonstrate the transport registry functionality."""
    print("🔧 Transport Registry Demo")
    print("=" * 50)

    # Get the global registry
    registry = get_transport_registry()

    # Show supported protocols
    supported = get_supported_transport_protocols()
    print(f"Supported transport protocols: {supported}")

    # Show registered transports
    print("\nRegistered transports:")
    for protocol in supported:
        transport_class = registry.get_transport(protocol)
        class_name = transport_class.__name__ if transport_class else "None"
        print(f"  {protocol}: {class_name}")

    print()


def demo_transport_factory():
    """Demonstrate the transport factory functions."""
    print("🏭 Transport Factory Demo")
    print("=" * 50)

    # Create a dummy upgrader for WebSocket transport
    upgrader = TransportUpgrader({}, {})

    # Create transports using the factory function
    try:
        tcp_transport = create_transport("tcp")
        print(f"✅ Created TCP transport: {type(tcp_transport).__name__}")

        ws_transport = create_transport("ws", upgrader)
        print(f"✅ Created WebSocket transport: {type(ws_transport).__name__}")

    except Exception as e:
        print(f"❌ Error creating transport: {e}")

    print()


def demo_multiaddr_transport_selection():
    """Demonstrate automatic transport selection based on multiaddrs."""
    print("🎯 Multiaddr Transport Selection Demo")
    print("=" * 50)

    # Create a dummy upgrader
    upgrader = TransportUpgrader({}, {})

    # Test different multiaddr types
    test_addrs = [
        "/ip4/127.0.0.1/tcp/8080",
        "/ip4/127.0.0.1/tcp/8080/ws",
        "/ip6/::1/tcp/8080/ws",
        "/dns4/example.com/tcp/443/ws",
    ]

    for addr_str in test_addrs:
        try:
            maddr = multiaddr.Multiaddr(addr_str)
            transport = create_transport_for_multiaddr(maddr, upgrader)

            if transport:
                print(f"✅ {addr_str} -> {type(transport).__name__}")
            else:
                print(f"❌ {addr_str} -> No transport found")

        except Exception as e:
            print(f"❌ {addr_str} -> Error: {e}")

    print()


def demo_custom_transport_registration():
    """Demonstrate how to register custom transports."""
    print("🔧 Custom Transport Registration Demo")
    print("=" * 50)

    # Show current supported protocols
    print(f"Before registration: {get_supported_transport_protocols()}")

    # Register a custom transport (using TCP as an example)
    class CustomTCPTransport(TCP):
        """Custom TCP transport for demonstration."""

        def __init__(self):
            super().__init__()
            self.custom_flag = True

    # Register the custom transport
    register_transport("custom_tcp", CustomTCPTransport)

    # Show updated supported protocols
    print(f"After registration: {get_supported_transport_protocols()}")

    # Test creating the custom transport
    try:
        custom_transport = create_transport("custom_tcp")
        print(f"✅ Created custom transport: {type(custom_transport).__name__}")
        # Check if it has the custom flag (type-safe way)
        if hasattr(custom_transport, "custom_flag"):
            flag_value = getattr(custom_transport, "custom_flag", "Not found")
            print(f"   Custom flag: {flag_value}")
        else:
            print("   Custom flag: Not found")
    except Exception as e:
        print(f"❌ Error creating custom transport: {e}")

    print()


def demo_integration_with_libp2p():
    """Demonstrate how the new system integrates with libp2p."""
    print("🚀 Libp2p Integration Demo")
    print("=" * 50)

    print("The new transport system integrates seamlessly with libp2p:")
    print()
    print("1. ✅ Automatic transport selection based on multiaddr")
    print("2. ✅ Support for WebSocket (/ws) protocol")
    print("3. ✅ Fallback to TCP for backward compatibility")
    print("4. ✅ Easy registration of new transport protocols")
    print("5. ✅ No changes needed to existing libp2p code")
    print()

    print("Example usage in libp2p:")
    print("  # This will automatically use WebSocket transport")
    print("  host = new_host(listen_addrs=['/ip4/127.0.0.1/tcp/8080/ws'])")
    print()
    print("  # This will automatically use TCP transport")
    print("  host = new_host(listen_addrs=['/ip4/127.0.0.1/tcp/8080'])")
    print()

    print()


async def main():
    """Run all demos."""
    print("🎉 Py-libp2p Transport Integration Demo")
    print("=" * 60)
    print()

    # Run all demos
    demo_transport_registry()
    demo_transport_factory()
    demo_multiaddr_transport_selection()
    demo_custom_transport_registration()
    demo_integration_with_libp2p()

    print("🎯 Summary of New Features:")
    print("=" * 40)
    print("✅ Transport Registry: Central registry for all transport implementations")
    print("✅ Dynamic Transport Selection: Automatic selection based on multiaddr")
    print("✅ WebSocket Support: Full /ws protocol support")
    print("✅ Extensible Architecture: Easy to add new transport protocols")
    print("✅ Backward Compatibility: Existing TCP code continues to work")
    print("✅ Factory Functions: Simple API for creating transports")
    print()
    print("🚀 The transport system is now ready for production use!")


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\n👋 Demo interrupted by user")
    except Exception as e:
        print(f"\n❌ Demo failed with error: {e}")
        import traceback

        traceback.print_exc()
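The selection rule the deleted demo exercised can be stated compactly: the registry keys on the transport protocol that terminates the multiaddr, so "/ip4/.../tcp/8080/ws" resolves to the WebSocket transport while a bare "/tcp/8080" falls back to TCP. A hedged illustration of that dispatch (pick_protocol is a hypothetical helper, not part of the registry API):

import multiaddr

def pick_protocol(maddr: multiaddr.Multiaddr) -> str:
    # Mirrors the demo's observed behavior: "/ws" wins over plain "/tcp".
    names = [p.name for p in maddr.protocols()]
    return "ws" if "ws" in names else "tcp"

assert pick_protocol(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/8080/ws")) == "ws"
assert pick_protocol(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/8080")) == "tcp"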
@@ -1,220 +0,0 @@
#!/usr/bin/env python3
"""
Simple TCP echo demo to verify basic libp2p functionality.
"""

import argparse
import logging
import traceback

import multiaddr
import trio

from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.custom_types import TProtocol
from libp2p.host.basic_host import BasicHost
from libp2p.network.swarm import Swarm
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import info_from_p2p_addr
from libp2p.peer.peerstore import PeerStore
from libp2p.security.insecure.transport import PLAINTEXT_PROTOCOL_ID, InsecureTransport
from libp2p.stream_muxer.yamux.yamux import Yamux
from libp2p.transport.tcp.tcp import TCP
from libp2p.transport.upgrader import TransportUpgrader

# Enable debug logging
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger("libp2p.tcp-example")

# Simple echo protocol
ECHO_PROTOCOL_ID = TProtocol("/echo/1.0.0")


async def echo_handler(stream):
    """Simple echo handler that echoes back any data received."""
    try:
        data = await stream.read(1024)
        if data:
            message = data.decode("utf-8", errors="replace")
            print(f"📥 Received: {message}")
            print(f"📤 Echoing back: {message}")
            await stream.write(data)
        await stream.close()
    except Exception as e:
        logger.error(f"Echo handler error: {e}")
        await stream.close()


def create_tcp_host():
    """Create a host with TCP transport."""
    # Create key pair and peer store
    key_pair = create_new_key_pair()
    peer_id = ID.from_pubkey(key_pair.public_key)
    peer_store = PeerStore()
    peer_store.add_key_pair(peer_id, key_pair)

    # Create transport upgrader with plaintext security
    upgrader = TransportUpgrader(
        secure_transports_by_protocol={
            TProtocol(PLAINTEXT_PROTOCOL_ID): InsecureTransport(key_pair)
        },
        muxer_transports_by_protocol={TProtocol("/yamux/1.0.0"): Yamux},
    )

    # Create TCP transport
    transport = TCP()

    # Create swarm and host
    swarm = Swarm(peer_id, peer_store, upgrader, transport)
    host = BasicHost(swarm)

    return host


async def run(port: int, destination: str) -> None:
    localhost_ip = "0.0.0.0"

    if not destination:
        # Create first host (listener) with TCP transport
        listen_addr = multiaddr.Multiaddr(f"/ip4/{localhost_ip}/tcp/{port}")

        try:
            host = create_tcp_host()
            logger.debug("Created TCP host")

            # Set up echo handler
            host.set_stream_handler(ECHO_PROTOCOL_ID, echo_handler)

            async with (
                host.run(listen_addrs=[listen_addr]),
                trio.open_nursery() as (nursery),
            ):
                # Start the peer-store cleanup task
                nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

                # Get the actual address and replace 0.0.0.0 with 127.0.0.1 for client
                # connections
                addrs = host.get_addrs()
                logger.debug(f"Host addresses: {addrs}")
                if not addrs:
                    print("❌ Error: No addresses found for the host")
                    return

                server_addr = str(addrs[0])
                client_addr = server_addr.replace("/ip4/0.0.0.0/", "/ip4/127.0.0.1/")

                print("🌐 TCP Server Started Successfully!")
                print("=" * 50)
                print(f"📍 Server Address: {client_addr}")
                print("🔧 Protocol: /echo/1.0.0")
                print("🚀 Transport: TCP")
                print()
                print("📋 To test the connection, run this in another terminal:")
                print(f"   python test_tcp_echo.py -d {client_addr}")
                print()
                print("⏳ Waiting for incoming TCP connections...")
                print("─" * 50)

                await trio.sleep_forever()

        except Exception as e:
            print(f"❌ Error creating TCP server: {e}")
            traceback.print_exc()
            return

    else:
        # Create second host (dialer) with TCP transport
        listen_addr = multiaddr.Multiaddr(f"/ip4/{localhost_ip}/tcp/{port}")

        try:
            # Create a single host for client operations
            host = create_tcp_host()

            # Start the host for client operations
            async with (
                host.run(listen_addrs=[listen_addr]),
                trio.open_nursery() as (nursery),
            ):
                # Start the peer-store cleanup task
                nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)
                maddr = multiaddr.Multiaddr(destination)
                info = info_from_p2p_addr(maddr)
                print("🔌 TCP Client Starting...")
                print("=" * 40)
                print(f"🎯 Target Peer: {info.peer_id}")
                print(f"📍 Target Address: {destination}")
                print()

                try:
                    print("🔗 Connecting to TCP server...")
                    await host.connect(info)
                    print("✅ Successfully connected to TCP server!")
                except Exception as e:
                    error_msg = str(e)
                    print("\n❌ Connection Failed!")
                    print(f"   Peer ID: {info.peer_id}")
                    print(f"   Address: {destination}")
                    print(f"   Error: {error_msg}")
                    return

                # Create a stream and send test data
                try:
                    stream = await host.new_stream(info.peer_id, [ECHO_PROTOCOL_ID])
                except Exception as e:
                    print(f"❌ Failed to create stream: {e}")
                    return

                try:
                    print("🚀 Starting Echo Protocol Test...")
                    print("─" * 40)

                    # Send test data
                    test_message = b"Hello TCP Transport!"
                    print(f"📤 Sending message: {test_message.decode('utf-8')}")
                    await stream.write(test_message)

                    # Read response
                    print("⏳ Waiting for server response...")
                    response = await stream.read(1024)
                    print(f"📥 Received response: {response.decode('utf-8')}")

                    await stream.close()

                    print("─" * 40)
                    if response == test_message:
                        print("🎉 Echo test successful!")
                        print("✅ TCP transport is working perfectly!")
                    else:
                        print("❌ Echo test failed!")

                except Exception as e:
                    print(f"Echo protocol error: {e}")
                    traceback.print_exc()

                print("✅ TCP demo completed successfully!")

        except Exception as e:
            print(f"❌ Error creating TCP client: {e}")
            traceback.print_exc()
            return


def main() -> None:
    description = "Simple TCP echo demo for libp2p"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
    parser.add_argument(
        "-d", "--destination", type=str, help="destination multiaddr string"
    )

    args = parser.parse_args()

    try:
        trio.run(run, args.port, args.destination)
    except KeyboardInterrupt:
        pass


if __name__ == "__main__":
    main()
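One rough edge in the client path above: host.connect() can hang indefinitely when the destination multiaddr points at a dead port. A hedged sketch of bounding the dial with a trio timeout, the same trio.fail_after pattern the tests earlier in this diff use:

import trio

async def connect_with_timeout(host, info, seconds: float = 10.0):
    # Raises trio.TooSlowError instead of blocking forever on a dead peer.
    with trio.fail_after(seconds):
        await host.connect(info)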
@@ -1,145 +0,0 @@
#!/usr/bin/env python3
"""
Simple test script to verify WebSocket transport functionality.
"""

import asyncio
import logging
from pathlib import Path
import sys

# Add the libp2p directory to the path so we can import it
sys.path.insert(0, str(Path(__file__).parent))

import multiaddr

from libp2p.transport import create_transport, create_transport_for_multiaddr
from libp2p.transport.upgrader import TransportUpgrader

# Set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


async def test_websocket_transport():
    """Test basic WebSocket transport functionality."""
    print("🧪 Testing WebSocket Transport Functionality")
    print("=" * 50)

    # Create a dummy upgrader
    upgrader = TransportUpgrader({}, {})

    # Test creating WebSocket transport
    try:
        ws_transport = create_transport("ws", upgrader)
        print(f"✅ WebSocket transport created: {type(ws_transport).__name__}")

        # Test creating transport from multiaddr
        ws_maddr = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/8080/ws")
        ws_transport_from_maddr = create_transport_for_multiaddr(ws_maddr, upgrader)
        print(
            f"✅ WebSocket transport from multiaddr: "
            f"{type(ws_transport_from_maddr).__name__}"
        )

        # Test creating listener
        handler_called = False

        async def test_handler(conn):
            nonlocal handler_called
            handler_called = True
            print(f"✅ Connection handler called with: {type(conn).__name__}")
            await conn.close()

        listener = ws_transport.create_listener(test_handler)
        print(f"✅ WebSocket listener created: {type(listener).__name__}")

        # Test that the transport can be used
        print(
            f"✅ WebSocket transport supports dialing: {hasattr(ws_transport, 'dial')}"
        )
        print(
            f"✅ WebSocket transport supports listening: "
            f"{hasattr(ws_transport, 'create_listener')}"
        )

        print("\n🎯 WebSocket Transport Test Results:")
        print("✅ Transport creation: PASS")
        print("✅ Multiaddr parsing: PASS")
        print("✅ Listener creation: PASS")
        print("✅ Interface compliance: PASS")

    except Exception as e:
        print(f"❌ WebSocket transport test failed: {e}")
        import traceback

        traceback.print_exc()
        return False

    return True


async def test_transport_registry():
    """Test the transport registry functionality."""
    print("\n🔧 Testing Transport Registry")
    print("=" * 30)

    from libp2p.transport import (
        get_supported_transport_protocols,
        get_transport_registry,
    )

    registry = get_transport_registry()
    supported = get_supported_transport_protocols()

    print(f"Supported protocols: {supported}")

    # Test getting transports
    for protocol in supported:
        transport_class = registry.get_transport(protocol)
        class_name = transport_class.__name__ if transport_class else "None"
        print(f"  {protocol}: {class_name}")

    # Test creating transports through registry
    upgrader = TransportUpgrader({}, {})

    for protocol in supported:
        try:
            transport = registry.create_transport(protocol, upgrader)
            if transport:
                print(f"✅ {protocol}: Created successfully")
            else:
                print(f"❌ {protocol}: Failed to create")
        except Exception as e:
            print(f"❌ {protocol}: Error - {e}")


async def main():
    """Run all tests."""
    print("🚀 WebSocket Transport Integration Test Suite")
    print("=" * 60)
    print()

    # Run tests
    success = await test_websocket_transport()
    await test_transport_registry()

    print("\n" + "=" * 60)
    if success:
        print("🎉 All tests passed! WebSocket transport is working correctly.")
    else:
        print("❌ Some tests failed. Check the output above for details.")

    print("\n🚀 WebSocket transport is ready for use in py-libp2p!")


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\n👋 Test interrupted by user")
    except Exception as e:
        print(f"\n❌ Test failed with error: {e}")
        import traceback

        traceback.print_exc()
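The deleted script only checks the transport via hasattr(); a slightly stronger hedged check is to confirm that a non-WebSocket multiaddr does not resolve to the WebSocket transport. A sketch using only the names the script itself imports (the class-name comparison is an assumption about the registry's return types):

import multiaddr
from libp2p.transport import create_transport_for_multiaddr
from libp2p.transport.upgrader import TransportUpgrader

upgrader = TransportUpgrader({}, {})
tcp_only = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/8080")
t = create_transport_for_multiaddr(tcp_only, upgrader)
# Expect the TCP transport (or None), never the WebSocket one, for a bare /tcp addr.
assert t is None or type(t).__name__ != "WebsocketTransport"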
@@ -1,448 +0,0 @@
import argparse
import logging
import signal
import sys
import traceback

import multiaddr
import trio

from libp2p.abc import INotifee
from libp2p.crypto.ed25519 import create_new_key_pair as create_ed25519_key_pair
from libp2p.crypto.secp256k1 import create_new_key_pair
from libp2p.custom_types import TProtocol
from libp2p.host.basic_host import BasicHost
from libp2p.network.swarm import Swarm
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import info_from_p2p_addr
from libp2p.peer.peerstore import PeerStore
from libp2p.security.insecure.transport import PLAINTEXT_PROTOCOL_ID, InsecureTransport
from libp2p.security.noise.transport import (
    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)
from libp2p.stream_muxer.yamux.yamux import Yamux
from libp2p.transport.upgrader import TransportUpgrader
from libp2p.transport.websocket.transport import WebsocketTransport

# Enable debug logging
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger("libp2p.websocket-example")


# Suppress KeyboardInterrupt by handling SIGINT directly
def signal_handler(signum, frame):
    print("✅ Clean exit completed.")
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)

# Simple echo protocol
ECHO_PROTOCOL_ID = TProtocol("/echo/1.0.0")


async def echo_handler(stream):
    """Simple echo handler that echoes back any data received."""
    try:
        data = await stream.read(1024)
        if data:
            message = data.decode("utf-8", errors="replace")
            print(f"📥 Received: {message}")
            print(f"📤 Echoing back: {message}")
            await stream.write(data)
        await stream.close()
    except Exception as e:
        logger.error(f"Echo handler error: {e}")
        await stream.close()


def create_websocket_host(listen_addrs=None, use_plaintext=False):
    """Create a host with WebSocket transport."""
    # Create key pair and peer store
    key_pair = create_new_key_pair()
    peer_id = ID.from_pubkey(key_pair.public_key)
    peer_store = PeerStore()
    peer_store.add_key_pair(peer_id, key_pair)

    if use_plaintext:
        # Create transport upgrader with plaintext security
        upgrader = TransportUpgrader(
            secure_transports_by_protocol={
                TProtocol(PLAINTEXT_PROTOCOL_ID): InsecureTransport(key_pair)
            },
            muxer_transports_by_protocol={TProtocol("/yamux/1.0.0"): Yamux},
        )
    else:
        # Create separate Ed25519 key for Noise protocol
        noise_key_pair = create_ed25519_key_pair()

        # Create Noise transport
        noise_transport = NoiseTransport(
            libp2p_keypair=key_pair,
            noise_privkey=noise_key_pair.private_key,
            early_data=None,
            with_noise_pipes=False,
        )

        # Create transport upgrader with Noise security
        upgrader = TransportUpgrader(
            secure_transports_by_protocol={
                TProtocol(NOISE_PROTOCOL_ID): noise_transport
            },
            muxer_transports_by_protocol={TProtocol("/yamux/1.0.0"): Yamux},
        )

    # Create WebSocket transport
    transport = WebsocketTransport(upgrader)

    # Create swarm and host
    swarm = Swarm(peer_id, peer_store, upgrader, transport)
    host = BasicHost(swarm)

    return host


async def run(port: int, destination: str, use_plaintext: bool = False) -> None:
    localhost_ip = "0.0.0.0"

    if not destination:
        # Create first host (listener) with WebSocket transport
        listen_addr = multiaddr.Multiaddr(f"/ip4/{localhost_ip}/tcp/{port}/ws")

        try:
            host = create_websocket_host(use_plaintext=use_plaintext)
            logger.debug(f"Created host with use_plaintext={use_plaintext}")

            # Set up echo handler
            host.set_stream_handler(ECHO_PROTOCOL_ID, echo_handler)

            # Add connection event handlers for debugging
            class DebugNotifee(INotifee):
                async def opened_stream(self, network, stream):
                    pass

                async def closed_stream(self, network, stream):
                    pass

                async def connected(self, network, conn):
                    print(
                        f"🔗 New libp2p connection established: "
                        f"{conn.muxed_conn.peer_id}"
                    )
                    if hasattr(conn.muxed_conn, "get_security_protocol"):
                        security = conn.muxed_conn.get_security_protocol()
                    else:
                        security = "Unknown"

                    print(f"   Security: {security}")

                async def disconnected(self, network, conn):
                    print(f"🔌 libp2p connection closed: {conn.muxed_conn.peer_id}")

                async def listen(self, network, multiaddr):
                    pass

                async def listen_close(self, network, multiaddr):
                    pass

            host.get_network().register_notifee(DebugNotifee())

            # Create a cancellation token for clean shutdown
            cancel_scope = trio.CancelScope()

            async def signal_handler():
                with trio.open_signal_receiver(signal.SIGINT, signal.SIGTERM) as (
                    signal_receiver
                ):
                    async for sig in signal_receiver:
                        print(f"\n🛑 Received signal {sig}")
                        print("✅ Shutting down WebSocket server...")
                        cancel_scope.cancel()
                        return

            async with (
                host.run(listen_addrs=[listen_addr]),
                trio.open_nursery() as (nursery),
            ):
                # Start the peer-store cleanup task
                nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

                # Start the signal handler
                nursery.start_soon(signal_handler)

                # Get the actual address and replace 0.0.0.0 with 127.0.0.1 for client
                # connections
                addrs = host.get_addrs()
                logger.debug(f"Host addresses: {addrs}")
                if not addrs:
                    print("❌ Error: No addresses found for the host")
                    print("Debug: host.get_addrs() returned empty list")
                    return

                server_addr = str(addrs[0])
                client_addr = server_addr.replace("/ip4/0.0.0.0/", "/ip4/127.0.0.1/")

                print("🌐 WebSocket Server Started Successfully!")
                print("=" * 50)
                print(f"📍 Server Address: {client_addr}")
                print("🔧 Protocol: /echo/1.0.0")
                print("🚀 Transport: WebSocket (/ws)")
                print()
                print("📋 To test the connection, run this in another terminal:")
                plaintext_flag = " --plaintext" if use_plaintext else ""
                print(f"   python websocket_demo.py -d {client_addr}{plaintext_flag}")
                print()
                print("⏳ Waiting for incoming WebSocket connections...")
                print("─" * 50)

                # Add a custom handler to show connection events
                async def custom_echo_handler(stream):
                    peer_id = stream.muxed_conn.peer_id
                    print("\n🔗 New WebSocket Connection!")
                    print(f"   Peer ID: {peer_id}")
                    print("   Protocol: /echo/1.0.0")

                    # Show remote address in multiaddr format
                    try:
                        remote_address = stream.get_remote_address()
                        if remote_address:
                            print(f"   Remote: {remote_address}")
                    except Exception:
                        print("   Remote: Unknown")

                    print(" ─" * 40)

                    # Call the original handler
                    await echo_handler(stream)

                    print(" ─" * 40)
                    print(f"✅ Echo request completed for peer: {peer_id}")
                    print()

                # Replace the handler with our custom one
                host.set_stream_handler(ECHO_PROTOCOL_ID, custom_echo_handler)

                # Wait indefinitely or until cancelled
                with cancel_scope:
                    await trio.sleep_forever()

        except Exception as e:
            print(f"❌ Error creating WebSocket server: {e}")
            traceback.print_exc()
            return

    else:
        # Create second host (dialer) with WebSocket transport
        listen_addr = multiaddr.Multiaddr(f"/ip4/{localhost_ip}/tcp/{port}/ws")

        try:
            # Create a single host for client operations
            host = create_websocket_host(use_plaintext=use_plaintext)

            # Start the host for client operations
            async with (
                host.run(listen_addrs=[listen_addr]),
                trio.open_nursery() as (nursery),
            ):
                # Start the peer-store cleanup task
                nursery.start_soon(host.get_peerstore().start_cleanup_task, 60)

                # Add connection event handlers for debugging
                class ClientDebugNotifee(INotifee):
                    async def opened_stream(self, network, stream):
                        pass

                    async def closed_stream(self, network, stream):
                        pass

                    async def connected(self, network, conn):
                        print(
                            f"🔗 Client: libp2p connection established: "
                            f"{conn.muxed_conn.peer_id}"
                        )

                    async def disconnected(self, network, conn):
                        print(
                            f"🔌 Client: libp2p connection closed: "
                            f"{conn.muxed_conn.peer_id}"
                        )

                    async def listen(self, network, multiaddr):
                        pass

                    async def listen_close(self, network, multiaddr):
                        pass

                host.get_network().register_notifee(ClientDebugNotifee())

                maddr = multiaddr.Multiaddr(destination)
                info = info_from_p2p_addr(maddr)
                print("🔌 WebSocket Client Starting...")
                print("=" * 40)
                print(f"🎯 Target Peer: {info.peer_id}")
                print(f"📍 Target Address: {destination}")
                print()

                try:
                    print("🔗 Connecting to WebSocket server...")
                    print(f"   Security: {'Plaintext' if use_plaintext else 'Noise'}")
                    await host.connect(info)
                    print("✅ Successfully connected to WebSocket server!")
                except Exception as e:
                    error_msg = str(e)
                    print("\n❌ Connection Failed!")
                    print(f"   Peer ID: {info.peer_id}")
                    print(f"   Address: {destination}")
                    print(f"   Security: {'Plaintext' if use_plaintext else 'Noise'}")
                    print(f"   Error: {error_msg}")
                    print(f"   Error type: {type(e).__name__}")

                    # Add more detailed error information for debugging
                    if hasattr(e, "__cause__") and e.__cause__:
                        print(f"   Root cause: {e.__cause__}")
                        print(f"   Root cause type: {type(e.__cause__).__name__}")

                    print()
                    print("💡 Troubleshooting:")
                    print("   • Make sure the WebSocket server is running")
                    print("   • Check that the server address is correct")
                    print("   • Verify the server is listening on the right port")
                    print(
                        "   • Ensure both client and server use the same sec protocol"
                    )
                    if not use_plaintext:
                        print("   • Noise over WebSocket may have compatibility issues")
                    return

                # Create a stream and send test data
                try:
                    stream = await host.new_stream(info.peer_id, [ECHO_PROTOCOL_ID])
                except Exception as e:
                    print(f"❌ Failed to create stream: {e}")
                    return

                try:
                    print("🚀 Starting Echo Protocol Test...")
                    print("─" * 40)

                    # Send test data
                    test_message = b"Hello WebSocket Transport!"
                    print(f"📤 Sending message: {test_message.decode('utf-8')}")
                    await stream.write(test_message)

                    # Read response
                    print("⏳ Waiting for server response...")
                    response = await stream.read(1024)
                    print(f"📥 Received response: {response.decode('utf-8')}")

                    await stream.close()

                    print("─" * 40)
                    if response == test_message:
                        print("🎉 Echo test successful!")
                        print("✅ WebSocket transport is working perfectly!")
                        print("✅ Client completed successfully, exiting.")
                    else:
                        print("❌ Echo test failed!")
                        print("   Response doesn't match sent data.")
                        print(f"   Sent: {test_message}")
                        print(f"   Received: {response}")

                except Exception as e:
                    error_msg = str(e)
                    print(f"Echo protocol error: {error_msg}")
                    traceback.print_exc()
                finally:
                    # Ensure stream is closed
                    try:
                        if stream:
                            # Check if stream has is_closed method and use it
                            has_is_closed = hasattr(stream, "is_closed") and callable(
                                getattr(stream, "is_closed")
                            )
                            if has_is_closed:
                                # type: ignore[attr-defined]
                                if not await stream.is_closed():
                                    await stream.close()
                            else:
                                # Fallback: just try to close the stream
                                await stream.close()
                    except Exception:
                        pass

                # host.run() context manager handles cleanup automatically
                print()
                print("🎉 WebSocket Demo Completed Successfully!")
                print("=" * 50)
                print("✅ WebSocket transport is working perfectly!")
                print("✅ Echo protocol communication successful!")
                print("✅ libp2p integration verified!")
                print()
                print("🚀 Your WebSocket transport is ready for production use!")

                # Add a small delay to ensure all cleanup is complete
                await trio.sleep(0.1)

        except Exception as e:
            print(f"❌ Error creating WebSocket client: {e}")
            traceback.print_exc()
            return


def main() -> None:
    description = """
    This program demonstrates the libp2p WebSocket transport.
    First run
    'python websocket_demo.py -p <PORT> [--plaintext]' to start a WebSocket server.
    Then run
    'python websocket_demo.py <ANOTHER_PORT> -d <DESTINATION> [--plaintext]'
    where <DESTINATION> is the multiaddress shown by the server.

    By default, this example uses Noise encryption for secure communication.
    Use --plaintext for testing with unencrypted communication
    (not recommended for production).
    """

    example_maddr = (
        "/ip4/127.0.0.1/tcp/8888/ws/p2p/QmQn4SwGkDZKkUEpBRBvTmheQycxAHJUNmVEnjA2v1qe8Q"
    )

    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-p", "--port", default=0, type=int, help="source port number")
    parser.add_argument(
        "-d",
        "--destination",
        type=str,
        help=f"destination multiaddr string, e.g. {example_maddr}",
    )
    parser.add_argument(
        "--plaintext",
        action="store_true",
        help=(
            "use plaintext security instead of Noise encryption "
            "(not recommended for production)"
        ),
    )

    args = parser.parse_args()

    # Determine security mode: use Noise by default,
    # plaintext if --plaintext is specified
    use_plaintext = args.plaintext

    try:
        trio.run(run, args.port, args.destination, use_plaintext)
    except KeyboardInterrupt:
        # This is expected when Ctrl+C is pressed
        # The signal handler already printed the shutdown message
        print("✅ Clean exit completed.")
        return
    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        return


if __name__ == "__main__":
    main()
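websocket_demo.py wires the host by hand (key pair, upgrader, swarm); the transport-integration demo earlier in this diff claims new_host() can derive the same setup from the listen address alone. A hedged minimal equivalent of the server half, assuming new_host() applies its default security and muxer options:

import multiaddr
import trio
from libp2p import new_host

async def serve():
    addr = multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/8888/ws")
    # Transport selection from the /ws suffix is handled inside new_swarm().
    host = new_host(listen_addrs=[addr])
    async with host.run(listen_addrs=[addr]):
        print(host.get_addrs())
        await trio.sleep_forever()

trio.run(serve)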
@@ -1,12 +1,3 @@
"""Libp2p Python implementation."""

import logging
import ssl

from libp2p.transport.quic.utils import is_quic_multiaddr
from typing import Any
from libp2p.transport.quic.transport import QUICTransport
from libp2p.transport.quic.config import QUICTransportConfig
from collections.abc import (
    Mapping,
    Sequence,
@@ -15,17 +6,19 @@ from importlib.metadata import version as __version
from typing import (
    Literal,
    Optional,
    Type,
    cast,
)

import multiaddr

from libp2p.abc import (
    IHost,
    IMuxedConn,
    INetworkService,
    IPeerRouting,
    IPeerStore,
    ISecureTransport,
    ITransport,
)
from libp2p.crypto.keys import (
    KeyPair,
@@ -39,6 +32,9 @@ from libp2p.custom_types import (
    TProtocol,
    TSecurityOptions,
)
from libp2p.discovery.mdns.mdns import (
    MDNSDiscovery,
)
from libp2p.host.basic_host import (
    BasicHost,
)
@@ -48,44 +44,33 @@ from libp2p.host.routed_host import (
from libp2p.network.swarm import (
    Swarm,
)
from libp2p.network.config import (
    ConnectionConfig,
    RetryConfig
)
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerstore import (
    PeerStore,
    create_signed_peer_record,
)
from libp2p.security.insecure.transport import (
    PLAINTEXT_PROTOCOL_ID,
    InsecureTransport,
)
from libp2p.security.noise.transport import (
    PROTOCOL_ID as NOISE_PROTOCOL_ID,
    Transport as NoiseTransport,
)
from libp2p.security.noise.transport import PROTOCOL_ID as NOISE_PROTOCOL_ID
from libp2p.security.noise.transport import Transport as NoiseTransport
import libp2p.security.secio.transport as secio
from libp2p.stream_muxer.mplex.mplex import (
    MPLEX_PROTOCOL_ID,
    Mplex,
)
from libp2p.stream_muxer.yamux.yamux import (
    PROTOCOL_ID as YAMUX_PROTOCOL_ID,
    Yamux,
)
from libp2p.stream_muxer.yamux.yamux import PROTOCOL_ID as YAMUX_PROTOCOL_ID
from libp2p.transport.tcp.tcp import (
    TCP,
)
from libp2p.transport.upgrader import (
    TransportUpgrader,
)
from libp2p.transport.transport_registry import (
    create_transport_for_multiaddr,
    get_supported_transport_protocols,
)
from libp2p.utils.logging import (
    setup_logging,
)
@@ -101,7 +86,7 @@ MUXER_YAMUX = "YAMUX"
MUXER_MPLEX = "MPLEX"
DEFAULT_NEGOTIATE_TIMEOUT = 5

logger = logging.getLogger(__name__)


def set_default_muxer(muxer_name: Literal["YAMUX", "MPLEX"]) -> None:
    """
@@ -170,6 +155,7 @@ def get_default_muxer_options() -> TMuxerOptions:
    else:  # YAMUX is default
        return create_yamux_muxer_option()


def new_swarm(
    key_pair: KeyPair | None = None,
    muxer_opt: TMuxerOptions | None = None,
@@ -177,13 +163,7 @@ def new_swarm(
    peerstore_opt: IPeerStore | None = None,
    muxer_preference: Literal["YAMUX", "MPLEX"] | None = None,
    listen_addrs: Sequence[multiaddr.Multiaddr] | None = None,
    enable_quic: bool = False,
    retry_config: Optional["RetryConfig"] = None,
    connection_config: ConnectionConfig | QUICTransportConfig | None = None,
    tls_client_config: ssl.SSLContext | None = None,
    tls_server_config: ssl.SSLContext | None = None,
) -> INetworkService:
    logger.debug(f"new_swarm: enable_quic={enable_quic}, listen_addrs={listen_addrs}")
    """
    Create a swarm instance based on the parameters.

@@ -193,8 +173,6 @@ def new_swarm(
    :param peerstore_opt: optional peerstore
    :param muxer_preference: optional explicit muxer preference
    :param listen_addrs: optional list of multiaddrs to listen on
    :param enable_quic: enable quic for transport
    :param quic_transport_opt: options for transport
    :return: return a default swarm instance

    Note: Yamux (/yamux/1.0.0) is the preferred stream multiplexer
@@ -207,48 +185,16 @@ def new_swarm(

    id_opt = generate_peer_id_from(key_pair)

    transport: TCP | QUICTransport | ITransport
    quic_transport_opt = connection_config if isinstance(connection_config, QUICTransportConfig) else None

    if listen_addrs is None:
        if enable_quic:
            transport = QUICTransport(key_pair.private_key, config=quic_transport_opt)
        else:
            transport = TCP()
        transport = TCP()
    else:
        # Use transport registry to select the appropriate transport
        from libp2p.transport.transport_registry import create_transport_for_multiaddr

        # Create a temporary upgrader for transport selection
        # We'll create the real upgrader later with the proper configuration
        temp_upgrader = TransportUpgrader(
            secure_transports_by_protocol={},
            muxer_transports_by_protocol={}
        )

        addr = listen_addrs[0]
        logger.debug(f"new_swarm: Creating transport for address: {addr}")
        transport_maybe = create_transport_for_multiaddr(
            addr,
            temp_upgrader,
            private_key=key_pair.private_key,
            config=quic_transport_opt,
            tls_client_config=tls_client_config,
            tls_server_config=tls_server_config
        )

        if transport_maybe is None:
            raise ValueError(f"Unsupported transport for listen_addrs: {listen_addrs}")

        transport = transport_maybe
        logger.debug(f"new_swarm: Created transport: {type(transport)}")

        # If enable_quic is True but we didn't get a QUIC transport, force QUIC
        if enable_quic and not isinstance(transport, QUICTransport):
            logger.debug(f"new_swarm: Forcing QUIC transport (enable_quic=True but got {type(transport)})")
            transport = QUICTransport(key_pair.private_key, config=quic_transport_opt)

        logger.debug(f"new_swarm: Final transport type: {type(transport)}")
        if addr.__contains__("tcp"):
            transport = TCP()
        elif addr.__contains__("quic"):
            raise ValueError("QUIC not yet supported")
        else:
            raise ValueError(f"Unknown transport in listen_addrs: {listen_addrs}")

    # Generate X25519 keypair for Noise
    noise_key_pair = create_new_x25519_key_pair()
@@ -289,19 +235,11 @@ def new_swarm(
        muxer_transports_by_protocol=muxer_transports_by_protocol,
    )

    peerstore = peerstore_opt or PeerStore()
    # Store our key pair in peerstore
    peerstore.add_key_pair(id_opt, key_pair)

    return Swarm(
        id_opt,
        peerstore,
        upgrader,
        transport,
        retry_config=retry_config,
        connection_config=connection_config
    )
    return Swarm(id_opt, peerstore, upgrader, transport)


def new_host(
@@ -315,10 +253,6 @@ def new_host(
    enable_mDNS: bool = False,
    bootstrap: list[str] | None = None,
    negotiate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
    enable_quic: bool = False,
    quic_transport_opt: QUICTransportConfig | None = None,
    tls_client_config: ssl.SSLContext | None = None,
    tls_server_config: ssl.SSLContext | None = None,
) -> IHost:
    """
    Create a new libp2p host based on the given parameters.
@@ -332,37 +266,19 @@ def new_host(
    :param listen_addrs: optional list of multiaddrs to listen on
    :param enable_mDNS: whether to enable mDNS discovery
    :param bootstrap: optional list of bootstrap peer addresses as strings
    :param enable_quic: optional choice to use QUIC for transport
    :param quic_transport_opt: optional configuration for quic transport
    :param tls_client_config: optional TLS client configuration for WebSocket transport
    :param tls_server_config: optional TLS server configuration for WebSocket transport
    :return: return a host instance
    """

    if not enable_quic and quic_transport_opt is not None:
        logger.warning("QUIC config provided but QUIC not enabled, ignoring QUIC config")

    swarm = new_swarm(
        enable_quic=enable_quic,
        key_pair=key_pair,
        muxer_opt=muxer_opt,
        sec_opt=sec_opt,
        peerstore_opt=peerstore_opt,
        muxer_preference=muxer_preference,
        listen_addrs=listen_addrs,
        connection_config=quic_transport_opt if enable_quic else None,
        tls_client_config=tls_client_config,
        tls_server_config=tls_server_config
    )

    if disc_opt is not None:
        return RoutedHost(swarm, disc_opt, enable_mDNS, bootstrap)
    return BasicHost(
        network=swarm,
        enable_mDNS=enable_mDNS,
        bootstrap=bootstrap,
        negotitate_timeout=negotiate_timeout
    )

    return BasicHost(network=swarm,enable_mDNS=enable_mDNS , bootstrap=bootstrap, negotitate_timeout=negotiate_timeout)

__version__ = __version("libp2p")
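A hedged usage sketch of the muxer selection helpers that pass through this hunk unchanged (set_default_muxer and get_default_muxer_options are visible in the context lines above; that both are exported from the top-level libp2p package is an assumption based on their definition in this __init__.py):

from libp2p import new_host, set_default_muxer

set_default_muxer("MPLEX")  # or "YAMUX", the stated default preference
host = new_host()           # picks up the default muxer options at creation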
@@ -970,14 +970,6 @@ class IPeerStore(

    # --------CERTIFIED-ADDR-BOOK----------

    @abstractmethod
    def get_local_record(self) -> Optional["Envelope"]:
        """Get the local-peer-record wrapped in Envelope"""

    @abstractmethod
    def set_local_record(self, envelope: "Envelope") -> None:
        """Set the local-peer-record wrapped in Envelope"""

    @abstractmethod
    def consume_peer_record(self, envelope: "Envelope", ttl: int) -> bool:
        """
@@ -1412,16 +1404,15 @@ class INetwork(ABC):
    ----------
    peerstore : IPeerStore
        The peer store for managing peer information.
    connections : dict[ID, list[INetConn]]
        A mapping of peer IDs to lists of network connections
        (multiple connections per peer).
    connections : dict[ID, INetConn]
        A mapping of peer IDs to network connections.
    listeners : dict[str, IListener]
        A mapping of listener identifiers to listener instances.

    """

    peerstore: IPeerStore
    connections: dict[ID, list[INetConn]]
    connections: dict[ID, INetConn]
    listeners: dict[str, IListener]

    @abstractmethod
@@ -1437,56 +1428,9 @@ class INetwork(ABC):
        """

    @abstractmethod
    def get_connections(self, peer_id: ID | None = None) -> list[INetConn]:
    async def dial_peer(self, peer_id: ID) -> INetConn:
        """
        Get connections for peer (like JS getConnections, Go ConnsToPeer).

        Parameters
        ----------
        peer_id : ID | None
            The peer ID to get connections for. If None, returns all connections.

        Returns
        -------
        list[INetConn]
            List of connections to the specified peer, or all connections
            if peer_id is None.

        """

    @abstractmethod
    def get_connections_map(self) -> dict[ID, list[INetConn]]:
        """
        Get all connections map (like JS getConnectionsMap).

        Returns
        -------
        dict[ID, list[INetConn]]
            The complete mapping of peer IDs to their connection lists.

        """

    @abstractmethod
    def get_connection(self, peer_id: ID) -> INetConn | None:
        """
        Get single connection for backward compatibility.

        Parameters
        ----------
        peer_id : ID
            The peer ID to get a connection for.

        Returns
        -------
        INetConn | None
            The first available connection, or None if no connections exist.

        """

    @abstractmethod
    async def dial_peer(self, peer_id: ID) -> list[INetConn]:
        """
        Create connections to the specified peer with load balancing.
        Create a connection to the specified peer.

        Parameters
        ----------
@@ -1495,8 +1439,8 @@ class INetwork(ABC):

        Returns
        -------
        list[INetConn]
            List of established connections to the peer.
        INetConn
            The network connection instance to the specified peer.

        Raises
        ------
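A hedged sketch contrasting the two INetwork shapes this hunk moves between: the multi-connection form keeps dict[ID, list[INetConn]] plus the get_connections()/get_connection() accessors, while the form on the other side of the diff stores a single INetConn per peer. Under the single-connection shape, lookup collapses to a plain dict access:

def first_connection(network, peer_id):
    # dict[ID, INetConn]: at most one live connection per peer, or None.
    return network.connections.get(peer_id)

Under the list-based shape the same query is network.get_connections(peer_id) guarded for emptiness, which is exactly what the removed get_connection() convenience wrapped.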
@@ -5,17 +5,17 @@ from collections.abc import (
)
from typing import TYPE_CHECKING, NewType, Union, cast

from libp2p.transport.quic.stream import QUICStream

if TYPE_CHECKING:
    from libp2p.abc import IMuxedConn, IMuxedStream, INetStream, ISecureTransport
    from libp2p.transport.quic.connection import QUICConnection
    from libp2p.abc import (
        IMuxedConn,
        INetStream,
        ISecureTransport,
    )
else:
    IMuxedConn = cast(type, object)
    INetStream = cast(type, object)
    ISecureTransport = cast(type, object)
    IMuxedStream = cast(type, object)
    QUICConnection = cast(type, object)


from libp2p.io.abc import (
    ReadWriteCloser,
@@ -37,6 +37,3 @@ SyncValidatorFn = Callable[[ID, rpc_pb2.Message], bool]
AsyncValidatorFn = Callable[[ID, rpc_pb2.Message], Awaitable[bool]]
ValidatorFn = Union[SyncValidatorFn, AsyncValidatorFn]
UnsubscribeFn = Callable[[], Awaitable[None]]
TQUICStreamHandlerFn = Callable[[QUICStream], Awaitable[None]]
TQUICConnHandlerFn = Callable[[QUICConnection], Awaitable[None]]
MessageID = NewType("MessageID", str)
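The ValidatorFn union kept by this hunk admits both synchronous and asynchronous pubsub validators; a hedged minimal pair (the argument names follow the Callable signatures above, and reading message.data assumes the rpc_pb2.Message field of that name):

def allow_all(peer_id, message) -> bool:
    # SyncValidatorFn: a plain callable returning bool.
    return True

async def reject_empty(peer_id, message) -> bool:
    # AsyncValidatorFn: the awaitable variant, e.g. for I/O-backed checks.
    return bool(message.data)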
@ -2,20 +2,15 @@ import logging
|
||||
|
||||
from multiaddr import Multiaddr
|
||||
from multiaddr.resolvers import DNSResolver
|
||||
import trio
|
||||
|
||||
from libp2p.abc import ID, INetworkService, PeerInfo
|
||||
from libp2p.discovery.bootstrap.utils import validate_bootstrap_addresses
|
||||
from libp2p.discovery.events.peerDiscovery import peerDiscovery
|
||||
from libp2p.network.exceptions import SwarmException
|
||||
from libp2p.peer.peerinfo import info_from_p2p_addr
|
||||
from libp2p.peer.peerstore import PERMANENT_ADDR_TTL
|
||||
|
||||
logger = logging.getLogger("libp2p.discovery.bootstrap")
|
||||
resolver = DNSResolver()
|
||||
|
||||
DEFAULT_CONNECTION_TIMEOUT = 10
|
||||
|
||||
|
||||
class BootstrapDiscovery:
|
||||
"""
|
||||
@ -24,147 +19,68 @@ class BootstrapDiscovery:
    """

    def __init__(self, swarm: INetworkService, bootstrap_addrs: list[str]):
        """
        Initialize BootstrapDiscovery.

        Args:
            swarm: The network service (swarm) instance
            bootstrap_addrs: List of bootstrap peer multiaddresses

        """
        self.swarm = swarm
        self.peerstore = swarm.peerstore
        self.bootstrap_addrs = bootstrap_addrs or []
        self.discovered_peers: set[str] = set()
        self.connection_timeout: int = DEFAULT_CONNECTION_TIMEOUT

    async def start(self) -> None:
        """Process bootstrap addresses and emit peer discovery events in parallel."""
        logger.info(
        """Process bootstrap addresses and emit peer discovery events."""
        logger.debug(
            f"Starting bootstrap discovery with "
            f"{len(self.bootstrap_addrs)} bootstrap addresses"
        )

        # Show all bootstrap addresses being processed
        for i, addr in enumerate(self.bootstrap_addrs):
            logger.debug(f"{i + 1}. {addr}")

        # Validate and filter bootstrap addresses
        self.bootstrap_addrs = validate_bootstrap_addresses(self.bootstrap_addrs)
        logger.info(f"Valid addresses after validation: {len(self.bootstrap_addrs)}")

        # Use Trio nursery for PARALLEL address processing
        try:
            async with trio.open_nursery() as nursery:
                logger.debug(
                    f"Starting {len(self.bootstrap_addrs)} parallel address "
                    f"processing tasks"
                )

                # Start all bootstrap address processing tasks in parallel
                for addr_str in self.bootstrap_addrs:
                    logger.debug(f"Starting parallel task for: {addr_str}")
                    nursery.start_soon(self._process_bootstrap_addr, addr_str)

                # The nursery will wait for all address processing tasks to complete
                logger.debug(
                    "Nursery active - waiting for address processing tasks to complete"
                )

        except trio.Cancelled:
            logger.debug("Bootstrap address processing cancelled - cleaning up tasks")
            raise
        except Exception as e:
            logger.error(f"Bootstrap address processing failed: {e}")
            raise

        logger.info("Bootstrap discovery startup complete - all tasks finished")
        for addr_str in self.bootstrap_addrs:
            try:
                await self._process_bootstrap_addr(addr_str)
            except Exception as e:
                logger.debug(f"Failed to process bootstrap address {addr_str}: {e}")

    def stop(self) -> None:
        """Clean up bootstrap discovery resources."""
        logger.info("Stopping bootstrap discovery and cleaning up tasks")

        # Clear discovered peers
        logger.debug("Stopping bootstrap discovery")
        self.discovered_peers.clear()

        logger.debug("Bootstrap discovery cleanup completed")

    async def _process_bootstrap_addr(self, addr_str: str) -> None:
        """Convert string address to PeerInfo and add to peerstore."""
        try:
            try:
                multiaddr = Multiaddr(addr_str)
            except Exception as e:
                logger.debug(f"Invalid multiaddr format '{addr_str}': {e}")
                return

            if self.is_dns_addr(multiaddr):
                resolved_addrs = await resolver.resolve(multiaddr)
                if resolved_addrs is None:
                    logger.warning(f"DNS resolution returned None for: {addr_str}")
                    return

                peer_id_str = multiaddr.get_peer_id()
                if peer_id_str is None:
                    logger.warning(f"Missing peer ID in DNS address: {addr_str}")
                    return
                peer_id = ID.from_base58(peer_id_str)
                addrs = [addr for addr in resolved_addrs]
                if not addrs:
                    logger.warning(f"No addresses resolved for DNS address: {addr_str}")
                    return
                peer_info = PeerInfo(peer_id, addrs)
                await self.add_addr(peer_info)
            else:
                peer_info = info_from_p2p_addr(multiaddr)
                await self.add_addr(peer_info)
            multiaddr = Multiaddr(addr_str)
        except Exception as e:
            logger.warning(f"Failed to process bootstrap address {addr_str}: {e}")
            logger.debug(f"Invalid multiaddr format '{addr_str}': {e}")
            return
        if self.is_dns_addr(multiaddr):
            resolved_addrs = await resolver.resolve(multiaddr)
            peer_id_str = multiaddr.get_peer_id()
            if peer_id_str is None:
                logger.warning(f"Missing peer ID in DNS address: {addr_str}")
                return
            peer_id = ID.from_base58(peer_id_str)
            addrs = [addr for addr in resolved_addrs]
            if not addrs:
                logger.warning(f"No addresses resolved for DNS address: {addr_str}")
                return
            peer_info = PeerInfo(peer_id, addrs)
            self.add_addr(peer_info)
        else:
            self.add_addr(info_from_p2p_addr(multiaddr))

    def is_dns_addr(self, addr: Multiaddr) -> bool:
        """Check if the address is a DNS address."""
        return any(protocol.name == "dnsaddr" for protocol in addr.protocols())

    async def add_addr(self, peer_info: PeerInfo) -> None:
        """
        Add a peer to the peerstore, emit discovery event,
        and attempt connection in parallel.
        """
        logger.debug(
            f"Adding peer {peer_info.peer_id} with {len(peer_info.addrs)} addresses"
        )

    def add_addr(self, peer_info: PeerInfo) -> None:
        """Add a peer to the peerstore and emit discovery event."""
        # Skip if it's our own peer
        if peer_info.peer_id == self.swarm.get_peer_id():
            logger.debug(f"Skipping own peer ID: {peer_info.peer_id}")
            return

        # Filter addresses to only include IPv4+TCP (only supported protocol)
        ipv4_tcp_addrs = []
        filtered_out_addrs = []

        for addr in peer_info.addrs:
            if self._is_ipv4_tcp_addr(addr):
                ipv4_tcp_addrs.append(addr)
            else:
                filtered_out_addrs.append(addr)

        # Log filtering results
        logger.debug(
            f"Address filtering for {peer_info.peer_id}: "
            f"{len(ipv4_tcp_addrs)} IPv4+TCP, {len(filtered_out_addrs)} filtered"
        )

        # Skip peer if no IPv4+TCP addresses available
        if not ipv4_tcp_addrs:
            logger.warning(
                f"❌ No IPv4+TCP addresses for {peer_info.peer_id} - "
                f"skipping connection attempts"
            )
            return

        # Add only IPv4+TCP addresses to peerstore
        self.peerstore.add_addrs(peer_info.peer_id, ipv4_tcp_addrs, PERMANENT_ADDR_TTL)
        # Always add addresses to peerstore (allows multiple addresses for same peer)
        self.peerstore.add_addrs(peer_info.peer_id, peer_info.addrs, 10)

        # Only emit discovery event if this is the first time we see this peer
        peer_id_str = str(peer_info.peer_id)
@ -173,140 +89,6 @@ class BootstrapDiscovery:
            self.discovered_peers.add(peer_id_str)
            # Emit peer discovery event
            peerDiscovery.emit_peer_discovered(peer_info)
            logger.info(f"Peer discovered: {peer_info.peer_id}")

            # Connect to peer (parallel across different bootstrap addresses)
            logger.debug("Connecting to discovered peer...")
            await self._connect_to_peer(peer_info.peer_id)

            logger.debug(f"Peer discovered: {peer_info.peer_id}")
        else:
            logger.debug(
                f"Additional addresses added for existing peer: {peer_info.peer_id}"
            )
            # Even for existing peers, try to connect if not already connected
            if peer_info.peer_id not in self.swarm.connections:
                logger.debug("Connecting to existing peer...")
                await self._connect_to_peer(peer_info.peer_id)

    async def _connect_to_peer(self, peer_id: ID) -> None:
        """
        Attempt to establish a connection to a peer with timeout.

        Uses swarm.dial_peer to connect using addresses stored in peerstore.
        Times out after self.connection_timeout seconds to prevent hanging.
        """
        logger.debug(f"Connection attempt for peer: {peer_id}")

        # Pre-connection validation: Check if already connected
        if peer_id in self.swarm.connections:
            logger.debug(
                f"Already connected to {peer_id} - skipping connection attempt"
            )
            return

        # Check available addresses before attempting connection
        available_addrs = self.peerstore.addrs(peer_id)
        logger.debug(f"Connecting to {peer_id} ({len(available_addrs)} addresses)")

        if not available_addrs:
            logger.error(f"❌ No addresses available for {peer_id} - cannot connect")
            return

        # Record start time for connection attempt monitoring
        connection_start_time = trio.current_time()

        try:
            with trio.move_on_after(self.connection_timeout):
                # Log connection attempt
                logger.debug(
                    f"Attempting connection to {peer_id} using "
                    f"{len(available_addrs)} addresses"
                )

                # Use swarm.dial_peer to connect using stored addresses
                await self.swarm.dial_peer(peer_id)

                # Calculate connection time
                connection_time = trio.current_time() - connection_start_time

                # Post-connection validation: Verify connection was actually established
                if peer_id in self.swarm.connections:
                    logger.info(
                        f"✅ Connected to {peer_id} (took {connection_time:.2f}s)"
                    )

                else:
                    logger.warning(
                        f"Dial succeeded but connection not found for {peer_id}"
                    )
        except trio.TooSlowError:
            logger.warning(
                f"❌ Connection to {peer_id} timed out after {self.connection_timeout}s"
            )
        except SwarmException as e:
            # Calculate failed connection time
            failed_connection_time = trio.current_time() - connection_start_time

            # Enhanced error logging
            error_msg = str(e)
            if "no addresses established a successful connection" in error_msg:
                logger.warning(
                    f"❌ Failed to connect to {peer_id} after trying all "
                    f"{len(available_addrs)} addresses "
                    f"(took {failed_connection_time:.2f}s)"
                )
                # Log individual address failures if this is a MultiError
                if (
                    e.__cause__ is not None
                    and hasattr(e.__cause__, "exceptions")
                    and getattr(e.__cause__, "exceptions", None) is not None
                ):
                    exceptions_list = getattr(e.__cause__, "exceptions")
                    logger.debug("📋 Individual address failure details:")
                    for i, addr_exception in enumerate(exceptions_list, 1):
                        logger.debug(f"Address {i}: {addr_exception}")
                        # Also log the actual address that failed
                        if i <= len(available_addrs):
                            logger.debug(f"Failed address: {available_addrs[i - 1]}")
                else:
                    logger.warning("No detailed exception information available")
            else:
                logger.warning(
                    f"❌ Failed to connect to {peer_id}: {e} "
                    f"(took {failed_connection_time:.2f}s)"
                )

        except Exception as e:
            # Handle unexpected errors that aren't swarm-specific
            failed_connection_time = trio.current_time() - connection_start_time
            logger.error(
                f"❌ Unexpected error connecting to {peer_id}: "
                f"{e} (took {failed_connection_time:.2f}s)"
            )
            # Don't re-raise to prevent killing the nursery and other parallel tasks

    def _is_ipv4_tcp_addr(self, addr: Multiaddr) -> bool:
        """
        Check if address is IPv4 with TCP protocol only.

        Filters out IPv6, UDP, QUIC, WebSocket, and other unsupported protocols.
        Only IPv4+TCP addresses are supported by the current transport.
        """
        try:
            protocols = addr.protocols()

            # Must have IPv4 protocol
            has_ipv4 = any(p.name == "ip4" for p in protocols)
            if not has_ipv4:
                return False

            # Must have TCP protocol
            has_tcp = any(p.name == "tcp" for p in protocols)
            if not has_tcp:
                return False

            return True

        except Exception:
            # If we can't parse the address, don't use it
            return False
            logger.debug(f"Additional addresses added for peer: {peer_info.peer_id}")
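Taken together, the class above is driven roughly as in this sketch (swarm is assumed to be an INetworkService, and the multiaddr below is a placeholder, not a real bootstrap node):

    async def bootstrap_example(swarm: INetworkService) -> None:
        addrs = ["/ip4/203.0.113.7/tcp/4001/p2p/QmPlaceholderPeerId"]  # hypothetical
        discovery = BootstrapDiscovery(swarm, addrs)
        # Validates and resolves the addresses, stores them in the peerstore,
        # and (on the removed side of the diff) also dials each discovered peer.
        await discovery.start()
        discovery.stop()  # clears discovered-peer state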
@ -1,17 +0,0 @@
"""Random walk discovery modules for py-libp2p."""

from .rt_refresh_manager import RTRefreshManager
from .random_walk import RandomWalk
from .exceptions import (
    RoutingTableRefreshError,
    RandomWalkError,
    PeerValidationError,
)

__all__ = [
    "RTRefreshManager",
    "RandomWalk",
    "RoutingTableRefreshError",
    "RandomWalkError",
    "PeerValidationError",
]
@ -1,16 +0,0 @@
from typing import Final

# Timing constants (matching go-libp2p)
PEER_PING_TIMEOUT: Final[float] = 10.0  # seconds
REFRESH_QUERY_TIMEOUT: Final[float] = 60.0  # seconds
REFRESH_INTERVAL: Final[float] = 300.0  # 5 minutes
SUCCESSFUL_OUTBOUND_QUERY_GRACE_PERIOD: Final[float] = 60.0  # 1 minute

# Routing table thresholds
MIN_RT_REFRESH_THRESHOLD: Final[int] = 4  # Minimum peers before triggering refresh
MAX_N_BOOTSTRAPPERS: Final[int] = 2  # Maximum bootstrap peers to try

# Random walk specific
RANDOM_WALK_CONCURRENCY: Final[int] = 3  # Number of concurrent random walks
RANDOM_WALK_ENABLED: Final[bool] = True  # Enable automatic random walks
RANDOM_WALK_RT_THRESHOLD: Final[int] = 20  # RT size threshold for peerstore fallback
@ -1,19 +0,0 @@
from libp2p.exceptions import BaseLibp2pError


class RoutingTableRefreshError(BaseLibp2pError):
    """Base exception for routing table refresh operations."""

    pass


class RandomWalkError(RoutingTableRefreshError):
    """Exception raised during random walk operations."""

    pass


class PeerValidationError(RoutingTableRefreshError):
    """Exception raised when peer validation fails."""

    pass
@ -1,218 +0,0 @@
from collections.abc import Awaitable, Callable
import logging
import secrets

import trio

from libp2p.abc import IHost
from libp2p.discovery.random_walk.config import (
    RANDOM_WALK_CONCURRENCY,
    RANDOM_WALK_RT_THRESHOLD,
    REFRESH_QUERY_TIMEOUT,
)
from libp2p.discovery.random_walk.exceptions import RandomWalkError
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo

logger = logging.getLogger("libp2p.discovery.random_walk")


class RandomWalk:
    """
    Random Walk implementation for peer discovery in Kademlia DHT.

    Generates random peer IDs and performs FIND_NODE queries to discover
    new peers and populate the routing table.
    """

    def __init__(
        self,
        host: IHost,
        local_peer_id: ID,
        query_function: Callable[[bytes], Awaitable[list[ID]]],
    ):
        """
        Initialize Random Walk module.

        Args:
            host: The libp2p host instance
            local_peer_id: Local peer ID
            query_function: Function to query for closest peers given target key bytes

        """
        self.host = host
        self.local_peer_id = local_peer_id
        self.query_function = query_function

    def generate_random_peer_id(self) -> str:
        """
        Generate a completely random peer ID
        for random walk queries.

        Returns:
            Random peer ID as string

        """
        # Generate 32 random bytes (256 bits) - same as go-libp2p
        random_bytes = secrets.token_bytes(32)
        # Convert to hex string for query
        return random_bytes.hex()

    async def perform_random_walk(self) -> list[PeerInfo]:
        """
        Perform a single random walk operation.

        Returns:
            List of validated peers discovered during the walk

        """
        try:
            # Generate random peer ID
            random_peer_id = self.generate_random_peer_id()
            logger.info(f"Starting random walk for peer ID: {random_peer_id}")

            # Perform FIND_NODE query
            discovered_peer_ids: list[ID] = []

            with trio.move_on_after(REFRESH_QUERY_TIMEOUT):
                # Call the query function with target key bytes
                target_key = bytes.fromhex(random_peer_id)
                discovered_peer_ids = await self.query_function(target_key) or []

            if not discovered_peer_ids:
                logger.debug(f"No peers discovered in random walk for {random_peer_id}")
                return []

            logger.info(
                f"Discovered {len(discovered_peer_ids)} peers in random walk "
                f"for {random_peer_id[:8]}..."  # Show only first 8 chars for brevity
            )

            # Convert peer IDs to PeerInfo objects and validate
            validated_peers: list[PeerInfo] = []

            for peer_id in discovered_peer_ids:
                try:
                    # Get addresses from peerstore
                    addrs = self.host.get_peerstore().addrs(peer_id)
                    if addrs:
                        peer_info = PeerInfo(peer_id, addrs)
                        validated_peers.append(peer_info)
                except Exception as e:
                    logger.debug(f"Failed to create PeerInfo for {peer_id}: {e}")
                    continue

            return validated_peers

        except Exception as e:
            logger.error(f"Random walk failed: {e}")
            raise RandomWalkError(f"Random walk operation failed: {e}") from e

    async def run_concurrent_random_walks(
        self, count: int = RANDOM_WALK_CONCURRENCY, current_routing_table_size: int = 0
    ) -> list[PeerInfo]:
        """
        Run multiple random walks concurrently.

        Args:
            count: Number of concurrent random walks to perform
            current_routing_table_size: Current size of routing table (for optimization)

        Returns:
            Combined list of all validated peers discovered

        """
        all_validated_peers: list[PeerInfo] = []
        logger.info(f"Starting {count} concurrent random walks")

        # First, try to add peers from peerstore if routing table is small
        if current_routing_table_size < RANDOM_WALK_RT_THRESHOLD:
            try:
                peerstore_peers = self._get_peerstore_peers()
                if peerstore_peers:
                    logger.debug(
                        f"RT size ({current_routing_table_size}) below threshold, "
                        f"adding {len(peerstore_peers)} peerstore peers"
                    )
                    all_validated_peers.extend(peerstore_peers)
            except Exception as e:
                logger.warning(f"Error processing peerstore peers: {e}")

        async def single_walk() -> None:
            try:
                peers = await self.perform_random_walk()
                all_validated_peers.extend(peers)
            except Exception as e:
                logger.warning(f"Concurrent random walk failed: {e}")
                return

        # Run concurrent random walks
        async with trio.open_nursery() as nursery:
            for _ in range(count):
                nursery.start_soon(single_walk)

        # Remove duplicates based on peer ID
        unique_peers = {}
        for peer in all_validated_peers:
            unique_peers[peer.peer_id] = peer

        result = list(unique_peers.values())
        logger.info(
            f"Concurrent random walks completed: {len(result)} unique peers discovered"
        )
        return result

    def _get_peerstore_peers(self) -> list[PeerInfo]:
        """
        Get peer info objects from the host's peerstore.

        Returns:
            List of PeerInfo objects from peerstore

        """
        try:
            peerstore = self.host.get_peerstore()
            peer_ids = peerstore.peers_with_addrs()

            peer_infos = []
            for peer_id in peer_ids:
                try:
                    # Skip local peer
                    if peer_id == self.local_peer_id:
                        continue

                    peer_info = peerstore.peer_info(peer_id)
                    if peer_info and peer_info.addrs:
                        # Filter for compatible addresses (TCP + IPv4)
                        if self._has_compatible_addresses(peer_info):
                            peer_infos.append(peer_info)
                except Exception as e:
                    logger.debug(f"Error getting peer info for {peer_id}: {e}")

            return peer_infos

        except Exception as e:
            logger.warning(f"Error accessing peerstore: {e}")
            return []

    def _has_compatible_addresses(self, peer_info: PeerInfo) -> bool:
        """
        Check if a peer has TCP+IPv4 compatible addresses.

        Args:
            peer_info: PeerInfo to check

        Returns:
            True if peer has compatible addresses

        """
        if not peer_info.addrs:
            return False

        for addr in peer_info.addrs:
            addr_str = str(addr)
            # Check for TCP and IPv4 compatibility, avoid QUIC
            if "/tcp/" in addr_str and "/ip4/" in addr_str and "/quic" not in addr_str:
                return True

        return False
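A sketch of driving the module above by hand (find_closest is a stand-in for a real DHT query function and must match Callable[[bytes], Awaitable[list[ID]]]; host is assumed to be an IHost):

    async def find_closest(target_key: bytes) -> list[ID]:
        # Stand-in for e.g. peer_routing.find_closest_peers_network(target_key).
        return []

    async def walk_once(host: IHost) -> list[PeerInfo]:
        walker = RandomWalk(
            host=host,
            local_peer_id=host.get_id(),
            query_function=find_closest,
        )
        # Runs three walks concurrently and deduplicates results by peer ID.
        return await walker.run_concurrent_random_walks(count=3)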
@ -1,208 +0,0 @@
from collections.abc import Awaitable, Callable
import logging
import time
from typing import Protocol

import trio

from libp2p.abc import IHost
from libp2p.discovery.random_walk.config import (
    MIN_RT_REFRESH_THRESHOLD,
    RANDOM_WALK_CONCURRENCY,
    RANDOM_WALK_ENABLED,
    REFRESH_INTERVAL,
)
from libp2p.discovery.random_walk.exceptions import RoutingTableRefreshError
from libp2p.discovery.random_walk.random_walk import RandomWalk
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo


class RoutingTableProtocol(Protocol):
    """Protocol for routing table operations needed by RT refresh manager."""

    def size(self) -> int:
        """Return the current size of the routing table."""
        ...

    async def add_peer(self, peer_obj: PeerInfo) -> bool:
        """Add a peer to the routing table."""
        ...


logger = logging.getLogger("libp2p.discovery.random_walk.rt_refresh_manager")


class RTRefreshManager:
    """
    Routing Table Refresh Manager for py-libp2p.

    Manages periodic routing table refreshes and random walk operations
    to maintain routing table health and discover new peers.
    """

    def __init__(
        self,
        host: IHost,
        routing_table: RoutingTableProtocol,
        local_peer_id: ID,
        query_function: Callable[[bytes], Awaitable[list[ID]]],
        enable_auto_refresh: bool = RANDOM_WALK_ENABLED,
        refresh_interval: float = REFRESH_INTERVAL,
        min_refresh_threshold: int = MIN_RT_REFRESH_THRESHOLD,
    ):
        """
        Initialize RT Refresh Manager.

        Args:
            host: The libp2p host instance
            routing_table: Routing table of host
            local_peer_id: Local peer ID
            query_function: Function to query for closest peers given target key bytes
            enable_auto_refresh: Whether to enable automatic refresh
            refresh_interval: Interval between refreshes in seconds
            min_refresh_threshold: Minimum RT size before triggering refresh

        """
        self.host = host
        self.routing_table = routing_table
        self.local_peer_id = local_peer_id
        self.query_function = query_function

        self.enable_auto_refresh = enable_auto_refresh
        self.refresh_interval = refresh_interval
        self.min_refresh_threshold = min_refresh_threshold

        # Initialize random walk module
        self.random_walk = RandomWalk(
            host=host,
            local_peer_id=self.local_peer_id,
            query_function=query_function,
        )

        # Control variables
        self._running = False
        self._nursery: trio.Nursery | None = None

        # Tracking
        self._last_refresh_time = 0.0
        self._refresh_done_callbacks: list[Callable[[], None]] = []

    async def start(self) -> None:
        """Start the RT Refresh Manager."""
        if self._running:
            logger.warning("RT Refresh Manager is already running")
            return

        self._running = True

        logger.info("Starting RT Refresh Manager")

        # Start the main loop
        async with trio.open_nursery() as nursery:
            self._nursery = nursery
            nursery.start_soon(self._main_loop)

    async def stop(self) -> None:
        """Stop the RT Refresh Manager."""
        if not self._running:
            return

        logger.info("Stopping RT Refresh Manager")
        self._running = False

    async def _main_loop(self) -> None:
        """Main loop for the RT Refresh Manager."""
        logger.info("RT Refresh Manager main loop started")

        # Initial refresh if auto-refresh is enabled
        if self.enable_auto_refresh:
            await self._do_refresh(force=True)

        try:
            while self._running:
                async with trio.open_nursery() as nursery:
                    # Schedule periodic refresh if enabled
                    if self.enable_auto_refresh:
                        nursery.start_soon(self._periodic_refresh_task)

        except Exception as e:
            logger.error(f"RT Refresh Manager main loop error: {e}")
        finally:
            logger.info("RT Refresh Manager main loop stopped")

    async def _periodic_refresh_task(self) -> None:
        """Task for periodic refreshes."""
        while self._running:
            await trio.sleep(self.refresh_interval)
            if self._running:
                await self._do_refresh()

    async def _do_refresh(self, force: bool = False) -> None:
        """
        Perform routing table refresh operation.

        Args:
            force: Whether to force refresh regardless of timing

        """
        try:
            current_time = time.time()

            # Check if refresh is needed
            if not force:
                if current_time - self._last_refresh_time < self.refresh_interval:
                    logger.debug("Skipping refresh: interval not elapsed")
                    return

                if self.routing_table.size() >= self.min_refresh_threshold:
                    logger.debug("Skipping refresh: routing table size above threshold")
                    return

            logger.info(f"Starting routing table refresh (force={force})")
            start_time = current_time

            # Perform random walks to discover new peers
            logger.info("Running concurrent random walks to discover new peers")
            current_rt_size = self.routing_table.size()
            discovered_peers = await self.random_walk.run_concurrent_random_walks(
                count=RANDOM_WALK_CONCURRENCY,
                current_routing_table_size=current_rt_size,
            )

            # Add discovered peers to routing table
            added_count = 0
            for peer_info in discovered_peers:
                result = await self.routing_table.add_peer(peer_info)
                if result:
                    added_count += 1

            self._last_refresh_time = current_time

            duration = time.time() - start_time
            logger.info(
                f"Routing table refresh completed: "
                f"{added_count}/{len(discovered_peers)} peers added, "
                f"RT size: {self.routing_table.size()}, "
                f"duration: {duration:.2f}s"
            )

            # Notify refresh completion
            for callback in self._refresh_done_callbacks:
                try:
                    callback()
                except Exception as e:
                    logger.warning(f"Refresh callback error: {e}")

        except Exception as e:
            logger.error(f"Routing table refresh failed: {e}")
            raise RoutingTableRefreshError(f"Refresh operation failed: {e}") from e

    def add_refresh_done_callback(self, callback: Callable[[], None]) -> None:
        """Add a callback to be called when refresh completes."""
        self._refresh_done_callbacks.append(callback)

    def remove_refresh_done_callback(self, callback: Callable[[], None]) -> None:
        """Remove a refresh completion callback."""
        if callback in self._refresh_done_callbacks:
            self._refresh_done_callbacks.remove(callback)
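For orientation, wiring the manager above looks roughly like this sketch (host, routing_table, and query_fn are assumed to exist and to match the types accepted by __init__):

    async def run_refresh_manager(host: IHost) -> None:
        manager = RTRefreshManager(
            host=host,
            routing_table=routing_table,
            local_peer_id=host.get_id(),
            query_function=query_fn,
            refresh_interval=300.0,  # mirrors REFRESH_INTERVAL above
        )
        manager.add_refresh_done_callback(lambda: logger.debug("refresh done"))
        async with trio.open_nursery() as nursery:
            nursery.start_soon(manager.start)  # blocks in its own task until stopped
            await trio.sleep(600)              # illustrative run window
            await manager.stop()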
@ -43,7 +43,6 @@ from libp2p.peer.id import (
from libp2p.peer.peerinfo import (
    PeerInfo,
)
from libp2p.peer.peerstore import create_signed_peer_record
from libp2p.protocol_muxer.exceptions import (
    MultiselectClientError,
    MultiselectError,
@ -111,14 +110,6 @@ class BasicHost(IHost):
        if bootstrap:
            self.bootstrap = BootstrapDiscovery(network, bootstrap)

        # Cache a signed-record for the local node in the PeerStore
        envelope = create_signed_peer_record(
            self.get_id(),
            self.get_addrs(),
            self.get_private_key(),
        )
        self.get_peerstore().set_local_record(envelope)

    def get_id(self) -> ID:
        """
        :return: peer_id of host
@ -213,6 +204,7 @@ class BasicHost(IHost):
        self,
        peer_id: ID,
        protocol_ids: Sequence[TProtocol],
        negotiate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
    ) -> INetStream:
        """
        :param peer_id: peer_id that host is connecting
@ -226,7 +218,7 @@ class BasicHost(IHost):
            selected_protocol = await self.multiselect_client.select_one_of(
                list(protocol_ids),
                MultiselectCommunicator(net_stream),
                self.negotiate_timeout,
                negotiate_timeout,
            )
        except MultiselectClientError as error:
            logger.debug("failed to open a stream to peer %s, error=%s", peer_id, error)
@ -296,11 +288,6 @@ class BasicHost(IHost):
            protocol, handler = await self.multiselect.negotiate(
                MultiselectCommunicator(net_stream), self.negotiate_timeout
            )
            if protocol is None:
                await net_stream.reset()
                raise StreamFailure(
                    "Failed to negotiate protocol: no protocol selected"
                )
        except MultiselectError as error:
            peer_id = net_stream.muxed_conn.peer_id
            logger.debug(
@ -308,13 +295,6 @@ class BasicHost(IHost):
            )
            await net_stream.reset()
            return
        if protocol is None:
            logger.debug(
                "no protocol negotiated, closing stream from peer %s",
                net_stream.muxed_conn.peer_id,
            )
            await net_stream.reset()
            return
        net_stream.set_protocol(protocol)
        if handler is None:
            logger.debug(
@ -342,7 +322,7 @@ class BasicHost(IHost):
        :param peer_id: ID of the peer to check
        :return: True if peer has an active connection, False otherwise
        """
        return len(self._network.get_connections(peer_id)) > 0
        return peer_id in self._network.connections

    def get_peer_connection_info(self, peer_id: ID) -> INetConn | None:
        """
@ -351,4 +331,4 @@ class BasicHost(IHost):
        :param peer_id: ID of the peer to get info for
        :return: Connection object if peer is connected, None otherwise
        """
        return self._network.get_connection(peer_id)
        return self._network.connections.get(peer_id)
@ -15,7 +15,8 @@ from libp2p.custom_types import (
from libp2p.network.stream.exceptions import (
    StreamClosed,
)
from libp2p.peer.peerstore import env_to_send_in_RPC
from libp2p.peer.envelope import seal_record
from libp2p.peer.peer_record import PeerRecord
from libp2p.utils import (
    decode_varint_with_size,
    get_agent_version,
@ -65,7 +66,9 @@ def _mk_identify_protobuf(
    protocols = tuple(str(p) for p in host.get_mux().get_protocols() if p is not None)

    # Create a signed peer-record for the remote peer
    envelope_bytes, _ = env_to_send_in_RPC(host)
    record = PeerRecord(host.get_id(), host.get_addrs())
    envelope = seal_record(record, host.get_private_key())
    protobuf = envelope.marshal_envelope()

    observed_addr = observed_multiaddr.to_bytes() if observed_multiaddr else b""
    return Identify(
@ -75,7 +78,7 @@ def _mk_identify_protobuf(
        listen_addrs=map(_multiaddr_to_bytes, laddrs),
        observed_addr=observed_addr,
        protocols=protocols,
        signedPeerRecord=envelope_bytes,
        signedPeerRecord=protobuf,
    )

@ -5,7 +5,6 @@ This module provides a complete Distributed Hash Table (DHT)
implementation based on the Kademlia algorithm and protocol.
"""

from collections.abc import Awaitable, Callable
from enum import (
    Enum,
)
@ -21,19 +20,15 @@ import varint
from libp2p.abc import (
    IHost,
)
from libp2p.discovery.random_walk.rt_refresh_manager import RTRefreshManager
from libp2p.kad_dht.utils import maybe_consume_signed_record
from libp2p.network.stream.net_stream import (
    INetStream,
)
from libp2p.peer.envelope import Envelope
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerinfo import (
    PeerInfo,
)
from libp2p.peer.peerstore import env_to_send_in_RPC
from libp2p.tools.async_service import (
    Service,
)
@ -78,27 +73,14 @@ class KadDHT(Service):

    This class provides a DHT implementation that combines routing table management,
    peer discovery, content routing, and value storage.

    Optional Random Walk feature enhances peer discovery by automatically
    performing periodic random queries to discover new peers and maintain
    routing table health.

    Example:
        # Basic DHT without random walk (default)
        dht = KadDHT(host, DHTMode.SERVER)

        # DHT with random walk enabled for enhanced peer discovery
        dht = KadDHT(host, DHTMode.SERVER, enable_random_walk=True)

    """

    def __init__(self, host: IHost, mode: DHTMode, enable_random_walk: bool = False):
    def __init__(self, host: IHost, mode: DHTMode):
        """
        Initialize a new Kademlia DHT node.

        :param host: The libp2p host.
        :param mode: The mode of host (Client or Server) - must be DHTMode enum
        :param enable_random_walk: Whether to enable automatic random walk
        """
        super().__init__()

@ -110,7 +92,6 @@ class KadDHT(Service):
            raise TypeError(f"mode must be DHTMode enum, got {type(mode)}")

        self.mode = mode
        self.enable_random_walk = enable_random_walk

        # Initialize the routing table
        self.routing_table = RoutingTable(self.local_peer_id, self.host)
@ -127,56 +108,13 @@ class KadDHT(Service):
        # Last time we republished provider records
        self._last_provider_republish = time.time()

        # Initialize RT Refresh Manager (only if random walk is enabled)
        self.rt_refresh_manager: RTRefreshManager | None = None
        if self.enable_random_walk:
            self.rt_refresh_manager = RTRefreshManager(
                host=self.host,
                routing_table=self.routing_table,
                local_peer_id=self.local_peer_id,
                query_function=self._create_query_function(),
                enable_auto_refresh=True,
            )

        # Set protocol handlers
        host.set_stream_handler(PROTOCOL_ID, self.handle_stream)

    def _create_query_function(self) -> Callable[[bytes], Awaitable[list[ID]]]:
        """
        Create a query function that wraps peer_routing.find_closest_peers_network.

        This function is used by the RandomWalk module to query for peers without
        directly importing PeerRouting, avoiding circular import issues.

        Returns:
            Callable that takes target_key bytes and returns list of peer IDs

        """

        async def query_function(target_key: bytes) -> list[ID]:
            """Query for closest peers to target key."""
            return await self.peer_routing.find_closest_peers_network(target_key)

        return query_function

    async def run(self) -> None:
        """Run the DHT service."""
        logger.info(f"Starting Kademlia DHT with peer ID {self.local_peer_id}")

        # Start the RT Refresh Manager in parallel with the main DHT service
        async with trio.open_nursery() as nursery:
            # Start the RT Refresh Manager only if random walk is enabled
            if self.rt_refresh_manager is not None:
                nursery.start_soon(self.rt_refresh_manager.start)
                logger.info("RT Refresh Manager started - Random Walk is now active")
            else:
                logger.info("Random Walk is disabled - RT Refresh Manager not started")

            # Start the main DHT service loop
            nursery.start_soon(self._run_main_loop)

    async def _run_main_loop(self) -> None:
        """Run the main DHT service loop."""
        # Main service loop
        while self.manager.is_running:
            # Periodically refresh the routing table
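On the removed side of the hunks above, random walk was an opt-in constructor flag; a sketch of that usage (host is assumed to be a running libp2p host, and KadDHT here is the variant that still accepts the flag):

    dht = KadDHT(host, DHTMode.SERVER, enable_random_walk=True)

    async def run_dht() -> None:
        async with trio.open_nursery() as nursery:
            # run() started the RT Refresh Manager, when present, alongside
            # the main DHT maintenance loop.
            nursery.start_soon(dht.run)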
@ -197,17 +135,6 @@ class KadDHT(Service):
            # Wait before next maintenance cycle
            await trio.sleep(ROUTING_TABLE_REFRESH_INTERVAL)

    async def stop(self) -> None:
        """Stop the DHT service and cleanup resources."""
        logger.info("Stopping Kademlia DHT")

        # Stop the RT Refresh Manager only if it was started
        if self.rt_refresh_manager is not None:
            await self.rt_refresh_manager.stop()
            logger.info("RT Refresh Manager stopped")
        else:
            logger.info("RT Refresh Manager was not running (Random Walk disabled)")

    async def switch_mode(self, new_mode: DHTMode) -> DHTMode:
        """
        Switch the DHT mode.
@ -237,9 +164,6 @@ class KadDHT(Service):
            await self.add_peer(peer_id)
            logger.debug(f"Added peer {peer_id} to routing table")

        closer_peer_envelope: Envelope | None = None
        provider_peer_envelope: Envelope | None = None

        try:
            # Read varint-prefixed length for the message
            length_prefix = b""
@ -280,14 +204,6 @@ class KadDHT(Service):
                )
                logger.debug(f"Found {len(closest_peers)} peers close to target")

                # Consume the source signed_peer_record if sent
                if not maybe_consume_signed_record(message, self.host, peer_id):
                    logger.error(
                        "Received an invalid-signed-record, dropping the stream"
                    )
                    await stream.close()
                    return

                # Build response message with protobuf
                response = Message()
                response.type = Message.MessageType.FIND_NODE
@ -312,21 +228,6 @@ class KadDHT(Service):
                    except Exception:
                        pass

                    # Add the signed-peer-record for each peer in the peer-proto
                    # if cached in the peerstore
                    closer_peer_envelope = (
                        self.host.get_peerstore().get_peer_record(peer)
                    )

                    if closer_peer_envelope is not None:
                        peer_proto.signedRecord = (
                            closer_peer_envelope.marshal_envelope()
                        )

                # Create sender_signed_peer_record
                envelope_bytes, _ = env_to_send_in_RPC(self.host)
                response.senderRecord = envelope_bytes

                # Serialize and send response
                response_bytes = response.SerializeToString()
                await stream.write(varint.encode(len(response_bytes)))
@ -341,14 +242,6 @@ class KadDHT(Service):
                key = message.key
                logger.debug(f"Received ADD_PROVIDER for key {key.hex()}")

                # Consume the source signed-peer-record if sent
                if not maybe_consume_signed_record(message, self.host, peer_id):
                    logger.error(
                        "Received an invalid-signed-record, dropping the stream"
                    )
                    await stream.close()
                    return

                # Extract provider information
                for provider_proto in message.providerPeers:
                    try:
@ -375,17 +268,6 @@ class KadDHT(Service):
                            logger.debug(
                                f"Added provider {provider_id} for key {key.hex()}"
                            )

                            # Process the signed-records of provider if sent
                            if not maybe_consume_signed_record(
                                provider_proto, self.host
                            ):
                                logger.error(
                                    "Received an invalid-signed-record,"
                                    "dropping the stream"
                                )
                                await stream.close()
                                return
                    except Exception as e:
                        logger.warning(f"Failed to process provider info: {e}")

@ -394,10 +276,6 @@ class KadDHT(Service):
                response.type = Message.MessageType.ADD_PROVIDER
                response.key = key

                # Add sender's signed-peer-record
                envelope_bytes, _ = env_to_send_in_RPC(self.host)
                response.senderRecord = envelope_bytes

                response_bytes = response.SerializeToString()
                await stream.write(varint.encode(len(response_bytes)))
                await stream.write(response_bytes)
@ -409,14 +287,6 @@ class KadDHT(Service):
                key = message.key
                logger.debug(f"Received GET_PROVIDERS request for key {key.hex()}")

                # Consume the source signed_peer_record if sent
                if not maybe_consume_signed_record(message, self.host, peer_id):
                    logger.error(
                        "Received an invalid-signed-record, dropping the stream"
                    )
                    await stream.close()
                    return

                # Find providers for the key
                providers = self.provider_store.get_providers(key)
                logger.debug(
@ -428,28 +298,12 @@ class KadDHT(Service):
                response.type = Message.MessageType.GET_PROVIDERS
                response.key = key

                # Create sender_signed_peer_record for the response
                envelope_bytes, _ = env_to_send_in_RPC(self.host)
                response.senderRecord = envelope_bytes

                # Add provider information to response
                for provider_info in providers:
                    provider_proto = response.providerPeers.add()
                    provider_proto.id = provider_info.peer_id.to_bytes()
                    provider_proto.connection = Message.ConnectionType.CAN_CONNECT

                    # Add provider signed-records if cached
                    provider_peer_envelope = (
                        self.host.get_peerstore().get_peer_record(
                            provider_info.peer_id
                        )
                    )

                    if provider_peer_envelope is not None:
                        provider_proto.signedRecord = (
                            provider_peer_envelope.marshal_envelope()
                        )

                    # Add addresses if available
                    for addr in provider_info.addrs:
                        provider_proto.addrs.append(addr.to_bytes())
@ -473,16 +327,6 @@ class KadDHT(Service):
                    peer_proto.id = peer.to_bytes()
                    peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                    # Add the signed-records of closest_peers if cached
                    closer_peer_envelope = (
                        self.host.get_peerstore().get_peer_record(peer)
                    )

                    if closer_peer_envelope is not None:
                        peer_proto.signedRecord = (
                            closer_peer_envelope.marshal_envelope()
                        )

                    # Add addresses if available
                    try:
                        addrs = self.host.get_peerstore().addrs(peer)
@ -503,14 +347,6 @@ class KadDHT(Service):
                key = message.key
                logger.debug(f"Received GET_VALUE request for key {key.hex()}")

                # Consume the sender_signed_peer_record
                if not maybe_consume_signed_record(message, self.host, peer_id):
                    logger.error(
                        "Received an invalid-signed-record, dropping the stream"
                    )
                    await stream.close()
                    return

                value = self.value_store.get(key)
                if value:
                    logger.debug(f"Found value for key {key.hex()}")
@ -525,10 +361,6 @@ class KadDHT(Service):
                    response.record.value = value
                    response.record.timeReceived = str(time.time())

                    # Create sender_signed_peer_record
                    envelope_bytes, _ = env_to_send_in_RPC(self.host)
                    response.senderRecord = envelope_bytes

                    # Serialize and send response
                    response_bytes = response.SerializeToString()
                    await stream.write(varint.encode(len(response_bytes)))
@ -542,10 +374,6 @@ class KadDHT(Service):
                    response.type = Message.MessageType.GET_VALUE
                    response.key = key

                    # Create sender_signed_peer_record for the response
                    envelope_bytes, _ = env_to_send_in_RPC(self.host)
                    response.senderRecord = envelope_bytes

                    # Add closest peers to key
                    closest_peers = self.routing_table.find_local_closest_peers(
                        key, 20
@ -564,16 +392,6 @@ class KadDHT(Service):
                        peer_proto.id = peer.to_bytes()
                        peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                        # Add signed-records of closer-peers if cached
                        closer_peer_envelope = (
                            self.host.get_peerstore().get_peer_record(peer)
                        )

                        if closer_peer_envelope is not None:
                            peer_proto.signedRecord = (
                                closer_peer_envelope.marshal_envelope()
                            )

                        # Add addresses if available
                        try:
                            addrs = self.host.get_peerstore().addrs(peer)
@ -596,15 +414,6 @@ class KadDHT(Service):
                key = message.record.key
                value = message.record.value
                success = False

                # Consume the source signed_peer_record if sent
                if not maybe_consume_signed_record(message, self.host, peer_id):
                    logger.error(
                        "Received an invalid-signed-record, dropping the stream"
                    )
                    await stream.close()
                    return

                try:
                    if not (key and value):
                        raise ValueError(
@ -625,12 +434,6 @@ class KadDHT(Service):
                response.type = Message.MessageType.PUT_VALUE
                if success:
                    response.key = key

                # Create sender_signed_peer_record for the response
                envelope_bytes, _ = env_to_send_in_RPC(self.host)
                response.senderRecord = envelope_bytes

                # Serialize and send response
                response_bytes = response.SerializeToString()
                await stream.write(varint.encode(len(response_bytes)))
                await stream.write(response_bytes)
@ -811,15 +614,3 @@ class KadDHT(Service):

        """
        return self.value_store.size()

    def is_random_walk_enabled(self) -> bool:
        """
        Check if random walk peer discovery is enabled.

        Returns
        -------
        bool
            True if random walk is enabled, False otherwise.

        """
        return self.enable_random_walk
@ -27,7 +27,6 @@ message Message {
        bytes id = 1;
        repeated bytes addrs = 2;
        ConnectionType connection = 3;
        optional bytes signedRecord = 4; // Envelope(PeerRecord) encoded
    }

    MessageType type = 1;
@ -36,6 +35,4 @@ message Message {
    Record record = 3;
    repeated Peer closerPeers = 8;
    repeated Peer providerPeers = 9;

    optional bytes senderRecord = 11; // Envelope(PeerRecord) encoded
}
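With the two optional fields above, a response carrying signed records is assembled as in this sketch (envelope_bytes is assumed to be a marshalled Envelope and peer_id an ID):

    msg = Message()
    msg.type = Message.MessageType.FIND_NODE
    msg.senderRecord = envelope_bytes  # Envelope(PeerRecord) encoded

    peer = msg.closerPeers.add()
    peer.id = peer_id.to_bytes()
    peer.connection = Message.ConnectionType.CAN_CONNECT
    peer.signedRecord = envelope_bytes  # per-peer cached record, if any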
@ -1,12 +1,11 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: libp2p/kad_dht/pb/kademlia.proto
# Protobuf Python Version: 4.25.3
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()
@ -14,21 +13,21 @@ _sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n libp2p/kad_dht/pb/kademlia.proto\":\n\x06Record\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x14\n\x0ctimeReceived\x18\x05 \x01(\t\"\xa2\x04\n\x07Message\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.Message.MessageType\x12\x17\n\x0f\x63lusterLevelRaw\x18\n \x01(\x05\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x12\x17\n\x06record\x18\x03 \x01(\x0b\x32\x07.Record\x12\"\n\x0b\x63loserPeers\x18\x08 \x03(\x0b\x32\r.Message.Peer\x12$\n\rproviderPeers\x18\t \x03(\x0b\x32\r.Message.Peer\x12\x19\n\x0csenderRecord\x18\x0b \x01(\x0cH\x00\x88\x01\x01\x1az\n\x04Peer\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05\x61\x64\x64rs\x18\x02 \x03(\x0c\x12+\n\nconnection\x18\x03 \x01(\x0e\x32\x17.Message.ConnectionType\x12\x19\n\x0csignedRecord\x18\x04 \x01(\x0cH\x00\x88\x01\x01\x42\x0f\n\r_signedRecord\"i\n\x0bMessageType\x12\r\n\tPUT_VALUE\x10\x00\x12\r\n\tGET_VALUE\x10\x01\x12\x10\n\x0c\x41\x44\x44_PROVIDER\x10\x02\x12\x11\n\rGET_PROVIDERS\x10\x03\x12\r\n\tFIND_NODE\x10\x04\x12\x08\n\x04PING\x10\x05\"W\n\x0e\x43onnectionType\x12\x11\n\rNOT_CONNECTED\x10\x00\x12\r\n\tCONNECTED\x10\x01\x12\x0f\n\x0b\x43\x41N_CONNECT\x10\x02\x12\x12\n\x0e\x43\x41NNOT_CONNECT\x10\x03\x42\x0f\n\r_senderRecordb\x06proto3')
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n libp2p/kad_dht/pb/kademlia.proto\":\n\x06Record\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x14\n\x0ctimeReceived\x18\x05 \x01(\t\"\xca\x03\n\x07Message\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.Message.MessageType\x12\x17\n\x0f\x63lusterLevelRaw\x18\n \x01(\x05\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x12\x17\n\x06record\x18\x03 \x01(\x0b\x32\x07.Record\x12\"\n\x0b\x63loserPeers\x18\x08 \x03(\x0b\x32\r.Message.Peer\x12$\n\rproviderPeers\x18\t \x03(\x0b\x32\r.Message.Peer\x1aN\n\x04Peer\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05\x61\x64\x64rs\x18\x02 \x03(\x0c\x12+\n\nconnection\x18\x03 \x01(\x0e\x32\x17.Message.ConnectionType\"i\n\x0bMessageType\x12\r\n\tPUT_VALUE\x10\x00\x12\r\n\tGET_VALUE\x10\x01\x12\x10\n\x0c\x41\x44\x44_PROVIDER\x10\x02\x12\x11\n\rGET_PROVIDERS\x10\x03\x12\r\n\tFIND_NODE\x10\x04\x12\x08\n\x04PING\x10\x05\"W\n\x0e\x43onnectionType\x12\x11\n\rNOT_CONNECTED\x10\x00\x12\r\n\tCONNECTED\x10\x01\x12\x0f\n\x0b\x43\x41N_CONNECT\x10\x02\x12\x12\n\x0e\x43\x41NNOT_CONNECT\x10\x03\x62\x06proto3')

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.kad_dht.pb.kademlia_pb2', _globals)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.kad_dht.pb.kademlia_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:

  DESCRIPTOR._options = None
  _globals['_RECORD']._serialized_start=36
  _globals['_RECORD']._serialized_end=94
  _globals['_MESSAGE']._serialized_start=97
  _globals['_MESSAGE']._serialized_end=643
  _globals['_MESSAGE_PEER']._serialized_start=308
  _globals['_MESSAGE_PEER']._serialized_end=430
  _globals['_MESSAGE_MESSAGETYPE']._serialized_start=432
  _globals['_MESSAGE_MESSAGETYPE']._serialized_end=537
  _globals['_MESSAGE_CONNECTIONTYPE']._serialized_start=539
  _globals['_MESSAGE_CONNECTIONTYPE']._serialized_end=626
  _RECORD._serialized_start=36
  _RECORD._serialized_end=94
  _MESSAGE._serialized_start=97
  _MESSAGE._serialized_end=555
  _MESSAGE_PEER._serialized_start=281
  _MESSAGE_PEER._serialized_end=359
  _MESSAGE_MESSAGETYPE._serialized_start=361
  _MESSAGE_MESSAGETYPE._serialized_end=466
  _MESSAGE_CONNECTIONTYPE._serialized_start=468
  _MESSAGE_CONNECTIONTYPE._serialized_end=555
# @@protoc_insertion_point(module_scope)
@ -1,70 +1,133 @@
|
||||
from google.protobuf.internal import containers as _containers
|
||||
from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import message as _message
|
||||
from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union
|
||||
"""
|
||||
@generated by mypy-protobuf. Do not edit manually!
|
||||
isort:skip_file
|
||||
"""
|
||||
|
||||
DESCRIPTOR: _descriptor.FileDescriptor
|
||||
import builtins
|
||||
import collections.abc
|
||||
import google.protobuf.descriptor
|
||||
import google.protobuf.internal.containers
|
||||
import google.protobuf.internal.enum_type_wrapper
|
||||
import google.protobuf.message
|
||||
import sys
|
||||
import typing
|
||||
|
||||
class Record(_message.Message):
|
||||
__slots__ = ("key", "value", "timeReceived")
|
||||
KEY_FIELD_NUMBER: _ClassVar[int]
|
||||
VALUE_FIELD_NUMBER: _ClassVar[int]
|
||||
TIMERECEIVED_FIELD_NUMBER: _ClassVar[int]
|
||||
key: bytes
|
||||
value: bytes
|
||||
timeReceived: str
|
||||
def __init__(self, key: _Optional[bytes] = ..., value: _Optional[bytes] = ..., timeReceived: _Optional[str] = ...) -> None: ...
|
||||
if sys.version_info >= (3, 10):
|
||||
import typing as typing_extensions
|
||||
else:
|
||||
import typing_extensions
|
||||
|
||||
class Message(_message.Message):
|
||||
__slots__ = ("type", "clusterLevelRaw", "key", "record", "closerPeers", "providerPeers", "senderRecord")
|
||||
class MessageType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
|
||||
__slots__ = ()
|
||||
PUT_VALUE: _ClassVar[Message.MessageType]
|
||||
GET_VALUE: _ClassVar[Message.MessageType]
|
||||
ADD_PROVIDER: _ClassVar[Message.MessageType]
|
||||
GET_PROVIDERS: _ClassVar[Message.MessageType]
|
||||
FIND_NODE: _ClassVar[Message.MessageType]
|
||||
PING: _ClassVar[Message.MessageType]
|
||||
PUT_VALUE: Message.MessageType
|
||||
GET_VALUE: Message.MessageType
|
||||
ADD_PROVIDER: Message.MessageType
|
||||
GET_PROVIDERS: Message.MessageType
|
||||
FIND_NODE: Message.MessageType
|
||||
PING: Message.MessageType
|
||||
class ConnectionType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
|
||||
__slots__ = ()
|
||||
NOT_CONNECTED: _ClassVar[Message.ConnectionType]
|
||||
CONNECTED: _ClassVar[Message.ConnectionType]
|
||||
CAN_CONNECT: _ClassVar[Message.ConnectionType]
|
||||
CANNOT_CONNECT: _ClassVar[Message.ConnectionType]
|
||||
NOT_CONNECTED: Message.ConnectionType
|
||||
CONNECTED: Message.ConnectionType
|
||||
CAN_CONNECT: Message.ConnectionType
|
||||
CANNOT_CONNECT: Message.ConnectionType
|
||||
class Peer(_message.Message):
|
||||
__slots__ = ("id", "addrs", "connection", "signedRecord")
|
||||
ID_FIELD_NUMBER: _ClassVar[int]
|
||||
ADDRS_FIELD_NUMBER: _ClassVar[int]
|
||||
CONNECTION_FIELD_NUMBER: _ClassVar[int]
|
||||
SIGNEDRECORD_FIELD_NUMBER: _ClassVar[int]
|
||||
id: bytes
|
||||
addrs: _containers.RepeatedScalarFieldContainer[bytes]
|
||||
connection: Message.ConnectionType
|
||||
signedRecord: bytes
|
||||
def __init__(self, id: _Optional[bytes] = ..., addrs: _Optional[_Iterable[bytes]] = ..., connection: _Optional[_Union[Message.ConnectionType, str]] = ..., signedRecord: _Optional[bytes] = ...) -> None: ...
|
||||
TYPE_FIELD_NUMBER: _ClassVar[int]
|
||||
CLUSTERLEVELRAW_FIELD_NUMBER: _ClassVar[int]
|
||||
KEY_FIELD_NUMBER: _ClassVar[int]
|
||||
RECORD_FIELD_NUMBER: _ClassVar[int]
|
||||
CLOSERPEERS_FIELD_NUMBER: _ClassVar[int]
|
||||
PROVIDERPEERS_FIELD_NUMBER: _ClassVar[int]
|
||||
SENDERRECORD_FIELD_NUMBER: _ClassVar[int]
|
||||
type: Message.MessageType
|
||||
clusterLevelRaw: int
|
||||
key: bytes
|
||||
record: Record
|
||||
closerPeers: _containers.RepeatedCompositeFieldContainer[Message.Peer]
|
||||
providerPeers: _containers.RepeatedCompositeFieldContainer[Message.Peer]
|
||||
senderRecord: bytes
|
||||
def __init__(self, type: _Optional[_Union[Message.MessageType, str]] = ..., clusterLevelRaw: _Optional[int] = ..., key: _Optional[bytes] = ..., record: _Optional[_Union[Record, _Mapping]] = ..., closerPeers: _Optional[_Iterable[_Union[Message.Peer, _Mapping]]] = ..., providerPeers: _Optional[_Iterable[_Union[Message.Peer, _Mapping]]] = ..., senderRecord: _Optional[bytes] = ...) -> None: ... # type: ignore
|
||||
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
|
||||
|
||||
@typing.final
class Record(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    KEY_FIELD_NUMBER: builtins.int
    VALUE_FIELD_NUMBER: builtins.int
    TIMERECEIVED_FIELD_NUMBER: builtins.int
    key: builtins.bytes
    value: builtins.bytes
    timeReceived: builtins.str
    def __init__(
        self,
        *,
        key: builtins.bytes = ...,
        value: builtins.bytes = ...,
        timeReceived: builtins.str = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing.Literal["key", b"key", "timeReceived", b"timeReceived", "value", b"value"]) -> None: ...

global___Record = Record

@typing.final
class Message(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class _MessageType:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _MessageTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[Message._MessageType.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        PUT_VALUE: Message._MessageType.ValueType  # 0
        GET_VALUE: Message._MessageType.ValueType  # 1
        ADD_PROVIDER: Message._MessageType.ValueType  # 2
        GET_PROVIDERS: Message._MessageType.ValueType  # 3
        FIND_NODE: Message._MessageType.ValueType  # 4
        PING: Message._MessageType.ValueType  # 5

    class MessageType(_MessageType, metaclass=_MessageTypeEnumTypeWrapper): ...
    PUT_VALUE: Message.MessageType.ValueType  # 0
    GET_VALUE: Message.MessageType.ValueType  # 1
    ADD_PROVIDER: Message.MessageType.ValueType  # 2
    GET_PROVIDERS: Message.MessageType.ValueType  # 3
    FIND_NODE: Message.MessageType.ValueType  # 4
    PING: Message.MessageType.ValueType  # 5

    class _ConnectionType:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _ConnectionTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[Message._ConnectionType.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        NOT_CONNECTED: Message._ConnectionType.ValueType  # 0
        CONNECTED: Message._ConnectionType.ValueType  # 1
        CAN_CONNECT: Message._ConnectionType.ValueType  # 2
        CANNOT_CONNECT: Message._ConnectionType.ValueType  # 3

    class ConnectionType(_ConnectionType, metaclass=_ConnectionTypeEnumTypeWrapper): ...
    NOT_CONNECTED: Message.ConnectionType.ValueType  # 0
    CONNECTED: Message.ConnectionType.ValueType  # 1
    CAN_CONNECT: Message.ConnectionType.ValueType  # 2
    CANNOT_CONNECT: Message.ConnectionType.ValueType  # 3

    @typing.final
    class Peer(google.protobuf.message.Message):
        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        ID_FIELD_NUMBER: builtins.int
        ADDRS_FIELD_NUMBER: builtins.int
        CONNECTION_FIELD_NUMBER: builtins.int
        id: builtins.bytes
        connection: global___Message.ConnectionType.ValueType
        @property
        def addrs(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bytes]: ...
        def __init__(
            self,
            *,
            id: builtins.bytes = ...,
            addrs: collections.abc.Iterable[builtins.bytes] | None = ...,
            connection: global___Message.ConnectionType.ValueType = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing.Literal["addrs", b"addrs", "connection", b"connection", "id", b"id"]) -> None: ...

    TYPE_FIELD_NUMBER: builtins.int
    CLUSTERLEVELRAW_FIELD_NUMBER: builtins.int
    KEY_FIELD_NUMBER: builtins.int
    RECORD_FIELD_NUMBER: builtins.int
    CLOSERPEERS_FIELD_NUMBER: builtins.int
    PROVIDERPEERS_FIELD_NUMBER: builtins.int
    type: global___Message.MessageType.ValueType
    clusterLevelRaw: builtins.int
    key: builtins.bytes
    @property
    def record(self) -> global___Record: ...
    @property
    def closerPeers(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Message.Peer]: ...
    @property
    def providerPeers(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Message.Peer]: ...
    def __init__(
        self,
        *,
        type: global___Message.MessageType.ValueType = ...,
        clusterLevelRaw: builtins.int = ...,
        key: builtins.bytes = ...,
        record: global___Record | None = ...,
        closerPeers: collections.abc.Iterable[global___Message.Peer] | None = ...,
        providerPeers: collections.abc.Iterable[global___Message.Peer] | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing.Literal["record", b"record"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing.Literal["closerPeers", b"closerPeers", "clusterLevelRaw", b"clusterLevelRaw", "key", b"key", "providerPeers", b"providerPeers", "record", b"record", "type", b"type"]) -> None: ...

global___Message = Message
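
A minimal round-trip sketch for the generated classes above, assuming the module is importable as libp2p.kad_dht.pb.kademlia_pb2 (the import path used by libp2p.kad_dht.utils later in this diff); the key and peer bytes are hypothetical placeholders:

from libp2p.kad_dht.pb.kademlia_pb2 import Message

# Build a FIND_NODE query and attach one closer peer.
msg = Message()
msg.type = Message.MessageType.FIND_NODE
msg.key = b"target-key-bytes"  # hypothetical key
peer = msg.closerPeers.add()
peer.id = b"peer-id-bytes"     # hypothetical peer id
peer.connection = Message.ConnectionType.CAN_CONNECT

# Serialize and parse back; protobuf guarantees a lossless round trip.
wire = msg.SerializeToString()
decoded = Message()
decoded.ParseFromString(wire)
assert decoded.type == Message.MessageType.FIND_NODE
assert decoded.closerPeers[0].connection == Message.ConnectionType.CAN_CONNECT
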
@@ -15,14 +15,12 @@ from libp2p.abc import (
    INetStream,
    IPeerRouting,
)
from libp2p.peer.envelope import Envelope
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerinfo import (
    PeerInfo,
)
from libp2p.peer.peerstore import env_to_send_in_RPC

from .common import (
    ALPHA,
@@ -35,7 +33,6 @@ from .routing_table import (
    RoutingTable,
)
from .utils import (
    maybe_consume_signed_record,
    sort_peer_ids_by_distance,
)

@@ -173,7 +170,7 @@ class PeerRouting(IPeerRouting):

        # Return early if we have no peers to start with
        if not closest_peers:
            logger.debug("No local peers available for network lookup")
            logger.warning("No local peers available for network lookup")
            return []

        # Iterative lookup until convergence
@@ -258,10 +255,6 @@ class PeerRouting(IPeerRouting):
        find_node_msg.type = Message.MessageType.FIND_NODE
        find_node_msg.key = target_key  # Set target key directly as bytes

        # Create sender_signed_peer_record
        envelope_bytes, _ = env_to_send_in_RPC(self.host)
        find_node_msg.senderRecord = envelope_bytes

        # Serialize and send the protobuf message with varint length prefix
        proto_bytes = find_node_msg.SerializeToString()
        logger.debug(
@@ -306,22 +299,7 @@ class PeerRouting(IPeerRouting):

        # Process closest peers from response
        if response_msg.type == Message.MessageType.FIND_NODE:
            # Consume the sender_signed_peer_record
            if not maybe_consume_signed_record(response_msg, self.host, peer):
                logger.error(
                    "Received an invalid-signed-record, ignoring the response"
                )
                return []

            for peer_data in response_msg.closerPeers:
                # Consume the received closer_peers signed-records, peer-id is
                # sent with the peer-data
                if not maybe_consume_signed_record(peer_data, self.host):
                    logger.error(
                        "Received an invalid-signed-record, ignoring the response"
                    )
                    return []

                new_peer_id = ID(peer_data.id)
                if new_peer_id not in results:
                    results.append(new_peer_id)
@@ -354,7 +332,6 @@ class PeerRouting(IPeerRouting):
        """
        try:
            # Read message length
            peer_id = stream.muxed_conn.peer_id
            length_bytes = await stream.read(4)
            if not length_bytes:
                return
@@ -368,18 +345,10 @@ class PeerRouting(IPeerRouting):

            # Parse protobuf message
            kad_message = Message()
            closer_peer_envelope: Envelope | None = None
            try:
                kad_message.ParseFromString(message_bytes)

                if kad_message.type == Message.MessageType.FIND_NODE:
                    # Consume the sender's signed-peer-record if sent
                    if not maybe_consume_signed_record(kad_message, self.host, peer_id):
                        logger.error(
                            "Received an invalid-signed-record, dropping the stream"
                        )
                        return

                # Get target key directly from protobuf message
                target_key = kad_message.key

@@ -392,26 +361,12 @@ class PeerRouting(IPeerRouting):
                response = Message()
                response.type = Message.MessageType.FIND_NODE

                # Create sender_signed_peer_record for the response
                envelope_bytes, _ = env_to_send_in_RPC(self.host)
                response.senderRecord = envelope_bytes

                # Add peer information to response
                for peer_id in closest_peers:
                    peer_proto = response.closerPeers.add()
                    peer_proto.id = peer_id.to_bytes()
                    peer_proto.connection = Message.ConnectionType.CAN_CONNECT

                    # Add the signed-records of closest_peers if cached
                    closer_peer_envelope = (
                        self.host.get_peerstore().get_peer_record(peer_id)
                    )

                    if isinstance(closer_peer_envelope, Envelope):
                        peer_proto.signedRecord = (
                            closer_peer_envelope.marshal_envelope()
                        )

                    # Add addresses if available
                    try:
                        addrs = self.host.get_peerstore().addrs(peer_id)

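
The sending side above frames each protobuf payload with a varint length prefix (the code uses the `varint` package). A self-contained sketch of that framing, built only from the standard unsigned-varint wire rules:

def encode_uvarint(n: int) -> bytes:
    # Little-endian base-128: 7 payload bits per byte, MSB = continuation.
    out = bytearray()
    while True:
        byte = n & 0x7F
        n >>= 7
        if n:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

def decode_uvarint(data: bytes) -> tuple[int, int]:
    # Returns (value, number of bytes consumed).
    value = shift = 0
    for i, byte in enumerate(data):
        value |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return value, i + 1
        shift += 7
    raise ValueError("truncated varint")

payload = b"example-protobuf-bytes"
frame = encode_uvarint(len(payload)) + payload
length, consumed = decode_uvarint(frame)
assert frame[consumed:consumed + length] == payload
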
@@ -22,14 +22,12 @@ from libp2p.abc import (
from libp2p.custom_types import (
    TProtocol,
)
from libp2p.kad_dht.utils import maybe_consume_signed_record
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerinfo import (
    PeerInfo,
)
from libp2p.peer.peerstore import env_to_send_in_RPC

from .common import (
    ALPHA,
@@ -242,18 +240,11 @@ class ProviderStore:
        message.type = Message.MessageType.ADD_PROVIDER
        message.key = key

        # Create sender's signed-peer-record
        envelope_bytes, _ = env_to_send_in_RPC(self.host)
        message.senderRecord = envelope_bytes

        # Add our provider info
        provider = message.providerPeers.add()
        provider.id = self.local_peer_id.to_bytes()
        provider.addrs.extend(addrs)

        # Add the provider's signed-peer-record
        provider.signedRecord = envelope_bytes

        # Serialize and send the message
        proto_bytes = message.SerializeToString()
        await stream.write(varint.encode(len(proto_bytes)))
@@ -285,15 +276,10 @@ class ProviderStore:
            response = Message()
            response.ParseFromString(response_bytes)

            if response.type == Message.MessageType.ADD_PROVIDER:
                # Consume the sender's signed-peer-record if sent
                if not maybe_consume_signed_record(response, self.host, peer_id):
                    logger.error(
                        "Received an invalid-signed-record, ignoring the response"
                    )
                    result = False
                else:
                    result = True
            # Check response type
            response.type == Message.MessageType.ADD_PROVIDER
            if response.type:
                result = True

        except Exception as e:
            logger.warning(f"Error sending ADD_PROVIDER to {peer_id}: {e}")
@@ -394,10 +380,6 @@ class ProviderStore:
        message.type = Message.MessageType.GET_PROVIDERS
        message.key = key

        # Create sender's signed-peer-record
        envelope_bytes, _ = env_to_send_in_RPC(self.host)
        message.senderRecord = envelope_bytes

        # Serialize and send the message
        proto_bytes = message.SerializeToString()
        await stream.write(varint.encode(len(proto_bytes)))
@@ -432,26 +414,10 @@ class ProviderStore:
        if response.type != Message.MessageType.GET_PROVIDERS:
            return []

        # Consume the sender's signed-peer-record if sent
        if not maybe_consume_signed_record(response, self.host, peer_id):
            logger.error(
                "Received an invalid-signed-record, ignoring the response"
            )
            return []

        # Extract provider information
        providers = []
        for provider_proto in response.providerPeers:
            try:
                # Consume the provider's signed-peer-record if sent, peer-id
                # already sent with the provider-proto
                if not maybe_consume_signed_record(provider_proto, self.host):
                    logger.error(
                        "Received an invalid-signed-record, "
                        "ignoring the response"
                    )
                    return []

                # Create peer ID from bytes
                provider_id = ID(provider_proto.id)

@@ -465,7 +431,6 @@ class ProviderStore:

                # Create PeerInfo and add to result
                providers.append(PeerInfo(provider_id, addrs))

            except Exception as e:
                logger.warning(f"Failed to parse provider info: {e}")

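
The ADD_PROVIDER path above packs the local node's own peer info into providerPeers and then length-prefixes the serialized message. A condensed sketch of just the message-building step, with placeholder arguments for everything the surrounding class normally supplies:

import varint  # same dependency the store uses for length prefixes

from libp2p.kad_dht.pb.kademlia_pb2 import Message

def build_add_provider(key: bytes, provider_id: bytes, addrs: list[bytes]) -> bytes:
    # Mirror of the flow above: type + key, one providerPeers entry,
    # then a varint-length-prefixed serialization ready for the stream.
    message = Message()
    message.type = Message.MessageType.ADD_PROVIDER
    message.key = key
    provider = message.providerPeers.add()
    provider.id = provider_id
    provider.addrs.extend(addrs)
    proto_bytes = message.SerializeToString()
    return varint.encode(len(proto_bytes)) + proto_bytes
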
@@ -2,93 +2,13 @@
Utility functions for Kademlia DHT implementation.
"""

import logging

import base58
import multihash

from libp2p.abc import IHost
from libp2p.peer.envelope import consume_envelope
from libp2p.peer.id import (
    ID,
)

from .pb.kademlia_pb2 import (
    Message,
)

logger = logging.getLogger("kademlia-example.utils")


def maybe_consume_signed_record(
    msg: Message | Message.Peer, host: IHost, peer_id: ID | None = None
) -> bool:
    """
    Attempt to parse and store a signed-peer-record (Envelope) received during
    DHT communication. If the record is invalid, the peer-id does not match, or
    updating the peerstore fails, the function logs an error and returns False.

    Parameters
    ----------
    msg : Message | Message.Peer
        The protobuf message received during DHT communication. Can either be a
        top-level `Message` containing `senderRecord` or a `Message.Peer`
        containing `signedRecord`.
    host : IHost
        The local host instance, providing access to the peerstore for storing
        verified peer records.
    peer_id : ID | None, optional
        The expected peer ID for record validation. If provided, the peer ID
        inside the record must match this value.

    Returns
    -------
    bool
        True if a valid signed peer record was successfully consumed and stored,
        False otherwise.

    """
    if isinstance(msg, Message):
        if msg.HasField("senderRecord"):
            try:
                # Convert the signed-peer-record (Envelope) from
                # protobuf bytes
                envelope, record = consume_envelope(
                    msg.senderRecord,
                    "libp2p-peer-record",
                )
                if not (isinstance(peer_id, ID) and record.peer_id == peer_id):
                    return False
                # Use the default TTL of 2 hours (7200 seconds)
                if not host.get_peerstore().consume_peer_record(envelope, 7200):
                    logger.error("Failed to update the Certified-Addr-Book")
                    return False
            except Exception as e:
                logger.error("Failed to update the Certified-Addr-Book: %s", e)
                return False
    else:
        if msg.HasField("signedRecord"):
            try:
                # Convert the signed-peer-record (Envelope) from
                # protobuf bytes
                envelope, record = consume_envelope(
                    msg.signedRecord,
                    "libp2p-peer-record",
                )
                if not record.peer_id.to_bytes() == msg.id:
                    return False
                # Use the default TTL of 2 hours (7200 seconds)
                if not host.get_peerstore().consume_peer_record(envelope, 7200):
                    logger.error("Failed to update the Certified-Addr-Book")
                    return False
            except Exception as e:
                logger.error(
                    "Failed to update the Certified-Addr-Book: %s",
                    e,
                )
                return False
    return True


def create_key_from_binary(binary_data: bytes) -> bytes:
    """

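
The peer-id check above differs per branch: a top-level Message is matched against the caller-supplied peer_id, while a Message.Peer is matched against its own embedded id bytes. A self-contained sketch of just that rule, with plain bytes standing in for the real ID objects:

def record_matches(record_peer_id: bytes,
                   expected_peer_id: bytes | None,
                   embedded_id: bytes | None) -> bool:
    if embedded_id is not None:
        # Message.Peer branch: the record must name the peer the entry is for.
        return record_peer_id == embedded_id
    # Top-level Message branch: an expected peer id must be supplied and match.
    return expected_peer_id is not None and record_peer_id == expected_peer_id

assert record_matches(b"A", b"A", None) is True
assert record_matches(b"A", None, None) is False   # sender unknown -> reject
assert record_matches(b"A", None, b"B") is False   # embedded id mismatch
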
@@ -15,11 +15,9 @@ from libp2p.abc import (
from libp2p.custom_types import (
    TProtocol,
)
from libp2p.kad_dht.utils import maybe_consume_signed_record
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerstore import env_to_send_in_RPC

from .common import (
    DEFAULT_TTL,
@@ -112,10 +110,6 @@ class ValueStore:
        message = Message()
        message.type = Message.MessageType.PUT_VALUE

        # Create sender's signed-peer-record
        envelope_bytes, _ = env_to_send_in_RPC(self.host)
        message.senderRecord = envelope_bytes

        # Set message fields
        message.key = key
        message.record.key = key
@@ -161,13 +155,7 @@ class ValueStore:

            # Check if response is valid
            if response.type == Message.MessageType.PUT_VALUE:
                # Consume the sender's signed-peer-record if sent
                if not maybe_consume_signed_record(response, self.host, peer_id):
                    logger.error(
                        "Received an invalid-signed-record, ignoring the response"
                    )
                    return False
            if response.key == key:
            if response.key:
                result = True
            return result

@@ -243,10 +231,6 @@ class ValueStore:
        message.type = Message.MessageType.GET_VALUE
        message.key = key

        # Create sender's signed-peer-record
        envelope_bytes, _ = env_to_send_in_RPC(self.host)
        message.senderRecord = envelope_bytes

        # Serialize and send the protobuf message
        proto_bytes = message.SerializeToString()
        await stream.write(varint.encode(len(proto_bytes)))
@@ -291,13 +275,6 @@ class ValueStore:
                and response.HasField("record")
                and response.record.value
            ):
                # Consume the sender's signed-peer-record
                if not maybe_consume_signed_record(response, self.host, peer_id):
                    logger.error(
                        "Received an invalid-signed-record, ignoring the response"
                    )
                    return None

                logger.debug(
                    f"Received value for key {key.hex()} from peer {peer_id}"
                )

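
PUT_VALUE embeds a Record inside the Message, as the hunks above show. A condensed sketch of the fields the store fills in, using the same generated classes; the key, value, and RFC 3339-style timestamp are hypothetical values:

from datetime import datetime, timezone

from libp2p.kad_dht.pb.kademlia_pb2 import Message

message = Message()
message.type = Message.MessageType.PUT_VALUE
message.key = b"some-dht-key"
message.record.key = b"some-dht-key"   # nested Record mirrors the outer key
message.record.value = b"some-value"
message.record.timeReceived = datetime.now(timezone.utc).isoformat()
assert message.HasField("record")
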
@@ -1,70 +0,0 @@
from dataclasses import dataclass


@dataclass
class RetryConfig:
    """
    Configuration for retry logic with exponential backoff.

    This configuration controls how connection attempts are retried when they fail.
    The retry mechanism uses exponential backoff with jitter to prevent thundering
    herd problems in distributed systems.

    Attributes:
        max_retries: Maximum number of retry attempts before giving up.
            Default: 3 attempts
        initial_delay: Initial delay in seconds before the first retry.
            Default: 0.1 seconds (100ms)
        max_delay: Maximum delay cap in seconds to prevent excessive wait times.
            Default: 30.0 seconds
        backoff_multiplier: Multiplier for exponential backoff (each retry multiplies
            the delay by this factor). Default: 2.0 (doubles each time)
        jitter_factor: Random jitter factor (0.0-1.0) to add randomness to delays
            and prevent synchronized retries. Default: 0.1 (10% jitter)

    """

    max_retries: int = 3
    initial_delay: float = 0.1
    max_delay: float = 30.0
    backoff_multiplier: float = 2.0
    jitter_factor: float = 0.1


@dataclass
class ConnectionConfig:
    """
    Configuration for multi-connection support.

    This configuration controls how multiple connections per peer are managed,
    including connection limits, timeouts, and load balancing strategies.

    Attributes:
        max_connections_per_peer: Maximum number of connections allowed to a single
            peer. Default: 3 connections
        connection_timeout: Timeout in seconds for establishing new connections.
            Default: 30.0 seconds
        load_balancing_strategy: Strategy for distributing streams across connections.
            Options: "round_robin" (default) or "least_loaded"

    """

    max_connections_per_peer: int = 3
    connection_timeout: float = 30.0
    load_balancing_strategy: str = "round_robin"  # or "least_loaded"

    def __post_init__(self) -> None:
        """Validate configuration after initialization."""
        if not (
            self.load_balancing_strategy == "round_robin"
            or self.load_balancing_strategy == "least_loaded"
        ):
            raise ValueError(
                "Load balancing strategy can only be 'round_robin' or 'least_loaded'"
            )

        if self.max_connections_per_peer < 1:
            raise ValueError("Max connections per peer should be at least 1")

        if self.connection_timeout < 0:
            raise ValueError("Connection timeout should be non-negative")
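
The __post_init__ hook above makes invalid configurations fail at construction time. A quick sketch of that behavior, assuming the module is importable as libp2p.network.config (the import path used by swarm.py on the side of this diff that still carries these dataclasses):

from libp2p.network.config import ConnectionConfig

cfg = ConnectionConfig()                      # defaults pass validation
assert cfg.load_balancing_strategy == "round_robin"

try:
    ConnectionConfig(load_balancing_strategy="random")
except ValueError as exc:
    print(f"rejected as expected: {exc}")     # unknown strategy fails fast
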
@@ -17,7 +17,6 @@ from libp2p.stream_muxer.exceptions import (
    MuxedStreamError,
    MuxedStreamReset,
)
from libp2p.transport.quic.exceptions import QUICStreamClosedError, QUICStreamResetError

from .exceptions import (
    StreamClosed,
@@ -171,7 +170,7 @@ class NetStream(INetStream):
            elif self.__stream_state == StreamState.OPEN:
                self.__stream_state = StreamState.CLOSE_READ
            raise StreamEOF() from error
        except (MuxedStreamReset, QUICStreamClosedError, QUICStreamResetError) as error:
        except MuxedStreamReset as error:
            async with self._state_lock:
                if self.__stream_state in [
                    StreamState.OPEN,
@@ -200,12 +199,7 @@ class NetStream(INetStream):

        try:
            await self.muxed_stream.write(data)
        except (
            MuxedStreamClosed,
            MuxedStreamError,
            QUICStreamClosedError,
            QUICStreamResetError,
        ) as error:
        except (MuxedStreamClosed, MuxedStreamError) as error:
            async with self._state_lock:
                if self.__stream_state == StreamState.OPEN:
                    self.__stream_state = StreamState.CLOSE_WRITE

@@ -3,8 +3,6 @@ from collections.abc import (
    Callable,
)
import logging
import random
from typing import cast

from multiaddr import (
    Multiaddr,
@@ -27,7 +25,6 @@ from libp2p.custom_types import (
from libp2p.io.abc import (
    ReadWriteCloser,
)
from libp2p.network.config import ConnectionConfig, RetryConfig
from libp2p.peer.id import (
    ID,
)
@@ -42,9 +39,6 @@ from libp2p.transport.exceptions import (
    OpenConnectionError,
    SecurityUpgradeFailure,
)
from libp2p.transport.quic.config import QUICTransportConfig
from libp2p.transport.quic.connection import QUICConnection
from libp2p.transport.quic.transport import QUICTransport
from libp2p.transport.upgrader import (
    TransportUpgrader,
)
@@ -77,7 +71,9 @@ class Swarm(Service, INetworkService):
    peerstore: IPeerStore
    upgrader: TransportUpgrader
    transport: ITransport
    connections: dict[ID, list[INetConn]]
    # TODO: Connection and `peer_id` are 1-1 mapping in our implementation,
    # whereas in Go one `peer_id` may point to multiple connections.
    connections: dict[ID, INetConn]
    listeners: dict[str, IListener]
    common_stream_handler: StreamHandlerFn
    listener_nursery: trio.Nursery | None
@@ -85,31 +81,18 @@ class Swarm(Service, INetworkService):

    notifees: list[INotifee]

    # Enhanced: New configuration
    retry_config: RetryConfig
    connection_config: ConnectionConfig | QUICTransportConfig
    _round_robin_index: dict[ID, int]

    def __init__(
        self,
        peer_id: ID,
        peerstore: IPeerStore,
        upgrader: TransportUpgrader,
        transport: ITransport,
        retry_config: RetryConfig | None = None,
        connection_config: ConnectionConfig | QUICTransportConfig | None = None,
    ):
        self.self_id = peer_id
        self.peerstore = peerstore
        self.upgrader = upgrader
        self.transport = transport

        # Enhanced: Initialize retry and connection configuration
        self.retry_config = retry_config or RetryConfig()
        self.connection_config = connection_config or ConnectionConfig()

        # Enhanced: Initialize connections as 1:many mapping
        self.connections = {}
        self.connections = dict()
        self.listeners = dict()

        # Create Notifee array
@@ -120,19 +103,11 @@ class Swarm(Service, INetworkService):
        self.listener_nursery = None
        self.event_listener_nursery_created = trio.Event()

        # Load balancing state
        self._round_robin_index = {}

    async def run(self) -> None:
        async with trio.open_nursery() as nursery:
            # Create a nursery for listener tasks.
            self.listener_nursery = nursery
            self.event_listener_nursery_created.set()

            if isinstance(self.transport, QUICTransport):
                self.transport.set_background_nursery(nursery)
                self.transport.set_swarm(self)

            try:
                await self.manager.wait_finished()
            finally:
@@ -147,74 +122,18 @@ class Swarm(Service, INetworkService):
    def set_stream_handler(self, stream_handler: StreamHandlerFn) -> None:
        self.common_stream_handler = stream_handler

    def get_connections(self, peer_id: ID | None = None) -> list[INetConn]:
    async def dial_peer(self, peer_id: ID) -> INetConn:
        """
        Get connections for peer (like JS getConnections, Go ConnsToPeer).

        Parameters
        ----------
        peer_id : ID | None
            The peer ID to get connections for. If None, returns all connections.

        Returns
        -------
        list[INetConn]
            List of connections to the specified peer, or all connections
            if peer_id is None.

        """
        if peer_id is not None:
            return self.connections.get(peer_id, [])

        # Return all connections from all peers
        all_conns = []
        for conns in self.connections.values():
            all_conns.extend(conns)
        return all_conns

    def get_connections_map(self) -> dict[ID, list[INetConn]]:
        """
        Get all connections map (like JS getConnectionsMap).

        Returns
        -------
        dict[ID, list[INetConn]]
            The complete mapping of peer IDs to their connection lists.

        """
        return self.connections.copy()

    def get_connection(self, peer_id: ID) -> INetConn | None:
        """
        Get single connection for backward compatibility.

        Parameters
        ----------
        peer_id : ID
            The peer ID to get a connection for.

        Returns
        -------
        INetConn | None
            The first available connection, or None if no connections exist.

        """
        conns = self.get_connections(peer_id)
        return conns[0] if conns else None

    async def dial_peer(self, peer_id: ID) -> list[INetConn]:
        """
        Try to create connections to peer_id with enhanced retry logic.
        Try to create a connection to peer_id.

        :param peer_id: peer id we want to dial
        :raises SwarmException: raised when an error occurs
        :return: list of muxed connections
        :return: muxed connection
        """
        # Check if we already have connections
        existing_connections = self.get_connections(peer_id)
        if existing_connections:
            logger.debug(f"Reusing existing connections to peer {peer_id}")
            return existing_connections
        if peer_id in self.connections:
            # If muxed connection already exists for peer_id,
            # set muxed connection equal to existing muxed connection
            return self.connections[peer_id]

        logger.debug("attempting to dial peer %s", peer_id)

@@ -227,19 +146,12 @@ class Swarm(Service, INetworkService):
        if not addrs:
            raise SwarmException(f"No known addresses to peer {peer_id}")

        connections = []
        exceptions: list[SwarmException] = []

        # Enhanced: Try all known addresses with retry logic
        # Try all known addresses
        for multiaddr in addrs:
            try:
                connection = await self._dial_with_retry(multiaddr, peer_id)
                connections.append(connection)

                # Limit number of connections per peer
                if len(connections) >= self.connection_config.max_connections_per_peer:
                    break

                return await self.dial_addr(multiaddr, peer_id)
            except SwarmException as e:
                exceptions.append(e)
                logger.debug(
@@ -249,73 +161,15 @@ class Swarm(Service, INetworkService):
                    exc_info=e,
                )

        if not connections:
            # Tried all addresses, raising exception.
            raise SwarmException(
                f"unable to connect to {peer_id}, no addresses established a "
                "successful connection (with exceptions)"
            ) from MultiError(exceptions)
        # Tried all addresses, raising exception.
        raise SwarmException(
            f"unable to connect to {peer_id}, no addresses established a successful "
            "connection (with exceptions)"
        ) from MultiError(exceptions)

        return connections

    async def _dial_with_retry(self, addr: Multiaddr, peer_id: ID) -> INetConn:
    async def dial_addr(self, addr: Multiaddr, peer_id: ID) -> INetConn:
        """
        Enhanced: Dial with retry logic and exponential backoff.

        :param addr: the address to dial
        :param peer_id: the peer we want to connect to
        :raises SwarmException: raised when all retry attempts fail
        :return: network connection
        """
        last_exception = None

        for attempt in range(self.retry_config.max_retries + 1):
            try:
                return await self._dial_addr_single_attempt(addr, peer_id)
            except Exception as e:
                last_exception = e
                if attempt < self.retry_config.max_retries:
                    delay = self._calculate_backoff_delay(attempt)
                    logger.debug(
                        f"Connection attempt {attempt + 1} failed, "
                        f"retrying in {delay:.2f}s: {e}"
                    )
                    await trio.sleep(delay)
                else:
                    logger.debug(f"All {self.retry_config.max_retries} attempts failed")

        # Convert the last exception to SwarmException for consistency
        if last_exception is not None:
            if isinstance(last_exception, SwarmException):
                raise last_exception
            else:
                raise SwarmException(
                    f"Failed to connect after {self.retry_config.max_retries} attempts"
                ) from last_exception

        # This should never be reached, but mypy requires it
        raise SwarmException("Unexpected error in retry logic")

    def _calculate_backoff_delay(self, attempt: int) -> float:
        """
        Enhanced: Calculate backoff delay with jitter to prevent thundering herd.

        :param attempt: the current attempt number (0-based)
        :return: delay in seconds
        """
        delay = min(
            self.retry_config.initial_delay
            * (self.retry_config.backoff_multiplier**attempt),
            self.retry_config.max_delay,
        )

        # Add jitter to prevent synchronized retries
        jitter = delay * self.retry_config.jitter_factor
        return delay + random.uniform(-jitter, jitter)
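
With the defaults above (initial_delay 0.1s, multiplier 2.0, cap 30s, 10% jitter), the pre-jitter schedule doubles per attempt: 0.1s, 0.2s, 0.4s, 0.8s, ... A standalone reproduction of the arithmetic in _calculate_backoff_delay:

import random

def backoff_delay(attempt: int, initial: float = 0.1, mult: float = 2.0,
                  cap: float = 30.0, jitter_factor: float = 0.1) -> float:
    # Same formula as _calculate_backoff_delay above.
    delay = min(initial * (mult ** attempt), cap)
    jitter = delay * jitter_factor
    return delay + random.uniform(-jitter, jitter)

for attempt in range(4):
    nominal = min(0.1 * 2 ** attempt, 30.0)
    print(f"attempt {attempt}: ~{nominal:.1f}s nominal, "
          f"sampled {backoff_delay(attempt):.3f}s")
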
    async def _dial_addr_single_attempt(self, addr: Multiaddr, peer_id: ID) -> INetConn:
        """
        Enhanced: Single attempt to dial an address (extracted from original dial_addr).
        Try to create a connection to peer_id with addr.

        :param addr: the address we want to connect with
        :param peer_id: the peer we want to connect to
@@ -325,7 +179,6 @@ class Swarm(Service, INetworkService):
        # Dial peer (connection to peer does not yet exist)
        # Transport dials peer (gets back a raw conn)
        try:
            addr = Multiaddr(f"{addr}/p2p/{peer_id}")
            raw_conn = await self.transport.dial(addr)
        except OpenConnectionError as error:
            logger.debug("fail to dial peer %s over base transport", peer_id)
@@ -333,15 +186,6 @@ class Swarm(Service, INetworkService):
                f"fail to open connection to peer {peer_id}"
            ) from error

        if isinstance(self.transport, QUICTransport) and isinstance(
            raw_conn, IMuxedConn
        ):
            logger.info(
                "Skipping upgrade for QUIC, QUIC connections are already multiplexed"
            )
            swarm_conn = await self.add_conn(raw_conn)
            return swarm_conn

        logger.debug("dialed peer %s over base transport", peer_id)

        # Per, https://discuss.libp2p.io/t/multistream-security/130, we first secure
@@ -367,103 +211,24 @@ class Swarm(Service, INetworkService):
        logger.debug("upgraded mux for peer %s", peer_id)

        swarm_conn = await self.add_conn(muxed_conn)

        logger.debug("successfully dialed peer %s", peer_id)

        return swarm_conn

    async def dial_addr(self, addr: Multiaddr, peer_id: ID) -> INetConn:
        """
        Enhanced: Try to create a connection to peer_id with addr using retry logic.

        :param addr: the address we want to connect with
        :param peer_id: the peer we want to connect to
        :raises SwarmException: raised when an error occurs
        :return: network connection
        """
        return await self._dial_with_retry(addr, peer_id)

    async def new_stream(self, peer_id: ID) -> INetStream:
        """
        Enhanced: Create a new stream with load balancing across multiple connections.

        :param peer_id: peer_id of destination
        :raises SwarmException: raised when an error occurs
        :return: net stream instance
        """
        logger.debug("attempting to open a stream to peer %s", peer_id)
        # Get existing connections or dial new ones
        connections = self.get_connections(peer_id)
        if not connections:
            connections = await self.dial_peer(peer_id)

        # Load balancing strategy at interface level
        connection = self._select_connection(connections, peer_id)
        swarm_conn = await self.dial_peer(peer_id)

        if isinstance(self.transport, QUICTransport) and connection is not None:
            conn = cast(SwarmConn, connection)
            return await conn.new_stream()

        try:
            net_stream = await connection.new_stream()
            logger.debug("successfully opened a stream to peer %s", peer_id)
            return net_stream
        except Exception as e:
            logger.debug(f"Failed to create stream on connection: {e}")
            # Try other connections if available
            for other_conn in connections:
                if other_conn != connection:
                    try:
                        net_stream = await other_conn.new_stream()
                        logger.debug(
                            f"Successfully opened a stream to peer {peer_id} "
                            "using alternative connection"
                        )
                        return net_stream
                    except Exception:
                        continue

            # All connections failed, raise exception
            raise SwarmException(f"Failed to create stream to peer {peer_id}") from e

    def _select_connection(self, connections: list[INetConn], peer_id: ID) -> INetConn:
        """
        Select connection based on load balancing strategy.

        Parameters
        ----------
        connections : list[INetConn]
            List of available connections.
        peer_id : ID
            The peer ID for round-robin tracking.
        strategy : str
            Load balancing strategy ("round_robin", "least_loaded", etc.).

        Returns
        -------
        INetConn
            Selected connection.

        """
        if not connections:
            raise ValueError("No connections available")

        strategy = self.connection_config.load_balancing_strategy

        if strategy == "round_robin":
            # Simple round-robin selection
            if peer_id not in self._round_robin_index:
                self._round_robin_index[peer_id] = 0

            index = self._round_robin_index[peer_id] % len(connections)
            self._round_robin_index[peer_id] += 1
            return connections[index]

        elif strategy == "least_loaded":
            # Find connection with least streams
            return min(connections, key=lambda c: len(c.get_streams()))

        else:
            # Default to first connection
            return connections[0]
        net_stream = await swarm_conn.new_stream()
        logger.debug("successfully opened a stream to peer %s", peer_id)
        return net_stream

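
The round-robin branch of _select_connection above keeps one counter per peer and wraps it by the current connection-list length. A standalone sketch of that selection behavior:

from collections import defaultdict

_rr_index: dict[str, int] = defaultdict(int)

def select_round_robin(peer: str, connections: list[str]) -> str:
    # Same wrap-around rule as _select_connection above.
    idx = _rr_index[peer] % len(connections)
    _rr_index[peer] += 1
    return connections[idx]

conns = ["conn-a", "conn-b", "conn-c"]
picks = [select_round_robin("peer-1", conns) for _ in range(5)]
assert picks == ["conn-a", "conn-b", "conn-c", "conn-a", "conn-b"]
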
    async def listen(self, *multiaddrs: Multiaddr) -> bool:
        """
@@ -481,38 +246,16 @@ class Swarm(Service, INetworkService):
        - Call listener listen with the multiaddr
        - Map multiaddr to listener
        """
        logger.debug(f"Swarm.listen called with multiaddrs: {multiaddrs}")
        # We need to wait until `self.listener_nursery` is created.
        logger.debug("Starting to listen")
        await self.event_listener_nursery_created.wait()

        success_count = 0
        for maddr in multiaddrs:
            logger.debug(f"Swarm.listen processing multiaddr: {maddr}")
            if str(maddr) in self.listeners:
                logger.debug(f"Swarm.listen: listener already exists for {maddr}")
                success_count += 1
                continue
                return True

            async def conn_handler(
                read_write_closer: ReadWriteCloser, maddr: Multiaddr = maddr
            ) -> None:
                # No need to upgrade QUIC Connection
                if isinstance(self.transport, QUICTransport):
                    try:
                        quic_conn = cast(QUICConnection, read_write_closer)
                        await self.add_conn(quic_conn)
                        peer_id = quic_conn.peer_id
                        logger.debug(
                            f"successfully opened quic connection to peer {peer_id}"
                        )
                        # NOTE: This is an intentional barrier to prevent the
                        # handler from exiting and closing the connection.
                        await self.manager.wait_finished()
                    except Exception:
                        await read_write_closer.close()
                    return

                raw_conn = RawConnection(read_write_closer, False)

                # Per, https://discuss.libp2p.io/t/multistream-security/130, we first
@@ -548,30 +291,24 @@ class Swarm(Service, INetworkService):

            try:
                # Success
                logger.debug(f"Swarm.listen: creating listener for {maddr}")
                listener = self.transport.create_listener(conn_handler)
                logger.debug(f"Swarm.listen: listener created for {maddr}")
                self.listeners[str(maddr)] = listener
                # TODO: `listener.listen` is not bounded with nursery. If we want to be
                # I/O agnostic, we should change the API.
                if self.listener_nursery is None:
                    raise SwarmException("swarm instance hasn't been run")
                assert self.listener_nursery is not None  # For type checker
                logger.debug(f"Swarm.listen: calling listener.listen for {maddr}")
                await listener.listen(maddr, self.listener_nursery)
                logger.debug(f"Swarm.listen: listener.listen completed for {maddr}")

                # Call notifiers since event occurred
                await self.notify_listen(maddr)

                success_count += 1
                logger.debug("successfully started listening on: %s", maddr)
                return True
            except OSError:
                # Failed. Continue looping.
                logger.debug("fail to listen on: %s", maddr)

        # Return true if at least one address succeeded
        return success_count > 0
        # No maddr succeeded
        return False

    async def close(self) -> None:
        """
@@ -584,9 +321,9 @@ class Swarm(Service, INetworkService):
        # Perform alternative cleanup if the manager isn't initialized
        # Close all connections manually
        if hasattr(self, "connections"):
            for peer_id, conns in list(self.connections.items()):
                for conn in conns:
                    await conn.close()
            for conn_id in list(self.connections.keys()):
                conn = self.connections[conn_id]
                await conn.close()

            # Clear connection tracking dictionary
            self.connections.clear()
@@ -616,28 +353,12 @@ class Swarm(Service, INetworkService):
        logger.debug("swarm successfully closed")

    async def close_peer(self, peer_id: ID) -> None:
        """
        Close all connections to the specified peer.

        Parameters
        ----------
        peer_id : ID
            The peer ID to close connections for.

        """
        connections = self.get_connections(peer_id)
        if not connections:
        if peer_id not in self.connections:
            return

        # Close all connections
        for connection in connections:
            try:
                await connection.close()
            except Exception as e:
                logger.warning(f"Error closing connection to {peer_id}: {e}")

        # Remove from connections dict
        self.connections.pop(peer_id, None)
        connection = self.connections[peer_id]
        # NOTE: `connection.close` will delete `peer_id` from `self.connections`
        # and `notify_disconnected` for us.
        await connection.close()

        logger.debug("successfully closed the connection to peer %s", peer_id)

@@ -651,77 +372,26 @@ class Swarm(Service, INetworkService):
            muxed_conn,
            self,
        )
        logger.debug("Swarm::add_conn | starting muxed connection")

        self.manager.run_task(muxed_conn.start)
        await muxed_conn.event_started.wait()
        logger.debug("Swarm::add_conn | starting swarm connection")
        self.manager.run_task(swarm_conn.start)
        await swarm_conn.event_started.wait()

        # Add to connections dict with deduplication
        peer_id = muxed_conn.peer_id
        if peer_id not in self.connections:
            self.connections[peer_id] = []

        # Check for duplicate connections by comparing the underlying muxed connection
        for existing_conn in self.connections[peer_id]:
            if existing_conn.muxed_conn == muxed_conn:
                logger.debug(f"Connection already exists for peer {peer_id}")
                # existing_conn is a SwarmConn since it's stored in the connections list
                return existing_conn  # type: ignore[return-value]

        self.connections[peer_id].append(swarm_conn)

        # Trim if we exceed max connections
        max_conns = self.connection_config.max_connections_per_peer
        if len(self.connections[peer_id]) > max_conns:
            self._trim_connections(peer_id)

        # Store muxed_conn with peer id
        self.connections[muxed_conn.peer_id] = swarm_conn
        # Call notifiers since event occurred
        await self.notify_connected(swarm_conn)
        return swarm_conn

    def _trim_connections(self, peer_id: ID) -> None:
        """
        Remove oldest connections when limit is exceeded.
        """
        connections = self.connections[peer_id]
        if len(connections) <= self.connection_config.max_connections_per_peer:
            return

        # Sort by creation time and remove oldest
        # For now, just keep the most recent connections
        max_conns = self.connection_config.max_connections_per_peer
        connections_to_remove = connections[:-max_conns]

        for conn in connections_to_remove:
            logger.debug(f"Trimming old connection for peer {peer_id}")
            trio.lowlevel.spawn_system_task(self._close_connection_async, conn)

        # Keep only the most recent connections
        max_conns = self.connection_config.max_connections_per_peer
        self.connections[peer_id] = connections[-max_conns:]

    async def _close_connection_async(self, connection: INetConn) -> None:
        """Close a connection asynchronously."""
        try:
            await connection.close()
        except Exception as e:
            logger.warning(f"Error closing connection: {e}")

    def remove_conn(self, swarm_conn: SwarmConn) -> None:
        """
        Simply remove the connection from Swarm's records, without closing
        the connection.
        """
        peer_id = swarm_conn.muxed_conn.peer_id

        if peer_id in self.connections:
            self.connections[peer_id] = [
                conn for conn in self.connections[peer_id] if conn != swarm_conn
            ]
            if not self.connections[peer_id]:
                del self.connections[peer_id]
        if peer_id not in self.connections:
            return
        del self.connections[peer_id]

    # Notifee

@@ -767,21 +437,3 @@ class Swarm(Service, INetworkService):
        async with trio.open_nursery() as nursery:
            for notifee in self.notifees:
                nursery.start_soon(notifier, notifee)

    # Backward compatibility properties
    @property
    def connections_legacy(self) -> dict[ID, INetConn]:
        """
        Legacy 1:1 mapping for backward compatibility.

        Returns
        -------
        dict[ID, INetConn]
            Legacy mapping with only the first connection per peer.

        """
        legacy_conns = {}
        for peer_id, conns in self.connections.items():
            if conns:
                legacy_conns[peer_id] = conns[0]
        return legacy_conns

@@ -1,7 +1,5 @@
from typing import Any, cast

import multiaddr

from libp2p.crypto.ed25519 import Ed25519PublicKey
from libp2p.crypto.keys import PrivateKey, PublicKey
from libp2p.crypto.rsa import RSAPublicKey
@@ -133,9 +131,6 @@ class Envelope:
        )
        return False

    def _env_addrs_set(self) -> set[multiaddr.Multiaddr]:
        return {b for b in self.record().addrs}


def pub_key_to_protobuf(pub_key: PublicKey) -> cryto_pb.PublicKey:
    """

@@ -16,7 +16,6 @@ import trio
from trio import MemoryReceiveChannel, MemorySendChannel

from libp2p.abc import (
    IHost,
    IPeerStore,
)
from libp2p.crypto.keys import (
@@ -24,8 +23,7 @@ from libp2p.crypto.keys import (
    PrivateKey,
    PublicKey,
)
from libp2p.peer.envelope import Envelope, seal_record
from libp2p.peer.peer_record import PeerRecord
from libp2p.peer.envelope import Envelope

from .id import (
    ID,
@@ -41,86 +39,6 @@ from .peerinfo import (
PERMANENT_ADDR_TTL = 0


def create_signed_peer_record(
    peer_id: ID, addrs: list[Multiaddr], pvt_key: PrivateKey
) -> Envelope:
    """Creates a signed_peer_record wrapped in an Envelope"""
    record = PeerRecord(peer_id, addrs)
    envelope = seal_record(record, pvt_key)
    return envelope


def env_to_send_in_RPC(host: IHost) -> tuple[bytes, bool]:
    """
    Return the signed peer record (Envelope) to be sent in an RPC.

    This function checks whether the host already has a cached signed peer record
    (SPR). If one exists and its addresses match the host's current listen
    addresses, the cached envelope is reused. Otherwise, a new signed peer record
    is created, cached, and returned.

    Parameters
    ----------
    host : IHost
        The local host instance, providing access to peer ID, listen addresses,
        private key, and the peerstore.

    Returns
    -------
    tuple[bytes, bool]
        A 2-tuple where the first element is the serialized envelope (bytes)
        for the signed peer record, and the second element is a boolean flag
        indicating whether a new record was created (True) or an existing cached
        one was reused (False).

    """
    listen_addrs_set = {addr for addr in host.get_addrs()}
    local_env = host.get_peerstore().get_local_record()

    if local_env is None:
        # No cached SPR yet -> create one
        return issue_and_cache_local_record(host), True
    else:
        record_addrs_set = local_env._env_addrs_set()
        if record_addrs_set == listen_addrs_set:
            # Perfect match -> reuse cached envelope
            return local_env.marshal_envelope(), False
        else:
            # Addresses changed -> issue a new SPR and cache it
            return issue_and_cache_local_record(host), True


def issue_and_cache_local_record(host: IHost) -> bytes:
    """
    Create and cache a new signed peer record (Envelope) for the host.

    This function generates a new signed peer record from the host's peer ID,
    listen addresses, and private key. The resulting envelope is stored in
    the peerstore as the local record for future reuse.

    Parameters
    ----------
    host : IHost
        The local host instance, providing access to peer ID, listen addresses,
        private key, and the peerstore.

    Returns
    -------
    bytes
        The serialized envelope (bytes) representing the newly created signed
        peer record.

    """
    env = create_signed_peer_record(
        host.get_id(),
        host.get_addrs(),
        host.get_private_key(),
    )
    # Cache it for reuse next time
    host.get_peerstore().set_local_record(env)
    return env.marshal_envelope()


class PeerRecordState:
    envelope: Envelope
    seq: int
@@ -137,17 +55,8 @@ class PeerStore(IPeerStore):
        self.peer_data_map = defaultdict(PeerData)
        self.addr_update_channels: dict[ID, MemorySendChannel[Multiaddr]] = {}
        self.peer_record_map: dict[ID, PeerRecordState] = {}
        self.local_peer_record: Envelope | None = None
        self.max_records = max_records

    def get_local_record(self) -> Envelope | None:
        """Get the local-signed-record wrapped in Envelope"""
        return self.local_peer_record

    def set_local_record(self, envelope: Envelope) -> None:
        """Set the local-signed-record wrapped in Envelope"""
        self.local_peer_record = envelope

    def peer_info(self, peer_id: ID) -> PeerInfo:
        """
        :param peer_id: peer ID to get info for

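
env_to_send_in_RPC above is a small cache-or-reissue decision. A self-contained sketch of that rule, with plain string sets standing in for the host's listen addresses and the cached record's addresses:

def cache_or_reissue(listen_addrs: set[str],
                     cached_record_addrs: set[str] | None) -> tuple[str, bool]:
    # Returns (action, created_new) following the logic above.
    if cached_record_addrs is None:
        return "issue-and-cache", True        # no cached SPR yet
    if cached_record_addrs == listen_addrs:
        return "reuse-cached", False          # addresses unchanged
    return "issue-and-cache", True            # addresses changed

assert cache_or_reissue({"/ip4/1.2.3.4/tcp/1"}, None) == ("issue-and-cache", True)
assert cache_or_reissue({"/ip4/1.2.3.4/tcp/1"}, {"/ip4/1.2.3.4/tcp/1"}) == ("reuse-cached", False)
assert cache_or_reissue({"/ip4/1.2.3.4/tcp/2"}, {"/ip4/1.2.3.4/tcp/1"}) == ("issue-and-cache", True)
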
@@ -48,11 +48,12 @@ class Multiselect(IMultiselectMuxer):
        """
        self.handlers[protocol] = handler

    # FIXME: Make TProtocol Optional[TProtocol] to keep types consistent
    async def negotiate(
        self,
        communicator: IMultiselectCommunicator,
        negotiate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
    ) -> tuple[TProtocol | None, StreamHandlerFn | None]:
    ) -> tuple[TProtocol, StreamHandlerFn | None]:
        """
        Negotiate performs protocol selection.

@@ -83,14 +84,14 @@ class Multiselect(IMultiselectMuxer):
                    raise MultiselectError() from error

                else:
                    protocol_to_check = None if not command else TProtocol(command)
                    if protocol_to_check in self.handlers:
                    protocol = TProtocol(command)
                    if protocol in self.handlers:
                        try:
                            await communicator.write(command)
                            await communicator.write(protocol)
                        except MultiselectCommunicatorError as error:
                            raise MultiselectError() from error

                        return protocol_to_check, self.handlers[protocol_to_check]
                        return protocol, self.handlers[protocol]
                try:
                    await communicator.write(PROTOCOL_NOT_FOUND_MSG)
                except MultiselectCommunicatorError as error:

@@ -134,10 +134,8 @@ class MultiselectClient(IMultiselectClient):
        :raise MultiselectClientError: raised when protocol negotiation failed
        :return: selected protocol
        """
        # Represent `None` protocol as an empty string.
        protocol_str = protocol if protocol is not None else ""
        try:
            await communicator.write(protocol_str)
            await communicator.write(protocol)
        except MultiselectCommunicatorError as error:
            raise MultiselectClientError() from error

@@ -147,7 +145,7 @@ class MultiselectClient(IMultiselectClient):
        except MultiselectCommunicatorError as error:
            raise MultiselectClientError() from error

        if response == protocol_str:
        if response == protocol:
            return protocol
        if response == PROTOCOL_NOT_FOUND_MSG:
            raise MultiselectClientError("protocol not supported")

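
The client logic above relies on the multistream-select echo rule: the listener repeats a supported protocol string verbatim, or answers with the not-found marker. A minimal sketch of both ends of that exchange, with "na" as a hypothetical stand-in for the real PROTOCOL_NOT_FOUND_MSG constant:

PROTOCOL_NOT_FOUND_MSG = "na"  # hypothetical stand-in for the real constant

def listener_reply(requested: str, supported: set[str]) -> str:
    # Echo the protocol if we can handle it, otherwise answer "na".
    return requested if requested in supported else PROTOCOL_NOT_FOUND_MSG

def client_interpret(requested: str, response: str) -> str:
    # Mirrors the response checks in the client code above.
    if response == requested:
        return response                      # negotiation succeeded
    if response == PROTOCOL_NOT_FOUND_MSG:
        raise ValueError("protocol not supported")
    raise ValueError(f"unrecognized response: {response}")

supported = {"/meshsub/1.1.0", "/ipfs/ping/1.0.0"}
reply = listener_reply("/meshsub/1.1.0", supported)
assert client_interpret("/meshsub/1.1.0", reply) == "/meshsub/1.1.0"
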
@@ -1,5 +1,3 @@
from builtins import AssertionError

from libp2p.abc import (
    IMultiselectCommunicator,
)
@@ -32,14 +30,10 @@ class MultiselectCommunicator(IMultiselectCommunicator):
        """
        :raise MultiselectCommunicatorError: raised when failed to write to underlying reader
        """  # noqa: E501
        if msg_str is None:
            msg_bytes = encode_delim(b"")
        else:
            msg_bytes = encode_delim(msg_str.encode())
        msg_bytes = encode_delim(msg_str.encode())
        try:
            await self.read_writer.write(msg_bytes)
        # Handle for connection close during ongoing negotiation in QUIC
        except (IOException, AssertionError, ValueError) as error:
        except IOException as error:
            raise MultiselectCommunicatorError(
                "fail to write to multiselect communicator"
            ) from error

@@ -15,7 +15,6 @@ from libp2p.custom_types import (
from libp2p.peer.id import (
    ID,
)
from libp2p.peer.peerstore import env_to_send_in_RPC

from .exceptions import (
    PubsubRouterError,
@@ -104,11 +103,6 @@ class FloodSub(IPubsubRouter):
        )
        rpc_msg = rpc_pb2.RPC(publish=[pubsub_msg])

        # Add the senderRecord of the peer in the RPC msg
        if isinstance(self.pubsub, Pubsub):
            envelope_bytes, _ = env_to_send_in_RPC(self.pubsub.host)
            rpc_msg.senderRecord = envelope_bytes

        logger.debug("publishing message %s", pubsub_msg)

        if self.pubsub is None:

@ -1,3 +1,6 @@
|
||||
from ast import (
|
||||
literal_eval,
|
||||
)
|
||||
from collections import (
|
||||
defaultdict,
|
||||
)
|
||||
@ -19,7 +22,6 @@ from libp2p.abc import (
|
||||
IPubsubRouter,
|
||||
)
|
||||
from libp2p.custom_types import (
|
||||
MessageID,
|
||||
TProtocol,
|
||||
)
|
||||
from libp2p.peer.id import (
|
||||
@ -32,12 +34,10 @@ from libp2p.peer.peerinfo import (
|
||||
)
|
||||
from libp2p.peer.peerstore import (
|
||||
PERMANENT_ADDR_TTL,
|
||||
env_to_send_in_RPC,
|
||||
)
|
||||
from libp2p.pubsub import (
|
||||
floodsub,
|
||||
)
|
||||
from libp2p.pubsub.utils import maybe_consume_signed_record
|
from libp2p.tools.async_service import (
Service,
)
@@ -54,10 +54,6 @@ from .pb import (
from .pubsub import (
Pubsub,
)
from .utils import (
parse_message_id_safe,
safe_parse_message_id,
)

PROTOCOL_ID = TProtocol("/meshsub/1.0.0")
PROTOCOL_ID_V11 = TProtocol("/meshsub/1.1.0")
@@ -230,12 +226,6 @@ class GossipSub(IPubsubRouter, Service):
:param rpc: RPC message
:param sender_peer_id: id of the peer who sent the message
"""
# Process the senderRecord if sent
if isinstance(self.pubsub, Pubsub):
if not maybe_consume_signed_record(rpc, self.pubsub.host, sender_peer_id):
logger.error("Received an invalid-signed-record, ignoring the message")
return

control_message = rpc.control

# Relay each rpc control message to the appropriate handler
@@ -263,11 +253,6 @@ class GossipSub(IPubsubRouter, Service):
)
rpc_msg = rpc_pb2.RPC(publish=[pubsub_msg])

# Add the senderRecord of the peer in the RPC msg
if isinstance(self.pubsub, Pubsub):
envelope_bytes, _ = env_to_send_in_RPC(self.pubsub.host)
rpc_msg.senderRecord = envelope_bytes

logger.debug("publishing message %s", pubsub_msg)

for peer_id in peers_gen:
@@ -308,8 +293,7 @@ class GossipSub(IPubsubRouter, Service):
floodsub_peers: set[ID] = {
peer_id
for peer_id in self.pubsub.peer_topics[topic]
if peer_id in self.peer_protocol
and self.peer_protocol[peer_id] == floodsub.PROTOCOL_ID
if self.peer_protocol[peer_id] == floodsub.PROTOCOL_ID
}
send_to.update(floodsub_peers)

@@ -791,16 +775,16 @@ class GossipSub(IPubsubRouter, Service):
# Get list of all seen (seqnos, from) from the (seqno, from) tuples in
# seen_messages cache
seen_seqnos_and_peers = [
str(seqno_and_from)
for seqno_and_from in self.pubsub.seen_messages.cache.keys()
seqno_and_from for seqno_and_from in self.pubsub.seen_messages.cache.keys()
]

# Add all unknown message ids (ids that appear in ihave_msg but not in
# seen_seqnos) to list of messages we want to request
msg_ids_wanted: list[MessageID] = [
parse_message_id_safe(msg_id)
# FIXME: Update type of message ID
msg_ids_wanted: list[Any] = [
msg_id
for msg_id in ihave_msg.messageIDs
if msg_id not in seen_seqnos_and_peers
if literal_eval(msg_id) not in seen_seqnos_and_peers
]

# Request messages with IWANT message
@@ -814,9 +798,9 @@ class GossipSub(IPubsubRouter, Service):
Forwards all request messages that are present in mcache to the
requesting peer.
"""
msg_ids: list[tuple[bytes, bytes]] = [
safe_parse_message_id(msg) for msg in iwant_msg.messageIDs
]
# FIXME: Update type of message ID
# FIXME: Find a better way to parse the msg ids
msg_ids: list[Any] = [literal_eval(msg) for msg in iwant_msg.messageIDs]
msgs_to_forward: list[rpc_pb2.Message] = []
for msg_id_iwant in msg_ids:
# Check if the wanted message ID is present in mcache
@@ -834,13 +818,6 @@ class GossipSub(IPubsubRouter, Service):
# 1) Package these messages into a single packet
packet: rpc_pb2.RPC = rpc_pb2.RPC()

# Here the an RPC message is being created and published in response
# to the iwant control msg, so we will send a freshly created senderRecord
# with the RPC msg
if isinstance(self.pubsub, Pubsub):
envelope_bytes, _ = env_to_send_in_RPC(self.pubsub.host)
packet.senderRecord = envelope_bytes

packet.publish.extend(msgs_to_forward)

if self.pubsub is None:
@@ -996,12 +973,6 @@ class GossipSub(IPubsubRouter, Service):
raise NoPubsubAttached
# Add control message to packet
packet: rpc_pb2.RPC = rpc_pb2.RPC()

# Add the sender's peer-record in the RPC msg
if isinstance(self.pubsub, Pubsub):
envelope_bytes, _ = env_to_send_in_RPC(self.pubsub.host)
packet.senderRecord = envelope_bytes

packet.control.CopyFrom(control_msg)

# Get stream for peer from pubsub

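For context, the hunks above all follow one pattern: every outgoing RPC gets a freshly signed peer record attached, and every incoming RPC is validated before any of its payload is processed. A minimal sketch of that pattern, using the helper names from this changeset (import paths assumed; only the side of this compare that carries the feature has them):

```python
# Illustrative sketch only, not part of the diff.
from libp2p.peer.peerstore import env_to_send_in_RPC
from libp2p.pubsub.pb import rpc_pb2
from libp2p.pubsub.utils import maybe_consume_signed_record


def attach_sender_record(host) -> rpc_pb2.RPC:
    # Sender side: stamp the outgoing RPC with a signed peer record.
    rpc_msg = rpc_pb2.RPC()
    envelope_bytes, _ = env_to_send_in_RPC(host)
    rpc_msg.senderRecord = envelope_bytes
    return rpc_msg


def accept_rpc(rpc_msg: rpc_pb2.RPC, host, sender_peer_id) -> bool:
    # Receiver side: an invalid record rejects the whole RPC up front.
    return maybe_consume_signed_record(rpc_msg, host, sender_peer_id)
```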
@@ -14,7 +14,6 @@ message RPC {
}

optional ControlMessage control = 3;
optional bytes senderRecord = 4;
}

message Message {

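The `senderRecord` field above is the schema-level half of that change: an optional `bytes` field numbered 4 on `RPC`. A small sketch of the round-trip through the generated bindings, on the side of this compare that has the field (placeholder bytes, not a real envelope):

```python
# Sketch, not from the diff.
from libp2p.pubsub.pb import rpc_pb2

rpc_msg = rpc_pb2.RPC()
rpc_msg.senderRecord = b"\x0a\x02hi"  # placeholder payload

wire = rpc_msg.SerializeToString()
decoded = rpc_pb2.RPC()
decoded.ParseFromString(wire)

# `optional` fields expose presence via HasField.
assert decoded.HasField("senderRecord")
```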
@ -1,12 +1,11 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: libp2p/pubsub/pb/rpc.proto
|
||||
# Protobuf Python Version: 4.25.3
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf.internal import builder as _builder
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
from google.protobuf.internal import builder as _builder
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
@ -14,39 +13,39 @@ _sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1alibp2p/pubsub/pb/rpc.proto\x12\tpubsub.pb\"\xca\x01\n\x03RPC\x12-\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x16.pubsub.pb.RPC.SubOpts\x12#\n\x07publish\x18\x02 \x03(\x0b\x32\x12.pubsub.pb.Message\x12*\n\x07\x63ontrol\x18\x03 \x01(\x0b\x32\x19.pubsub.pb.ControlMessage\x12\x14\n\x0csenderRecord\x18\x04 \x01(\x0c\x1a-\n\x07SubOpts\x12\x11\n\tsubscribe\x18\x01 \x01(\x08\x12\x0f\n\x07topicid\x18\x02 \x01(\t\"i\n\x07Message\x12\x0f\n\x07\x66rom_id\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\r\n\x05seqno\x18\x03 \x01(\x0c\x12\x10\n\x08topicIDs\x18\x04 \x03(\t\x12\x11\n\tsignature\x18\x05 \x01(\x0c\x12\x0b\n\x03key\x18\x06 \x01(\x0c\"\xb0\x01\n\x0e\x43ontrolMessage\x12&\n\x05ihave\x18\x01 \x03(\x0b\x32\x17.pubsub.pb.ControlIHave\x12&\n\x05iwant\x18\x02 \x03(\x0b\x32\x17.pubsub.pb.ControlIWant\x12&\n\x05graft\x18\x03 \x03(\x0b\x32\x17.pubsub.pb.ControlGraft\x12&\n\x05prune\x18\x04 \x03(\x0b\x32\x17.pubsub.pb.ControlPrune\"3\n\x0c\x43ontrolIHave\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\x12\n\nmessageIDs\x18\x02 \x03(\t\"\"\n\x0c\x43ontrolIWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\t\"\x1f\n\x0c\x43ontrolGraft\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"T\n\x0c\x43ontrolPrune\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\"\n\x05peers\x18\x02 \x03(\x0b\x32\x13.pubsub.pb.PeerInfo\x12\x0f\n\x07\x62\x61\x63koff\x18\x03 \x01(\x04\"4\n\x08PeerInfo\x12\x0e\n\x06peerID\x18\x01 \x01(\x0c\x12\x18\n\x10signedPeerRecord\x18\x02 \x01(\x0c\"\x87\x03\n\x0fTopicDescriptor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x04\x61uth\x18\x02 \x01(\x0b\x32#.pubsub.pb.TopicDescriptor.AuthOpts\x12/\n\x03\x65nc\x18\x03 \x01(\x0b\x32\".pubsub.pb.TopicDescriptor.EncOpts\x1a|\n\x08\x41uthOpts\x12:\n\x04mode\x18\x01 \x01(\x0e\x32,.pubsub.pb.TopicDescriptor.AuthOpts.AuthMode\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\"&\n\x08\x41uthMode\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03KEY\x10\x01\x12\x07\n\x03WOT\x10\x02\x1a\x83\x01\n\x07\x45ncOpts\x12\x38\n\x04mode\x18\x01 \x01(\x0e\x32*.pubsub.pb.TopicDescriptor.EncOpts.EncMode\x12\x11\n\tkeyHashes\x18\x02 \x03(\x0c\"+\n\x07\x45ncMode\x12\x08\n\x04NONE\x10\x00\x12\r\n\tSHAREDKEY\x10\x01\x12\x07\n\x03WOT\x10\x02')
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1alibp2p/pubsub/pb/rpc.proto\x12\tpubsub.pb\"\xb4\x01\n\x03RPC\x12-\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x16.pubsub.pb.RPC.SubOpts\x12#\n\x07publish\x18\x02 \x03(\x0b\x32\x12.pubsub.pb.Message\x12*\n\x07\x63ontrol\x18\x03 \x01(\x0b\x32\x19.pubsub.pb.ControlMessage\x1a-\n\x07SubOpts\x12\x11\n\tsubscribe\x18\x01 \x01(\x08\x12\x0f\n\x07topicid\x18\x02 \x01(\t\"i\n\x07Message\x12\x0f\n\x07\x66rom_id\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\r\n\x05seqno\x18\x03 \x01(\x0c\x12\x10\n\x08topicIDs\x18\x04 \x03(\t\x12\x11\n\tsignature\x18\x05 \x01(\x0c\x12\x0b\n\x03key\x18\x06 \x01(\x0c\"\xb0\x01\n\x0e\x43ontrolMessage\x12&\n\x05ihave\x18\x01 \x03(\x0b\x32\x17.pubsub.pb.ControlIHave\x12&\n\x05iwant\x18\x02 \x03(\x0b\x32\x17.pubsub.pb.ControlIWant\x12&\n\x05graft\x18\x03 \x03(\x0b\x32\x17.pubsub.pb.ControlGraft\x12&\n\x05prune\x18\x04 \x03(\x0b\x32\x17.pubsub.pb.ControlPrune\"3\n\x0c\x43ontrolIHave\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\x12\n\nmessageIDs\x18\x02 \x03(\t\"\"\n\x0c\x43ontrolIWant\x12\x12\n\nmessageIDs\x18\x01 \x03(\t\"\x1f\n\x0c\x43ontrolGraft\x12\x0f\n\x07topicID\x18\x01 \x01(\t\"T\n\x0c\x43ontrolPrune\x12\x0f\n\x07topicID\x18\x01 \x01(\t\x12\"\n\x05peers\x18\x02 \x03(\x0b\x32\x13.pubsub.pb.PeerInfo\x12\x0f\n\x07\x62\x61\x63koff\x18\x03 \x01(\x04\"4\n\x08PeerInfo\x12\x0e\n\x06peerID\x18\x01 \x01(\x0c\x12\x18\n\x10signedPeerRecord\x18\x02 \x01(\x0c\"\x87\x03\n\x0fTopicDescriptor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x04\x61uth\x18\x02 \x01(\x0b\x32#.pubsub.pb.TopicDescriptor.AuthOpts\x12/\n\x03\x65nc\x18\x03 \x01(\x0b\x32\".pubsub.pb.TopicDescriptor.EncOpts\x1a|\n\x08\x41uthOpts\x12:\n\x04mode\x18\x01 \x01(\x0e\x32,.pubsub.pb.TopicDescriptor.AuthOpts.AuthMode\x12\x0c\n\x04keys\x18\x02 \x03(\x0c\"&\n\x08\x41uthMode\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03KEY\x10\x01\x12\x07\n\x03WOT\x10\x02\x1a\x83\x01\n\x07\x45ncOpts\x12\x38\n\x04mode\x18\x01 \x01(\x0e\x32*.pubsub.pb.TopicDescriptor.EncOpts.EncMode\x12\x11\n\tkeyHashes\x18\x02 \x03(\x0c\"+\n\x07\x45ncMode\x12\x08\n\x04NONE\x10\x00\x12\r\n\tSHAREDKEY\x10\x01\x12\x07\n\x03WOT\x10\x02')
|
||||
|
||||
_globals = globals()
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
|
||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.pubsub.pb.rpc_pb2', _globals)
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'libp2p.pubsub.pb.rpc_pb2', globals())
|
||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
_globals['_RPC']._serialized_start=42
|
||||
_globals['_RPC']._serialized_end=244
|
||||
_globals['_RPC_SUBOPTS']._serialized_start=199
|
||||
_globals['_RPC_SUBOPTS']._serialized_end=244
|
||||
_globals['_MESSAGE']._serialized_start=246
|
||||
_globals['_MESSAGE']._serialized_end=351
|
||||
_globals['_CONTROLMESSAGE']._serialized_start=354
|
||||
_globals['_CONTROLMESSAGE']._serialized_end=530
|
||||
_globals['_CONTROLIHAVE']._serialized_start=532
|
||||
_globals['_CONTROLIHAVE']._serialized_end=583
|
||||
_globals['_CONTROLIWANT']._serialized_start=585
|
||||
_globals['_CONTROLIWANT']._serialized_end=619
|
||||
_globals['_CONTROLGRAFT']._serialized_start=621
|
||||
_globals['_CONTROLGRAFT']._serialized_end=652
|
||||
_globals['_CONTROLPRUNE']._serialized_start=654
|
||||
_globals['_CONTROLPRUNE']._serialized_end=738
|
||||
_globals['_PEERINFO']._serialized_start=740
|
||||
_globals['_PEERINFO']._serialized_end=792
|
||||
_globals['_TOPICDESCRIPTOR']._serialized_start=795
|
||||
_globals['_TOPICDESCRIPTOR']._serialized_end=1186
|
||||
_globals['_TOPICDESCRIPTOR_AUTHOPTS']._serialized_start=928
|
||||
_globals['_TOPICDESCRIPTOR_AUTHOPTS']._serialized_end=1052
|
||||
_globals['_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE']._serialized_start=1014
|
||||
_globals['_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE']._serialized_end=1052
|
||||
_globals['_TOPICDESCRIPTOR_ENCOPTS']._serialized_start=1055
|
||||
_globals['_TOPICDESCRIPTOR_ENCOPTS']._serialized_end=1186
|
||||
_globals['_TOPICDESCRIPTOR_ENCOPTS_ENCMODE']._serialized_start=1143
|
||||
_globals['_TOPICDESCRIPTOR_ENCOPTS_ENCMODE']._serialized_end=1186
|
||||
_RPC._serialized_start=42
|
||||
_RPC._serialized_end=222
|
||||
_RPC_SUBOPTS._serialized_start=177
|
||||
_RPC_SUBOPTS._serialized_end=222
|
||||
_MESSAGE._serialized_start=224
|
||||
_MESSAGE._serialized_end=329
|
||||
_CONTROLMESSAGE._serialized_start=332
|
||||
_CONTROLMESSAGE._serialized_end=508
|
||||
_CONTROLIHAVE._serialized_start=510
|
||||
_CONTROLIHAVE._serialized_end=561
|
||||
_CONTROLIWANT._serialized_start=563
|
||||
_CONTROLIWANT._serialized_end=597
|
||||
_CONTROLGRAFT._serialized_start=599
|
||||
_CONTROLGRAFT._serialized_end=630
|
||||
_CONTROLPRUNE._serialized_start=632
|
||||
_CONTROLPRUNE._serialized_end=716
|
||||
_PEERINFO._serialized_start=718
|
||||
_PEERINFO._serialized_end=770
|
||||
_TOPICDESCRIPTOR._serialized_start=773
|
||||
_TOPICDESCRIPTOR._serialized_end=1164
|
||||
_TOPICDESCRIPTOR_AUTHOPTS._serialized_start=906
|
||||
_TOPICDESCRIPTOR_AUTHOPTS._serialized_end=1030
|
||||
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_start=992
|
||||
_TOPICDESCRIPTOR_AUTHOPTS_AUTHMODE._serialized_end=1030
|
||||
_TOPICDESCRIPTOR_ENCOPTS._serialized_start=1033
|
||||
_TOPICDESCRIPTOR_ENCOPTS._serialized_end=1164
|
||||
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_start=1121
|
||||
_TOPICDESCRIPTOR_ENCOPTS_ENCMODE._serialized_end=1164
|
||||
# @@protoc_insertion_point(module_scope)
|
||||
|
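The serialized descriptor blobs above must stay in sync with `rpc.proto`; they are regenerated, never hand-edited. One plausible way to rebuild both `rpc_pb2.py` and the stub file below, assuming the `grpcio-tools` package is installed and the `mypy-protobuf` plugin (`protoc-gen-mypy`) is on `PATH`:

```python
# Hypothetical regeneration helper, not part of the repo.
from grpc_tools import protoc

protoc.main(
    [
        "protoc",
        "-I.",
        "--python_out=.",
        "--mypy_out=.",  # provided by the mypy-protobuf plugin
        "libp2p/pubsub/pb/rpc.proto",
    ]
)
```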
@@ -1,132 +1,323 @@
from google.protobuf.internal import containers as _containers
from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
Modified from https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto"""

DESCRIPTOR: _descriptor.FileDescriptor
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import sys
import typing

class RPC(_message.Message):
__slots__ = ("subscriptions", "publish", "control", "senderRecord")
class SubOpts(_message.Message):
__slots__ = ("subscribe", "topicid")
SUBSCRIBE_FIELD_NUMBER: _ClassVar[int]
TOPICID_FIELD_NUMBER: _ClassVar[int]
subscribe: bool
topicid: str
def __init__(self, subscribe: bool = ..., topicid: _Optional[str] = ...) -> None: ...
SUBSCRIPTIONS_FIELD_NUMBER: _ClassVar[int]
PUBLISH_FIELD_NUMBER: _ClassVar[int]
CONTROL_FIELD_NUMBER: _ClassVar[int]
SENDERRECORD_FIELD_NUMBER: _ClassVar[int]
subscriptions: _containers.RepeatedCompositeFieldContainer[RPC.SubOpts]
publish: _containers.RepeatedCompositeFieldContainer[Message]
control: ControlMessage
senderRecord: bytes
def __init__(self, subscriptions: _Optional[_Iterable[_Union[RPC.SubOpts, _Mapping]]] = ..., publish: _Optional[_Iterable[_Union[Message, _Mapping]]] = ..., control: _Optional[_Union[ControlMessage, _Mapping]] = ..., senderRecord: _Optional[bytes] = ...) -> None: ... # type: ignore
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions

class Message(_message.Message):
__slots__ = ("from_id", "data", "seqno", "topicIDs", "signature", "key")
FROM_ID_FIELD_NUMBER: _ClassVar[int]
DATA_FIELD_NUMBER: _ClassVar[int]
SEQNO_FIELD_NUMBER: _ClassVar[int]
TOPICIDS_FIELD_NUMBER: _ClassVar[int]
SIGNATURE_FIELD_NUMBER: _ClassVar[int]
KEY_FIELD_NUMBER: _ClassVar[int]
from_id: bytes
data: bytes
seqno: bytes
topicIDs: _containers.RepeatedScalarFieldContainer[str]
signature: bytes
key: bytes
def __init__(self, from_id: _Optional[bytes] = ..., data: _Optional[bytes] = ..., seqno: _Optional[bytes] = ..., topicIDs: _Optional[_Iterable[str]] = ..., signature: _Optional[bytes] = ..., key: _Optional[bytes] = ...) -> None: ...
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor

class ControlMessage(_message.Message):
__slots__ = ("ihave", "iwant", "graft", "prune")
IHAVE_FIELD_NUMBER: _ClassVar[int]
IWANT_FIELD_NUMBER: _ClassVar[int]
GRAFT_FIELD_NUMBER: _ClassVar[int]
PRUNE_FIELD_NUMBER: _ClassVar[int]
ihave: _containers.RepeatedCompositeFieldContainer[ControlIHave]
iwant: _containers.RepeatedCompositeFieldContainer[ControlIWant]
graft: _containers.RepeatedCompositeFieldContainer[ControlGraft]
prune: _containers.RepeatedCompositeFieldContainer[ControlPrune]
def __init__(self, ihave: _Optional[_Iterable[_Union[ControlIHave, _Mapping]]] = ..., iwant: _Optional[_Iterable[_Union[ControlIWant, _Mapping]]] = ..., graft: _Optional[_Iterable[_Union[ControlGraft, _Mapping]]] = ..., prune: _Optional[_Iterable[_Union[ControlPrune, _Mapping]]] = ...) -> None: ... # type: ignore
@typing.final
class RPC(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

class ControlIHave(_message.Message):
__slots__ = ("topicID", "messageIDs")
TOPICID_FIELD_NUMBER: _ClassVar[int]
MESSAGEIDS_FIELD_NUMBER: _ClassVar[int]
topicID: str
messageIDs: _containers.RepeatedScalarFieldContainer[str]
def __init__(self, topicID: _Optional[str] = ..., messageIDs: _Optional[_Iterable[str]] = ...) -> None: ...
@typing.final
class SubOpts(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

class ControlIWant(_message.Message):
__slots__ = ("messageIDs",)
MESSAGEIDS_FIELD_NUMBER: _ClassVar[int]
messageIDs: _containers.RepeatedScalarFieldContainer[str]
def __init__(self, messageIDs: _Optional[_Iterable[str]] = ...) -> None: ...
SUBSCRIBE_FIELD_NUMBER: builtins.int
TOPICID_FIELD_NUMBER: builtins.int
subscribe: builtins.bool
"""subscribe or unsubscribe"""
topicid: builtins.str
def __init__(
self,
*,
subscribe: builtins.bool | None = ...,
topicid: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["subscribe", b"subscribe", "topicid", b"topicid"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["subscribe", b"subscribe", "topicid", b"topicid"]) -> None: ...

class ControlGraft(_message.Message):
__slots__ = ("topicID",)
TOPICID_FIELD_NUMBER: _ClassVar[int]
topicID: str
def __init__(self, topicID: _Optional[str] = ...) -> None: ...
SUBSCRIPTIONS_FIELD_NUMBER: builtins.int
PUBLISH_FIELD_NUMBER: builtins.int
CONTROL_FIELD_NUMBER: builtins.int
@property
def subscriptions(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___RPC.SubOpts]: ...
@property
def publish(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Message]: ...
@property
def control(self) -> global___ControlMessage: ...
def __init__(
self,
*,
subscriptions: collections.abc.Iterable[global___RPC.SubOpts] | None = ...,
publish: collections.abc.Iterable[global___Message] | None = ...,
control: global___ControlMessage | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["control", b"control"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["control", b"control", "publish", b"publish", "subscriptions", b"subscriptions"]) -> None: ...

class ControlPrune(_message.Message):
__slots__ = ("topicID", "peers", "backoff")
TOPICID_FIELD_NUMBER: _ClassVar[int]
PEERS_FIELD_NUMBER: _ClassVar[int]
BACKOFF_FIELD_NUMBER: _ClassVar[int]
topicID: str
peers: _containers.RepeatedCompositeFieldContainer[PeerInfo]
backoff: int
def __init__(self, topicID: _Optional[str] = ..., peers: _Optional[_Iterable[_Union[PeerInfo, _Mapping]]] = ..., backoff: _Optional[int] = ...) -> None: ... # type: ignore
global___RPC = RPC

class PeerInfo(_message.Message):
__slots__ = ("peerID", "signedPeerRecord")
PEERID_FIELD_NUMBER: _ClassVar[int]
SIGNEDPEERRECORD_FIELD_NUMBER: _ClassVar[int]
peerID: bytes
signedPeerRecord: bytes
def __init__(self, peerID: _Optional[bytes] = ..., signedPeerRecord: _Optional[bytes] = ...) -> None: ...
@typing.final
class Message(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

class TopicDescriptor(_message.Message):
__slots__ = ("name", "auth", "enc")
class AuthOpts(_message.Message):
__slots__ = ("mode", "keys")
class AuthMode(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
__slots__ = ()
NONE: _ClassVar[TopicDescriptor.AuthOpts.AuthMode]
KEY: _ClassVar[TopicDescriptor.AuthOpts.AuthMode]
WOT: _ClassVar[TopicDescriptor.AuthOpts.AuthMode]
NONE: TopicDescriptor.AuthOpts.AuthMode
KEY: TopicDescriptor.AuthOpts.AuthMode
WOT: TopicDescriptor.AuthOpts.AuthMode
MODE_FIELD_NUMBER: _ClassVar[int]
KEYS_FIELD_NUMBER: _ClassVar[int]
mode: TopicDescriptor.AuthOpts.AuthMode
keys: _containers.RepeatedScalarFieldContainer[bytes]
def __init__(self, mode: _Optional[_Union[TopicDescriptor.AuthOpts.AuthMode, str]] = ..., keys: _Optional[_Iterable[bytes]] = ...) -> None: ...
class EncOpts(_message.Message):
__slots__ = ("mode", "keyHashes")
class EncMode(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
__slots__ = ()
NONE: _ClassVar[TopicDescriptor.EncOpts.EncMode]
SHAREDKEY: _ClassVar[TopicDescriptor.EncOpts.EncMode]
WOT: _ClassVar[TopicDescriptor.EncOpts.EncMode]
NONE: TopicDescriptor.EncOpts.EncMode
SHAREDKEY: TopicDescriptor.EncOpts.EncMode
WOT: TopicDescriptor.EncOpts.EncMode
MODE_FIELD_NUMBER: _ClassVar[int]
KEYHASHES_FIELD_NUMBER: _ClassVar[int]
mode: TopicDescriptor.EncOpts.EncMode
keyHashes: _containers.RepeatedScalarFieldContainer[bytes]
def __init__(self, mode: _Optional[_Union[TopicDescriptor.EncOpts.EncMode, str]] = ..., keyHashes: _Optional[_Iterable[bytes]] = ...) -> None: ...
NAME_FIELD_NUMBER: _ClassVar[int]
AUTH_FIELD_NUMBER: _ClassVar[int]
ENC_FIELD_NUMBER: _ClassVar[int]
name: str
auth: TopicDescriptor.AuthOpts
enc: TopicDescriptor.EncOpts
def __init__(self, name: _Optional[str] = ..., auth: _Optional[_Union[TopicDescriptor.AuthOpts, _Mapping]] = ..., enc: _Optional[_Union[TopicDescriptor.EncOpts, _Mapping]] = ...) -> None: ... # type: ignore
FROM_ID_FIELD_NUMBER: builtins.int
DATA_FIELD_NUMBER: builtins.int
SEQNO_FIELD_NUMBER: builtins.int
TOPICIDS_FIELD_NUMBER: builtins.int
SIGNATURE_FIELD_NUMBER: builtins.int
KEY_FIELD_NUMBER: builtins.int
from_id: builtins.bytes
data: builtins.bytes
seqno: builtins.bytes
signature: builtins.bytes
key: builtins.bytes
@property
def topicIDs(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: ...
def __init__(
self,
*,
from_id: builtins.bytes | None = ...,
data: builtins.bytes | None = ...,
seqno: builtins.bytes | None = ...,
topicIDs: collections.abc.Iterable[builtins.str] | None = ...,
signature: builtins.bytes | None = ...,
key: builtins.bytes | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["data", b"data", "from_id", b"from_id", "key", b"key", "seqno", b"seqno", "signature", b"signature"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["data", b"data", "from_id", b"from_id", "key", b"key", "seqno", b"seqno", "signature", b"signature", "topicIDs", b"topicIDs"]) -> None: ...

global___Message = Message

@typing.final
class ControlMessage(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

IHAVE_FIELD_NUMBER: builtins.int
IWANT_FIELD_NUMBER: builtins.int
GRAFT_FIELD_NUMBER: builtins.int
PRUNE_FIELD_NUMBER: builtins.int
@property
def ihave(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ControlIHave]: ...
@property
def iwant(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ControlIWant]: ...
@property
def graft(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ControlGraft]: ...
@property
def prune(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ControlPrune]: ...
def __init__(
self,
*,
ihave: collections.abc.Iterable[global___ControlIHave] | None = ...,
iwant: collections.abc.Iterable[global___ControlIWant] | None = ...,
graft: collections.abc.Iterable[global___ControlGraft] | None = ...,
prune: collections.abc.Iterable[global___ControlPrune] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing.Literal["graft", b"graft", "ihave", b"ihave", "iwant", b"iwant", "prune", b"prune"]) -> None: ...

global___ControlMessage = ControlMessage

@typing.final
class ControlIHave(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

TOPICID_FIELD_NUMBER: builtins.int
MESSAGEIDS_FIELD_NUMBER: builtins.int
topicID: builtins.str
@property
def messageIDs(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: ...
def __init__(
self,
*,
topicID: builtins.str | None = ...,
messageIDs: collections.abc.Iterable[builtins.str] | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["topicID", b"topicID"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["messageIDs", b"messageIDs", "topicID", b"topicID"]) -> None: ...

global___ControlIHave = ControlIHave

@typing.final
class ControlIWant(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

MESSAGEIDS_FIELD_NUMBER: builtins.int
@property
def messageIDs(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: ...
def __init__(
self,
*,
messageIDs: collections.abc.Iterable[builtins.str] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing.Literal["messageIDs", b"messageIDs"]) -> None: ...

global___ControlIWant = ControlIWant

@typing.final
class ControlGraft(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

TOPICID_FIELD_NUMBER: builtins.int
topicID: builtins.str
def __init__(
self,
*,
topicID: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["topicID", b"topicID"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["topicID", b"topicID"]) -> None: ...

global___ControlGraft = ControlGraft

@typing.final
class ControlPrune(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

TOPICID_FIELD_NUMBER: builtins.int
PEERS_FIELD_NUMBER: builtins.int
BACKOFF_FIELD_NUMBER: builtins.int
topicID: builtins.str
backoff: builtins.int
@property
def peers(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___PeerInfo]: ...
def __init__(
self,
*,
topicID: builtins.str | None = ...,
peers: collections.abc.Iterable[global___PeerInfo] | None = ...,
backoff: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["backoff", b"backoff", "topicID", b"topicID"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["backoff", b"backoff", "peers", b"peers", "topicID", b"topicID"]) -> None: ...

global___ControlPrune = ControlPrune

@typing.final
class PeerInfo(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

PEERID_FIELD_NUMBER: builtins.int
SIGNEDPEERRECORD_FIELD_NUMBER: builtins.int
peerID: builtins.bytes
signedPeerRecord: builtins.bytes
def __init__(
self,
*,
peerID: builtins.bytes | None = ...,
signedPeerRecord: builtins.bytes | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["peerID", b"peerID", "signedPeerRecord", b"signedPeerRecord"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["peerID", b"peerID", "signedPeerRecord", b"signedPeerRecord"]) -> None: ...

global___PeerInfo = PeerInfo

@typing.final
class TopicDescriptor(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

@typing.final
class AuthOpts(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

class _AuthMode:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType

class _AuthModeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TopicDescriptor.AuthOpts._AuthMode.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
NONE: TopicDescriptor.AuthOpts._AuthMode.ValueType # 0
"""no authentication, anyone can publish"""
KEY: TopicDescriptor.AuthOpts._AuthMode.ValueType # 1
"""only messages signed by keys in the topic descriptor are accepted"""
WOT: TopicDescriptor.AuthOpts._AuthMode.ValueType # 2
"""web of trust, certificates can allow publisher set to grow"""

class AuthMode(_AuthMode, metaclass=_AuthModeEnumTypeWrapper): ...
NONE: TopicDescriptor.AuthOpts.AuthMode.ValueType # 0
"""no authentication, anyone can publish"""
KEY: TopicDescriptor.AuthOpts.AuthMode.ValueType # 1
"""only messages signed by keys in the topic descriptor are accepted"""
WOT: TopicDescriptor.AuthOpts.AuthMode.ValueType # 2
"""web of trust, certificates can allow publisher set to grow"""

MODE_FIELD_NUMBER: builtins.int
KEYS_FIELD_NUMBER: builtins.int
mode: global___TopicDescriptor.AuthOpts.AuthMode.ValueType
@property
def keys(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bytes]:
"""root keys to trust"""

def __init__(
self,
*,
mode: global___TopicDescriptor.AuthOpts.AuthMode.ValueType | None = ...,
keys: collections.abc.Iterable[builtins.bytes] | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["mode", b"mode"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["keys", b"keys", "mode", b"mode"]) -> None: ...

@typing.final
class EncOpts(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor

class _EncMode:
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType

class _EncModeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TopicDescriptor.EncOpts._EncMode.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
NONE: TopicDescriptor.EncOpts._EncMode.ValueType # 0
"""no encryption, anyone can read"""
SHAREDKEY: TopicDescriptor.EncOpts._EncMode.ValueType # 1
"""messages are encrypted with shared key"""
WOT: TopicDescriptor.EncOpts._EncMode.ValueType # 2
"""web of trust, certificates can allow publisher set to grow"""

class EncMode(_EncMode, metaclass=_EncModeEnumTypeWrapper): ...
NONE: TopicDescriptor.EncOpts.EncMode.ValueType # 0
"""no encryption, anyone can read"""
SHAREDKEY: TopicDescriptor.EncOpts.EncMode.ValueType # 1
"""messages are encrypted with shared key"""
WOT: TopicDescriptor.EncOpts.EncMode.ValueType # 2
"""web of trust, certificates can allow publisher set to grow"""

MODE_FIELD_NUMBER: builtins.int
KEYHASHES_FIELD_NUMBER: builtins.int
mode: global___TopicDescriptor.EncOpts.EncMode.ValueType
@property
def keyHashes(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bytes]:
"""the hashes of the shared keys used (salted)"""

def __init__(
self,
*,
mode: global___TopicDescriptor.EncOpts.EncMode.ValueType | None = ...,
keyHashes: collections.abc.Iterable[builtins.bytes] | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["mode", b"mode"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["keyHashes", b"keyHashes", "mode", b"mode"]) -> None: ...

NAME_FIELD_NUMBER: builtins.int
AUTH_FIELD_NUMBER: builtins.int
ENC_FIELD_NUMBER: builtins.int
name: builtins.str
@property
def auth(self) -> global___TopicDescriptor.AuthOpts: ...
@property
def enc(self) -> global___TopicDescriptor.EncOpts: ...
def __init__(
self,
*,
name: builtins.str | None = ...,
auth: global___TopicDescriptor.AuthOpts | None = ...,
enc: global___TopicDescriptor.EncOpts | None = ...,
) -> None: ...
def HasField(self, field_name: typing.Literal["auth", b"auth", "enc", b"enc", "name", b"name"]) -> builtins.bool: ...
def ClearField(self, field_name: typing.Literal["auth", b"auth", "enc", b"enc", "name", b"name"]) -> None: ...

global___TopicDescriptor = TopicDescriptor

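The regenerated stub above moves from the `_ClassVar`/`_Optional` aliases to `builtins.*` types and `typing.Literal` field names, so field-name typos surface at type-check time instead of at runtime. A short sketch of what that buys (not part of the diff):

```python
# Sketch: strict stubs let mypy reject misspelled field names.
from libp2p.pubsub.pb import rpc_pb2

ihave = rpc_pb2.ControlIHave(topicID="demo", messageIDs=["m1", "m2"])
control = rpc_pb2.ControlMessage(ihave=[ihave])

if control.ihave:
    first = control.ihave[0]
    # HasField/ClearField accept only the literal field names from the
    # stub; a wrong name is a type error under mypy.
    assert first.HasField("topicID")
    first.ClearField("messageIDs")
```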
@@ -56,8 +56,6 @@ from libp2p.peer.id import (
from libp2p.peer.peerdata import (
PeerDataError,
)
from libp2p.peer.peerstore import env_to_send_in_RPC
from libp2p.pubsub.utils import maybe_consume_signed_record
from libp2p.tools.async_service import (
Service,
)
@@ -249,10 +247,6 @@ class Pubsub(Service, IPubsub):
packet.subscriptions.extend(
[rpc_pb2.RPC.SubOpts(subscribe=True, topicid=topic_id)]
)
# Add the sender's signedRecord in the RPC message
envelope_bytes, _ = env_to_send_in_RPC(self.host)
packet.senderRecord = envelope_bytes

return packet

async def continuously_read_stream(self, stream: INetStream) -> None:
@@ -269,14 +263,6 @@ class Pubsub(Service, IPubsub):
incoming: bytes = await read_varint_prefixed_bytes(stream)
rpc_incoming: rpc_pb2.RPC = rpc_pb2.RPC()
rpc_incoming.ParseFromString(incoming)

# Process the sender's signed-record if sent
if not maybe_consume_signed_record(rpc_incoming, self.host, peer_id):
logger.error(
"Received an invalid-signed-record, ignoring the incoming msg"
)
continue

if rpc_incoming.publish:
# deal with RPC.publish
for msg in rpc_incoming.publish:
@@ -586,9 +572,6 @@ class Pubsub(Service, IPubsub):
[rpc_pb2.RPC.SubOpts(subscribe=True, topicid=topic_id)]
)

# Add the senderRecord of the peer in the RPC msg
envelope_bytes, _ = env_to_send_in_RPC(self.host)
packet.senderRecord = envelope_bytes
# Send out subscribe message to all peers
await self.message_all_peers(packet.SerializeToString())

@@ -621,9 +604,6 @@ class Pubsub(Service, IPubsub):
packet.subscriptions.extend(
[rpc_pb2.RPC.SubOpts(subscribe=False, topicid=topic_id)]
)
# Add the senderRecord of the peer in the RPC msg
envelope_bytes, _ = env_to_send_in_RPC(self.host)
packet.senderRecord = envelope_bytes

# Send out unsubscribe message to all peers
await self.message_all_peers(packet.SerializeToString())

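The read-loop change above keeps one shape: read a varint-length-prefixed frame, parse it, and drop the whole RPC when the attached record fails validation. A stand-alone sketch of that shape (the import path for the varint helper is assumed):

```python
# Sketch, not repo code; `stream`, `host`, and `peer_id` come from the
# surrounding Pubsub service in the real read loop.
from libp2p.pubsub.pb import rpc_pb2
from libp2p.pubsub.utils import maybe_consume_signed_record
from libp2p.utils import read_varint_prefixed_bytes  # assumed import path


async def read_one_rpc(stream, host, peer_id) -> rpc_pb2.RPC | None:
    incoming = await read_varint_prefixed_bytes(stream)
    rpc_msg = rpc_pb2.RPC()
    rpc_msg.ParseFromString(incoming)
    # An invalid signed record invalidates the whole RPC, not just the record.
    if not maybe_consume_signed_record(rpc_msg, host, peer_id):
        return None
    return rpc_msg
```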
@@ -1,80 +0,0 @@
import ast
import logging

from libp2p.abc import IHost
from libp2p.custom_types import (
MessageID,
)
from libp2p.peer.envelope import consume_envelope
from libp2p.peer.id import ID
from libp2p.pubsub.pb.rpc_pb2 import RPC

logger = logging.getLogger("pubsub-example.utils")


def maybe_consume_signed_record(msg: RPC, host: IHost, peer_id: ID) -> bool:
"""
Attempt to parse and store a signed-peer-record (Envelope) received during
PubSub communication. If the record is invalid, the peer-id does not match, or
updating the peerstore fails, the function logs an error and returns False.

Parameters
----------
msg : RPC
The protobuf message received during PubSub communication.
host : IHost
The local host instance, providing access to the peerstore for storing
verified peer records.
peer_id : ID | None, optional
The expected peer ID for record validation. If provided, the peer ID
inside the record must match this value.

Returns
-------
bool
True if a valid signed peer record was successfully consumed and stored,
False otherwise.

"""
if msg.HasField("senderRecord"):
try:
# Convert the signed-peer-record(Envelope) from
# protobuf bytes
envelope, record = consume_envelope(msg.senderRecord, "libp2p-peer-record")
if not record.peer_id == peer_id:
return False

# Use the default TTL of 2 hours (7200 seconds)
if not host.get_peerstore().consume_peer_record(envelope, 7200):
logger.error("Failed to update the Certified-Addr-Book")
return False
except Exception as e:
logger.error("Failed to update the Certified-Addr-Book: %s", e)
return False
return True


def parse_message_id_safe(msg_id_str: str) -> MessageID:
"""Safely handle message ID as string."""
return MessageID(msg_id_str)


def safe_parse_message_id(msg_id_str: str) -> tuple[bytes, bytes]:
"""
Safely parse message ID using ast.literal_eval with validation.
:param msg_id_str: String representation of message ID
:return: Tuple of (seqno, from_id) as bytes
:raises ValueError: If parsing fails
"""
try:
parsed = ast.literal_eval(msg_id_str)
if not isinstance(parsed, tuple) or len(parsed) != 2:
raise ValueError("Invalid message ID format")

seqno, from_id = parsed
if not isinstance(seqno, bytes) or not isinstance(from_id, bytes):
raise ValueError("Message ID components must be bytes")

return (seqno, from_id)
except (ValueError, SyntaxError) as e:
raise ValueError(f"Invalid message ID format: {e}")
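The two parsers above are not interchangeable: `parse_message_id_safe` keeps the ID opaque, while `safe_parse_message_id` expects the string to be the `repr` of a `(seqno, from_id)` tuple of bytes and validates it. A quick comparison, assuming both functions are importable from the module shown in this hunk:

```python
# Illustrative only, not repo code.
msg_id_repr = str((b"\x00\x01", b"peer"))  # -> "(b'\\x00\\x01', b'peer')"

opaque = parse_message_id_safe(msg_id_repr)           # opaque MessageID string
seqno, from_id = safe_parse_message_id(msg_id_repr)   # validated bytes tuple

assert opaque == msg_id_repr
assert seqno == b"\x00\x01" and from_id == b"peer"
```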
@@ -9,7 +9,6 @@ from dataclasses import (
dataclass,
field,
)
from enum import Flag, auto

from libp2p.peer.peerinfo import (
PeerInfo,
@@ -19,118 +18,29 @@ from .resources import (
RelayLimits,
)

DEFAULT_MIN_RELAYS = 3
DEFAULT_MAX_RELAYS = 20
DEFAULT_DISCOVERY_INTERVAL = 300 # seconds
DEFAULT_RESERVATION_TTL = 3600 # seconds
DEFAULT_MAX_CIRCUIT_DURATION = 3600 # seconds
DEFAULT_MAX_CIRCUIT_BYTES = 1024 * 1024 * 1024 # 1GB

DEFAULT_MAX_CIRCUIT_CONNS = 8
DEFAULT_MAX_RESERVATIONS = 4

MAX_RESERVATIONS_PER_IP = 8
MAX_CIRCUITS_PER_IP = 16
RESERVATION_RATE_PER_IP = 4 # per minute
CIRCUIT_RATE_PER_IP = 8 # per minute
MAX_CIRCUITS_TOTAL = 64
MAX_RESERVATIONS_TOTAL = 32
MAX_BANDWIDTH_PER_CIRCUIT = 1024 * 1024 # 1MB/s
MAX_BANDWIDTH_TOTAL = 10 * 1024 * 1024 # 10MB/s

MIN_RELAY_SCORE = 0.5
MAX_RELAY_LATENCY = 1.0 # seconds
ENABLE_AUTO_RELAY = True
AUTO_RELAY_TIMEOUT = 30 # seconds
MAX_AUTO_RELAY_ATTEMPTS = 3
RESERVATION_REFRESH_THRESHOLD = 0.8 # Refresh at 80% of TTL
MAX_CONCURRENT_RESERVATIONS = 2

# Timeout constants for different components
DEFAULT_DISCOVERY_STREAM_TIMEOUT = 10 # seconds
DEFAULT_PEER_PROTOCOL_TIMEOUT = 5 # seconds
DEFAULT_PROTOCOL_READ_TIMEOUT = 15 # seconds
DEFAULT_PROTOCOL_WRITE_TIMEOUT = 15 # seconds
DEFAULT_PROTOCOL_CLOSE_TIMEOUT = 10 # seconds
DEFAULT_DCUTR_READ_TIMEOUT = 30 # seconds
DEFAULT_DCUTR_WRITE_TIMEOUT = 30 # seconds
DEFAULT_DIAL_TIMEOUT = 10 # seconds


@dataclass
class TimeoutConfig:
"""Timeout configuration for different Circuit Relay v2 components."""

# Discovery timeouts
discovery_stream_timeout: int = DEFAULT_DISCOVERY_STREAM_TIMEOUT
peer_protocol_timeout: int = DEFAULT_PEER_PROTOCOL_TIMEOUT

# Core protocol timeouts
protocol_read_timeout: int = DEFAULT_PROTOCOL_READ_TIMEOUT
protocol_write_timeout: int = DEFAULT_PROTOCOL_WRITE_TIMEOUT
protocol_close_timeout: int = DEFAULT_PROTOCOL_CLOSE_TIMEOUT

# DCUtR timeouts
dcutr_read_timeout: int = DEFAULT_DCUTR_READ_TIMEOUT
dcutr_write_timeout: int = DEFAULT_DCUTR_WRITE_TIMEOUT
dial_timeout: int = DEFAULT_DIAL_TIMEOUT


# Relay roles enum
class RelayRole(Flag):
"""
Bit-flag enum that captures the three possible relay capabilities.

A node can combine multiple roles using bit-wise OR, for example::

RelayRole.HOP | RelayRole.STOP
"""

HOP = auto() # Act as a relay for others ("hop")
STOP = auto() # Accept relayed connections ("stop")
CLIENT = auto() # Dial through existing relays ("client")


@dataclass
class RelayConfig:
"""Configuration for Circuit Relay v2."""

# Role configuration (bit-flags)
roles: RelayRole = RelayRole.STOP | RelayRole.CLIENT
# Role configuration
enable_hop: bool = False # Whether to act as a relay (hop)
enable_stop: bool = True # Whether to accept relayed connections (stop)
enable_client: bool = True # Whether to use relays for dialing

# Resource limits
limits: RelayLimits | None = None

# Discovery configuration
bootstrap_relays: list[PeerInfo] = field(default_factory=list)
min_relays: int = DEFAULT_MIN_RELAYS
max_relays: int = DEFAULT_MAX_RELAYS
discovery_interval: int = DEFAULT_DISCOVERY_INTERVAL
min_relays: int = 3
max_relays: int = 20
discovery_interval: int = 300 # seconds

# Connection configuration
reservation_ttl: int = DEFAULT_RESERVATION_TTL
max_circuit_duration: int = DEFAULT_MAX_CIRCUIT_DURATION
max_circuit_bytes: int = DEFAULT_MAX_CIRCUIT_BYTES

# Timeout configuration
timeouts: TimeoutConfig = field(default_factory=TimeoutConfig)

# ---------------------------------------------------------------------
# Backwards-compat boolean helpers. Existing code that still accesses
# ``cfg.enable_hop, cfg.enable_stop, cfg.enable_client`` will continue to work.
# ---------------------------------------------------------------------

@property
def enable_hop(self) -> bool: # pragma: no cover – helper
return bool(self.roles & RelayRole.HOP)

@property
def enable_stop(self) -> bool: # pragma: no cover – helper
return bool(self.roles & RelayRole.STOP)

@property
def enable_client(self) -> bool: # pragma: no cover – helper
return bool(self.roles & RelayRole.CLIENT)
reservation_ttl: int = 3600 # seconds
max_circuit_duration: int = 3600 # seconds
max_circuit_bytes: int = 1024 * 1024 * 1024 # 1GB

def __post_init__(self) -> None:
"""Initialize default values."""
@@ -138,8 +48,8 @@ class RelayConfig:
self.limits = RelayLimits(
duration=self.max_circuit_duration,
data=self.max_circuit_bytes,
max_circuit_conns=DEFAULT_MAX_CIRCUIT_CONNS,
max_reservations=DEFAULT_MAX_RESERVATIONS,
max_circuit_conns=8,
max_reservations=4,
)


@@ -148,20 +58,20 @@ class HopConfig:
"""Configuration specific to relay (hop) nodes."""

# Resource limits per IP
max_reservations_per_ip: int = MAX_RESERVATIONS_PER_IP
max_circuits_per_ip: int = MAX_CIRCUITS_PER_IP
max_reservations_per_ip: int = 8
max_circuits_per_ip: int = 16

# Rate limiting
reservation_rate_per_ip: int = RESERVATION_RATE_PER_IP
circuit_rate_per_ip: int = CIRCUIT_RATE_PER_IP
reservation_rate_per_ip: int = 4 # per minute
circuit_rate_per_ip: int = 8 # per minute

# Resource quotas
max_circuits_total: int = MAX_CIRCUITS_TOTAL
max_reservations_total: int = MAX_RESERVATIONS_TOTAL
max_circuits_total: int = 64
max_reservations_total: int = 32

# Bandwidth limits
max_bandwidth_per_circuit: int = MAX_BANDWIDTH_PER_CIRCUIT
max_bandwidth_total: int = MAX_BANDWIDTH_TOTAL
max_bandwidth_per_circuit: int = 1024 * 1024 # 1MB/s
max_bandwidth_total: int = 10 * 1024 * 1024 # 10MB/s


@dataclass
@@ -169,14 +79,14 @@ class ClientConfig:
"""Configuration specific to relay clients."""

# Relay selection
min_relay_score: float = MIN_RELAY_SCORE
max_relay_latency: float = MAX_RELAY_LATENCY
min_relay_score: float = 0.5
max_relay_latency: float = 1.0 # seconds

# Auto-relay settings
enable_auto_relay: bool = ENABLE_AUTO_RELAY
auto_relay_timeout: int = AUTO_RELAY_TIMEOUT
max_auto_relay_attempts: int = MAX_AUTO_RELAY_ATTEMPTS
enable_auto_relay: bool = True
auto_relay_timeout: int = 30 # seconds
max_auto_relay_attempts: int = 3

# Reservation management
reservation_refresh_threshold: float = RESERVATION_REFRESH_THRESHOLD
max_concurrent_reservations: int = MAX_CONCURRENT_RESERVATIONS
reservation_refresh_threshold: float = 0.8 # Refresh at 80% of TTL
max_concurrent_reservations: int = 2

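On the bit-flag side of this change, a node combines roles with `|` while the `enable_*` properties keep older call sites working unchanged. A small sketch (import path assumed):

```python
# Sketch, not from the diff.
from libp2p.relay.circuit_v2.config import RelayConfig, RelayRole

cfg = RelayConfig(roles=RelayRole.HOP | RelayRole.STOP)

assert cfg.enable_hop         # derived from the HOP flag
assert cfg.enable_stop        # derived from the STOP flag
assert not cfg.enable_client  # CLIENT flag not set
```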
@@ -29,11 +29,6 @@ from libp2p.peer.id import (
from libp2p.peer.peerinfo import (
PeerInfo,
)
from libp2p.relay.circuit_v2.config import (
DEFAULT_DCUTR_READ_TIMEOUT,
DEFAULT_DCUTR_WRITE_TIMEOUT,
DEFAULT_DIAL_TIMEOUT,
)
from libp2p.relay.circuit_v2.nat import (
ReachabilityChecker,
)
@@ -52,7 +47,11 @@ PROTOCOL_ID = TProtocol("/libp2p/dcutr")
# Maximum message size for DCUtR (4KiB as per spec)
MAX_MESSAGE_SIZE = 4 * 1024

# DCUtR protocol constants
# Timeouts
STREAM_READ_TIMEOUT = 30 # seconds
STREAM_WRITE_TIMEOUT = 30 # seconds
DIAL_TIMEOUT = 10 # seconds

# Maximum number of hole punch attempts per peer
MAX_HOLE_PUNCH_ATTEMPTS = 5

@@ -71,13 +70,7 @@ class DCUtRProtocol(Service):
hole punching, after they have established an initial connection through a relay.
"""

def __init__(
self,
host: IHost,
read_timeout: int = DEFAULT_DCUTR_READ_TIMEOUT,
write_timeout: int = DEFAULT_DCUTR_WRITE_TIMEOUT,
dial_timeout: int = DEFAULT_DIAL_TIMEOUT,
):
def __init__(self, host: IHost):
"""
Initialize the DCUtR protocol.

@@ -85,19 +78,10 @@ class DCUtRProtocol(Service):
----------
host : IHost
The libp2p host this protocol is running on
read_timeout : int
Timeout for stream read operations, in seconds
write_timeout : int
Timeout for stream write operations, in seconds
dial_timeout : int
Timeout for dial operations, in seconds

"""
super().__init__()
self.host = host
self.read_timeout = read_timeout
self.write_timeout = write_timeout
self.dial_timeout = dial_timeout
self.event_started = trio.Event()
self._hole_punch_attempts: dict[ID, int] = {}
self._direct_connections: set[ID] = set()
@@ -177,7 +161,7 @@ class DCUtRProtocol(Service):

try:
# Read the CONNECT message
with trio.fail_after(self.read_timeout):
with trio.fail_after(STREAM_READ_TIMEOUT):
msg_bytes = await stream.read(MAX_MESSAGE_SIZE)

# Parse the message
@@ -212,7 +196,7 @@ class DCUtRProtocol(Service):
response.type = HolePunch.CONNECT
response.ObsAddrs.extend(our_addrs)

with trio.fail_after(self.write_timeout):
with trio.fail_after(STREAM_WRITE_TIMEOUT):
await stream.write(response.SerializeToString())

logger.debug(
@@ -222,7 +206,7 @@ class DCUtRProtocol(Service):
)

# Wait for SYNC message
with trio.fail_after(self.read_timeout):
with trio.fail_after(STREAM_READ_TIMEOUT):
sync_bytes = await stream.read(MAX_MESSAGE_SIZE)

# Parse the SYNC message
@@ -316,7 +300,7 @@ class DCUtRProtocol(Service):
connect_msg.ObsAddrs.extend(our_addrs)

start_time = time.time()
with trio.fail_after(self.write_timeout):
with trio.fail_after(STREAM_WRITE_TIMEOUT):
await stream.write(connect_msg.SerializeToString())

logger.debug(
@@ -326,7 +310,7 @@ class DCUtRProtocol(Service):
)

# Receive the peer's CONNECT message
with trio.fail_after(self.read_timeout):
with trio.fail_after(STREAM_READ_TIMEOUT):
resp_bytes = await stream.read(MAX_MESSAGE_SIZE)

# Calculate RTT
@@ -365,7 +349,7 @@ class DCUtRProtocol(Service):
sync_msg = HolePunch()
sync_msg.type = HolePunch.SYNC

with trio.fail_after(self.write_timeout):
with trio.fail_after(STREAM_WRITE_TIMEOUT):
await stream.write(sync_msg.SerializeToString())

logger.debug("Sent SYNC message to %s", peer_id)
@@ -484,7 +468,7 @@ class DCUtRProtocol(Service):
peer_info = PeerInfo(peer_id, [addr])

# Try to connect with timeout
with trio.fail_after(self.dial_timeout):
with trio.fail_after(DIAL_TIMEOUT):
await self.host.connect(peer_info)

logger.info("Successfully connected to %s at %s", peer_id, addr)
@@ -524,9 +508,7 @@ class DCUtRProtocol(Service):

# Handle both single connection and list of connections
connections: list[INetConn] = (
list(conn_or_conns)
if not isinstance(conn_or_conns, list)
else conn_or_conns
[conn_or_conns] if not isinstance(conn_or_conns, list) else conn_or_conns
)

# Check if any connection is direct (not relayed)

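Every hunk above swaps a module-level timeout constant for a per-instance attribute (or back), but the guarding pattern itself is unchanged. A minimal sketch of that pattern; `stream` is assumed to expose an async `read`:

```python
# Sketch of the timeout pattern used throughout the hunks above.
import trio


async def read_with_timeout(stream, timeout: int, max_size: int) -> bytes:
    # trio.fail_after raises trio.TooSlowError once the deadline passes,
    # which the callers above catch and turn into an error log.
    with trio.fail_after(timeout):
        return await stream.read(max_size)
```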
@@ -31,11 +31,6 @@ from libp2p.tools.async_service import (
Service,
)

from .config import (
DEFAULT_DISCOVERY_INTERVAL,
DEFAULT_DISCOVERY_STREAM_TIMEOUT,
DEFAULT_PEER_PROTOCOL_TIMEOUT,
)
from .pb.circuit_pb2 import (
HopMessage,
)
@@ -48,8 +43,10 @@ from .protocol_buffer import (

logger = logging.getLogger("libp2p.relay.circuit_v2.discovery")

# Discovery constants
# Constants
MAX_RELAYS_TO_TRACK = 10
DEFAULT_DISCOVERY_INTERVAL = 60 # seconds
STREAM_TIMEOUT = 10 # seconds


# Extended interfaces for type checking
@@ -89,8 +86,6 @@ class RelayDiscovery(Service):
auto_reserve: bool = False,
discovery_interval: int = DEFAULT_DISCOVERY_INTERVAL,
max_relays: int = MAX_RELAYS_TO_TRACK,
stream_timeout: int = DEFAULT_DISCOVERY_STREAM_TIMEOUT,
peer_protocol_timeout: int = DEFAULT_PEER_PROTOCOL_TIMEOUT,
) -> None:
"""
Initialize the discovery service.
@@ -105,10 +100,6 @@ class RelayDiscovery(Service):
How often to run discovery, in seconds
max_relays : int
Maximum number of relays to track
stream_timeout : int
Timeout for stream operations during discovery, in seconds
peer_protocol_timeout : int
Timeout for checking peer protocol support, in seconds

"""
super().__init__()
@@ -116,8 +107,6 @@ class RelayDiscovery(Service):
self.auto_reserve = auto_reserve
self.discovery_interval = discovery_interval
self.max_relays = max_relays
self.stream_timeout = stream_timeout
self.peer_protocol_timeout = peer_protocol_timeout
self._discovered_relays: dict[ID, RelayInfo] = {}
self._protocol_cache: dict[
ID, set[str]
@@ -176,8 +165,8 @@ class RelayDiscovery(Service):
self._discovered_relays[peer_id].last_seen = time.time()
continue

# Don't wait too long for protocol info
with trio.move_on_after(self.peer_protocol_timeout):
# Check if peer supports the relay protocol
with trio.move_on_after(5): # Don't wait too long for protocol info
if await self._supports_relay_protocol(peer_id):
await self._add_relay(peer_id)

@@ -275,7 +264,7 @@ class RelayDiscovery(Service):
async def _check_via_direct_connection(self, peer_id: ID) -> bool | None:
"""Check protocol support via direct connection."""
try:
with trio.fail_after(self.stream_timeout):
with trio.fail_after(STREAM_TIMEOUT):
stream = await self.host.new_stream(peer_id, [PROTOCOL_ID])
if stream:
await stream.close()
@@ -381,7 +370,7 @@ class RelayDiscovery(Service):

# Open a stream to the relay with timeout
try:
with trio.fail_after(self.stream_timeout):
with trio.fail_after(STREAM_TIMEOUT):
stream = await self.host.new_stream(peer_id, [PROTOCOL_ID])
if not stream:
logger.error("Failed to open stream to relay %s", peer_id)
@@ -397,7 +386,7 @@ class RelayDiscovery(Service):
peer=self.host.get_id().to_bytes(),
)

with trio.fail_after(self.stream_timeout):
with trio.fail_after(STREAM_TIMEOUT):
await stream.write(request.SerializeToString())

# Wait for response

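On the injectable-timeout side of this change, the constructor takes the values that were previously module constants. A hypothetical instantiation matching the signature in the hunk above:

```python
# Sketch only; values mirror the former module-level constants.
from libp2p.abc import IHost
from libp2p.relay.circuit_v2.discovery import RelayDiscovery


def make_discovery(host: IHost) -> RelayDiscovery:
    return RelayDiscovery(
        host,
        auto_reserve=False,
        discovery_interval=60,    # was DEFAULT_DISCOVERY_INTERVAL = 60
        max_relays=10,            # was MAX_RELAYS_TO_TRACK = 10
        stream_timeout=10,        # was STREAM_TIMEOUT = 10
        peer_protocol_timeout=5,  # was the hard-coded move_on_after(5)
    )
```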
@@ -5,7 +5,6 @@ This module implements the Circuit Relay v2 protocol as specified in:
https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md
"""

from enum import Enum, auto
import logging
import time
from typing import (
@@ -38,15 +37,6 @@ from libp2p.tools.async_service import (
Service,
)

from .config import (
DEFAULT_MAX_CIRCUIT_BYTES,
DEFAULT_MAX_CIRCUIT_CONNS,
DEFAULT_MAX_CIRCUIT_DURATION,
DEFAULT_MAX_RESERVATIONS,
DEFAULT_PROTOCOL_CLOSE_TIMEOUT,
DEFAULT_PROTOCOL_READ_TIMEOUT,
DEFAULT_PROTOCOL_WRITE_TIMEOUT,
)
from .pb.circuit_pb2 import (
HopMessage,
Limit,
@@ -68,22 +58,18 @@ logger = logging.getLogger("libp2p.relay.circuit_v2")
PROTOCOL_ID = TProtocol("/libp2p/circuit/relay/2.0.0")
STOP_PROTOCOL_ID = TProtocol("/libp2p/circuit/relay/2.0.0/stop")


# Direction enum for data piping
class Pipe(Enum):
SRC_TO_DST = auto()
DST_TO_SRC = auto()


# Default limits for relay resources
DEFAULT_RELAY_LIMITS = RelayLimits(
duration=DEFAULT_MAX_CIRCUIT_DURATION,
data=DEFAULT_MAX_CIRCUIT_BYTES,
max_circuit_conns=DEFAULT_MAX_CIRCUIT_CONNS,
max_reservations=DEFAULT_MAX_RESERVATIONS,
duration=60 * 60, # 1 hour
data=1024 * 1024 * 1024, # 1GB
max_circuit_conns=8,
max_reservations=4,
)

# Stream operation constants
# Stream operation timeouts
STREAM_READ_TIMEOUT = 15 # seconds
STREAM_WRITE_TIMEOUT = 15 # seconds
STREAM_CLOSE_TIMEOUT = 10 # seconds
MAX_READ_RETRIES = 5 # Maximum number of read retries


@@ -127,9 +113,6 @@ class CircuitV2Protocol(Service):
host: IHost,
limits: RelayLimits | None = None,
allow_hop: bool = False,
read_timeout: int = DEFAULT_PROTOCOL_READ_TIMEOUT,
write_timeout: int = DEFAULT_PROTOCOL_WRITE_TIMEOUT,
close_timeout: int = DEFAULT_PROTOCOL_CLOSE_TIMEOUT,
) -> None:
"""
Initialize a Circuit Relay v2 protocol instance.
@@ -142,20 +125,11 @@ class CircuitV2Protocol(Service):
Resource limits for the relay
allow_hop : bool
Whether to allow this node to act as a relay
read_timeout : int
Timeout for stream read operations, in seconds
write_timeout : int
Timeout for stream write operations, in seconds
close_timeout : int
Timeout for stream close operations, in seconds

"""
self.host = host
self.limits = limits or DEFAULT_RELAY_LIMITS
self.allow_hop = allow_hop
self.read_timeout = read_timeout
self.write_timeout = write_timeout
self.close_timeout = close_timeout
self.resource_manager = RelayResourceManager(self.limits)
self._active_relays: dict[ID, tuple[INetStream, INetStream | None]] = {}
self.event_started = trio.Event()
@@ -200,7 +174,7 @@ class CircuitV2Protocol(Service):
return

try:
with trio.fail_after(self.close_timeout):
with trio.fail_after(STREAM_CLOSE_TIMEOUT):
await stream.close()
except Exception:
try:
@@ -242,7 +216,7 @@ class CircuitV2Protocol(Service):

while retries < max_retries:
try:
with trio.fail_after(self.read_timeout):
with trio.fail_after(STREAM_READ_TIMEOUT):
# Try reading with timeout
logger.debug(
"Attempting to read from stream (attempt %d/%d)",
@@ -319,7 +293,7 @@ class CircuitV2Protocol(Service):
# First, handle the read timeout gracefully
try:
with trio.fail_after(
self.read_timeout * 2
STREAM_READ_TIMEOUT * 2
): # Double the timeout for reading
msg_bytes = await stream.read()
if not msg_bytes:
@@ -440,7 +414,7 @@ class CircuitV2Protocol(Service):
"""
try:
# Read the incoming message with timeout
with trio.fail_after(self.read_timeout):
with trio.fail_after(STREAM_READ_TIMEOUT):
msg_bytes = await stream.read()
stop_msg = StopMessage()
stop_msg.ParseFromString(msg_bytes)
@@ -484,20 +458,8 @@ class CircuitV2Protocol(Service):

# Start relaying data
async with trio.open_nursery() as nursery:
nursery.start_soon(
self._relay_data,
src_stream,
stream,
peer_id,
Pipe.SRC_TO_DST,
)
nursery.start_soon(
self._relay_data,
stream,
src_stream,
peer_id,
Pipe.DST_TO_SRC,
)
nursery.start_soon(self._relay_data, src_stream, stream, peer_id)
nursery.start_soon(self._relay_data, stream, src_stream, peer_id)

except trio.TooSlowError:
logger.error("Timeout reading from stop stream")
@@ -547,7 +509,7 @@ class CircuitV2Protocol(Service):
ttl = self.resource_manager.reserve(peer_id)

# Send reservation success response
with trio.fail_after(self.write_timeout):
with trio.fail_after(STREAM_WRITE_TIMEOUT):
status = create_status(
code=StatusCode.OK, message="Reservation accepted"
)
@@ -598,7 +560,7 @@ class CircuitV2Protocol(Service):
# Always close the stream when done with reservation
if cast(INetStreamWithExtras, stream).is_open():
try:
with trio.fail_after(self.close_timeout):
with trio.fail_after(STREAM_CLOSE_TIMEOUT):
await stream.close()
except Exception as close_err:
logger.error("Error closing stream: %s", str(close_err))
@@ -634,7 +596,7 @@ class CircuitV2Protocol(Service):
self._active_relays[peer_id] = (stream, None)

# Try to connect to the destination with timeout
with trio.fail_after(self.read_timeout):
with trio.fail_after(STREAM_READ_TIMEOUT):
dst_stream = await self.host.new_stream(peer_id, [STOP_PROTOCOL_ID])
if not dst_stream:
raise ConnectionError("Could not connect to destination")
@@ -686,20 +648,8 @@ class CircuitV2Protocol(Service):

# Start relaying data
async with trio.open_nursery() as nursery:
nursery.start_soon(
self._relay_data,
stream,
dst_stream,
peer_id,
Pipe.SRC_TO_DST,
)
nursery.start_soon(
self._relay_data,
dst_stream,
stream,
peer_id,
Pipe.DST_TO_SRC,
)
nursery.start_soon(self._relay_data, stream, dst_stream, peer_id)
nursery.start_soon(self._relay_data, dst_stream, stream, peer_id)

except (trio.TooSlowError, ConnectionError) as e:
logger.error("Error establishing relay connection: %s", str(e))
@@ -735,7 +685,6 @@ class CircuitV2Protocol(Service):
src_stream: INetStream,
dst_stream: INetStream,
peer_id: ID,
direction: Pipe,
) -> None:
"""
Relay data between two streams.
@@ -749,27 +698,24 @@ class CircuitV2Protocol(Service):
peer_id : ID
ID of the peer being relayed

direction : Pipe
Direction of data flow (``Pipe.SRC_TO_DST`` or ``Pipe.DST_TO_SRC``)

"""
try:
while True:
# Read data with retries
data = await self._read_stream_with_retry(src_stream)
if not data:
logger.info("%s closed/reset", direction.name)
logger.info("Source stream closed/reset")
break

# Write data with timeout
try:
with trio.fail_after(self.write_timeout):
with trio.fail_after(STREAM_WRITE_TIMEOUT):
await dst_stream.write(data)
except trio.TooSlowError:
logger.error("Timeout writing in %s", direction.name)
logger.error("Timeout writing to destination stream")
break
except Exception as e:
logger.error("Error writing in %s: %s", direction.name, str(e))
logger.error("Error writing to destination stream: %s", str(e))
break

# Update resource usage
@@ -798,7 +744,7 @@ class CircuitV2Protocol(Service):
"""Send a status message."""
try:
logger.debug("Sending status message with code %s: %s", code, message)
with trio.fail_after(self.write_timeout * 2): # Double the timeout
|
||||
with trio.fail_after(STREAM_WRITE_TIMEOUT * 2): # Double the timeout
|
||||
# Create a proto Status directly
|
||||
pb_status = PbStatus()
|
||||
pb_status.code = cast(
|
||||
@ -836,7 +782,7 @@ class CircuitV2Protocol(Service):
|
||||
"""Send a status message on a STOP stream."""
|
||||
try:
|
||||
logger.debug("Sending stop status message with code %s: %s", code, message)
|
||||
with trio.fail_after(self.write_timeout * 2): # Double the timeout
|
||||
with trio.fail_after(STREAM_WRITE_TIMEOUT * 2): # Double the timeout
|
||||
# Create a proto Status directly
|
||||
pb_status = PbStatus()
|
||||
pb_status.code = cast(
|
||||
|
||||
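The hunks above replace per-instance timeout attributes with the module-level
STREAM_*_TIMEOUT constants, each enforced through trio.fail_after, and cap
reads at MAX_READ_RETRIES attempts. A minimal standalone sketch of that
bounded-retry read pattern (the helper name is illustrative, not part of the
library):

import trio
from typing import Any

STREAM_READ_TIMEOUT = 15  # seconds
MAX_READ_RETRIES = 5


async def read_with_retry(stream: Any) -> bytes:
    # Retry the read up to MAX_READ_RETRIES times, each attempt bounded
    # by STREAM_READ_TIMEOUT; return b"" once the budget is exhausted.
    for _attempt in range(1, MAX_READ_RETRIES + 1):
        try:
            with trio.fail_after(STREAM_READ_TIMEOUT):
                data = await stream.read()
            if data:
                return data
        except trio.TooSlowError:
            continue  # this attempt timed out; try again
    return b""
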
@@ -8,7 +8,6 @@ including reservations and connection limits.
from dataclasses import (
    dataclass,
)
from enum import Enum, auto
import hashlib
import os
import time
@@ -20,18 +19,6 @@ from libp2p.peer.id import (
# Import the protobuf definitions
from .pb.circuit_pb2 import Reservation as PbReservation

RANDOM_BYTES_LENGTH = 16  # 128 bits of randomness
TIMESTAMP_MULTIPLIER = 1000000  # To convert seconds to microseconds


# Reservation status enum
class ReservationStatus(Enum):
    """Lifecycle status of a relay reservation."""

    ACTIVE = auto()
    EXPIRED = auto()
    REJECTED = auto()


@dataclass
class RelayLimits:
@@ -81,8 +68,8 @@ class Reservation:
        # - Peer ID to bind it to the specific peer
        # - Timestamp for uniqueness
        # - Hash everything for a fixed size output
        random_bytes = os.urandom(RANDOM_BYTES_LENGTH)
        timestamp = str(int(self.created_at * TIMESTAMP_MULTIPLIER)).encode()
        random_bytes = os.urandom(16)  # 128 bits of randomness
        timestamp = str(int(self.created_at * 1000000)).encode()
        peer_bytes = self.peer_id.to_bytes()

        # Combine all elements and hash them
@@ -97,15 +84,6 @@ class Reservation:
        """Check if the reservation has expired."""
        return time.time() > self.expires_at

    # Expose a friendly status enum

    @property
    def status(self) -> ReservationStatus:
        """Return the current status as a ``ReservationStatus`` enum."""
        return (
            ReservationStatus.EXPIRED if self.is_expired() else ReservationStatus.ACTIVE
        )

    def can_accept_connection(self) -> bool:
        """Check if a new connection can be accepted."""
        return (

@@ -89,10 +89,7 @@ class CircuitV2Transport(ITransport):
            auto_reserve=config.enable_client,
            discovery_interval=config.discovery_interval,
            max_relays=config.max_relays,
            stream_timeout=config.timeouts.discovery_stream_timeout,
            peer_protocol_timeout=config.timeouts.peer_protocol_timeout,
        )
        self.relay_counter = 0  # for round robin load balancing

    async def dial(
        self,
@@ -222,25 +219,11 @@ class CircuitV2Transport(ITransport):
            # Get a relay from the list of discovered relays
            relays = self.discovery.get_relays()
            if relays:
                # Prioritize relays with active reservations
                relays_with_reservations = []
                other_relays = []
                # TODO: Implement more sophisticated relay selection
                # For now, just return the first available relay
                return relays[0]

                for relay_id in relays:
                    relay_info = self.discovery.get_relay_info(relay_id)
                    if relay_info and relay_info.has_reservation:
                        relays_with_reservations.append(relay_id)
                    else:
                        other_relays.append(relay_id)

                # Return first available relay with reservation, or fallback to others
                self.relay_counter += 1
                if relays_with_reservations:
                    return relays_with_reservations[
                        (self.relay_counter - 1) % len(relays_with_reservations)
                    ]
                elif other_relays:
                    return other_relays[(self.relay_counter - 1) % len(other_relays)]
            # Wait and try discovery
            await trio.sleep(1)
            attempts += 1


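The removed branch implemented a two-tier round robin: relays that hold an
active reservation are preferred, and a counter rotates through whichever
tier is non-empty. A standalone sketch of that selection arithmetic (all
names here are illustrative, not part of the py-libp2p API):

def pick_relay(
    counter: int, with_reservation: list[str], others: list[str]
) -> str | None:
    # Prefer relays that already hold a reservation; fall back to the rest.
    if with_reservation:
        return with_reservation[counter % len(with_reservation)]
    if others:
        return others[counter % len(others)]
    return None


# Successive counters cycle through the preferred tier first.
assert pick_relay(0, ["r1", "r2"], ["r3"]) == "r1"
assert pick_relay(1, ["r1", "r2"], ["r3"]) == "r2"
assert pick_relay(2, [], ["r3"]) == "r3"
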
@@ -1,4 +1,3 @@
import logging
from typing import (
    cast,
)
@@ -16,8 +15,6 @@ from libp2p.io.msgio import (
    FixedSizeLenMsgReadWriter,
)

logger = logging.getLogger(__name__)

SIZE_NOISE_MESSAGE_LEN = 2
MAX_NOISE_MESSAGE_LEN = 2 ** (8 * SIZE_NOISE_MESSAGE_LEN) - 1
SIZE_NOISE_MESSAGE_BODY_LEN = 2
@@ -53,25 +50,18 @@ class BaseNoiseMsgReadWriter(EncryptedMsgReadWriter):
        self.noise_state = noise_state

    async def write_msg(self, msg: bytes, prefix_encoded: bool = False) -> None:
        logger.debug(f"Noise write_msg: encrypting {len(msg)} bytes")
        data_encrypted = self.encrypt(msg)
        if prefix_encoded:
            # Manually add the prefix if needed
            data_encrypted = self.prefix + data_encrypted
        logger.debug(f"Noise write_msg: writing {len(data_encrypted)} encrypted bytes")
        await self.read_writer.write_msg(data_encrypted)
        logger.debug("Noise write_msg: write completed successfully")

    async def read_msg(self, prefix_encoded: bool = False) -> bytes:
        logger.debug("Noise read_msg: reading encrypted message")
        noise_msg_encrypted = await self.read_writer.read_msg()
        logger.debug(f"Noise read_msg: read {len(noise_msg_encrypted)} encrypted bytes")
        if prefix_encoded:
            result = self.decrypt(noise_msg_encrypted[len(self.prefix) :])
            return self.decrypt(noise_msg_encrypted[len(self.prefix) :])
        else:
            result = self.decrypt(noise_msg_encrypted)
        logger.debug(f"Noise read_msg: decrypted to {len(result)} bytes")
        return result
        return self.decrypt(noise_msg_encrypted)

    async def close(self) -> None:
        await self.read_writer.close()

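A quick check of the length-prefix arithmetic above: a 2-byte prefix caps a
noise message at 2**16 - 1 = 65535 bytes. The pack/unpack below only
illustrates fixed-size length framing; it is not the library's reader/writer:

import struct

SIZE_NOISE_MESSAGE_LEN = 2
MAX_NOISE_MESSAGE_LEN = 2 ** (8 * SIZE_NOISE_MESSAGE_LEN) - 1
assert MAX_NOISE_MESSAGE_LEN == 65535

payload = b"hello"
frame = struct.pack(">H", len(payload)) + payload  # big-endian length + body
(length,) = struct.unpack(">H", frame[:SIZE_NOISE_MESSAGE_LEN])
assert frame[SIZE_NOISE_MESSAGE_LEN : SIZE_NOISE_MESSAGE_LEN + length] == payload
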
@@ -1,7 +1,6 @@
from dataclasses import (
    dataclass,
)
import logging

from libp2p.crypto.keys import (
    PrivateKey,
@@ -13,8 +12,6 @@ from libp2p.crypto.serialization import (

from .pb import noise_pb2 as noise_pb

logger = logging.getLogger(__name__)

SIGNED_DATA_PREFIX = "noise-libp2p-static-key:"


@@ -51,8 +48,6 @@ def make_handshake_payload_sig(
    id_privkey: PrivateKey, noise_static_pubkey: PublicKey
) -> bytes:
    data = make_data_to_be_signed(noise_static_pubkey)
    logger.debug(f"make_handshake_payload_sig: signing data length: {len(data)}")
    logger.debug(f"make_handshake_payload_sig: signing data hex: {data.hex()}")
    return id_privkey.sign(data)


@@ -65,27 +60,4 @@ def verify_handshake_payload_sig(
    2. signed by the private key corresponding to `id_pubkey`
    """
    expected_data = make_data_to_be_signed(noise_static_pubkey)
    logger.debug(
        f"verify_handshake_payload_sig: payload.id_pubkey type: "
        f"{type(payload.id_pubkey)}"
    )
    logger.debug(
        f"verify_handshake_payload_sig: noise_static_pubkey type: "
        f"{type(noise_static_pubkey)}"
    )
    logger.debug(
        f"verify_handshake_payload_sig: expected_data length: {len(expected_data)}"
    )
    logger.debug(
        f"verify_handshake_payload_sig: expected_data hex: {expected_data.hex()}"
    )
    logger.debug(
        f"verify_handshake_payload_sig: payload.id_sig length: {len(payload.id_sig)}"
    )
    try:
        result = payload.id_pubkey.verify(expected_data, payload.id_sig)
        logger.debug(f"verify_handshake_payload_sig: verification result: {result}")
        return result
    except Exception as e:
        logger.error(f"verify_handshake_payload_sig: verification exception: {e}")
        return False
    return payload.id_pubkey.verify(expected_data, payload.id_sig)

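A minimal round-trip sketch of the sign/verify pair above. It assumes
NoiseHandshakePayload takes (id_pubkey, id_sig) and that
libp2p.crypto.ed25519.create_new_key_pair is available; treat both as
assumptions about the surrounding codebase:

from libp2p.crypto.ed25519 import create_new_key_pair

id_key = create_new_key_pair()     # identity keypair
noise_key = create_new_key_pair()  # stand-in for the noise static key

sig = make_handshake_payload_sig(id_key.private_key, noise_key.public_key)
payload = NoiseHandshakePayload(id_pubkey=id_key.public_key, id_sig=sig)

# The verifier recomputes the signed data from the noise static key.
assert verify_handshake_payload_sig(payload, noise_key.public_key)
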
@@ -2,7 +2,6 @@ from abc import (
    ABC,
    abstractmethod,
)
import logging

from cryptography.hazmat.primitives import (
    serialization,
@@ -47,8 +46,6 @@ from .messages import (
    verify_handshake_payload_sig,
)

logger = logging.getLogger(__name__)


class IPattern(ABC):
    @abstractmethod
@@ -98,7 +95,6 @@ class PatternXX(BasePattern):
        self.early_data = early_data

    async def handshake_inbound(self, conn: IRawConnection) -> ISecureConn:
        logger.debug(f"Noise XX handshake_inbound started for peer {self.local_peer}")
        noise_state = self.create_noise_state()
        noise_state.set_as_responder()
        noise_state.start_handshake()
@@ -111,22 +107,15 @@ class PatternXX(BasePattern):
        read_writer = NoiseHandshakeReadWriter(conn, noise_state)

        # Consume msg#1.
        logger.debug("Noise XX handshake_inbound: reading msg#1")
        await read_writer.read_msg()
        logger.debug("Noise XX handshake_inbound: read msg#1 successfully")

        # Send msg#2, which should include our handshake payload.
        logger.debug("Noise XX handshake_inbound: preparing msg#2")
        our_payload = self.make_handshake_payload()
        msg_2 = our_payload.serialize()
        logger.debug(f"Noise XX handshake_inbound: sending msg#2 ({len(msg_2)} bytes)")
        await read_writer.write_msg(msg_2)
        logger.debug("Noise XX handshake_inbound: sent msg#2 successfully")

        # Receive and consume msg#3.
        logger.debug("Noise XX handshake_inbound: reading msg#3")
        msg_3 = await read_writer.read_msg()
        logger.debug(f"Noise XX handshake_inbound: read msg#3 ({len(msg_3)} bytes)")
        peer_handshake_payload = NoiseHandshakePayload.deserialize(msg_3)

        if handshake_state.rs is None:
@@ -158,7 +147,6 @@ class PatternXX(BasePattern):
    async def handshake_outbound(
        self, conn: IRawConnection, remote_peer: ID
    ) -> ISecureConn:
        logger.debug(f"Noise XX handshake_outbound started to peer {remote_peer}")
        noise_state = self.create_noise_state()

        read_writer = NoiseHandshakeReadWriter(conn, noise_state)
@@ -171,15 +159,11 @@ class PatternXX(BasePattern):
            raise NoiseStateError("Handshake state is not initialized")

        # Send msg#1, which is *not* encrypted.
        logger.debug("Noise XX handshake_outbound: sending msg#1")
        msg_1 = b""
        await read_writer.write_msg(msg_1)
        logger.debug("Noise XX handshake_outbound: sent msg#1 successfully")

        # Read msg#2 from the remote, which contains the public key of the peer.
        logger.debug("Noise XX handshake_outbound: reading msg#2")
        msg_2 = await read_writer.read_msg()
        logger.debug(f"Noise XX handshake_outbound: read msg#2 ({len(msg_2)} bytes)")
        peer_handshake_payload = NoiseHandshakePayload.deserialize(msg_2)

        if handshake_state.rs is None:
@@ -190,27 +174,8 @@ class PatternXX(BasePattern):
            )
        remote_pubkey = self._get_pubkey_from_noise_keypair(handshake_state.rs)

        logger.debug(
            f"Noise XX handshake_outbound: verifying signature for peer {remote_peer}"
        )
        logger.debug(
            f"Noise XX handshake_outbound: remote_pubkey type: {type(remote_pubkey)}"
        )
        id_pubkey_repr = peer_handshake_payload.id_pubkey.to_bytes().hex()
        logger.debug(
            f"Noise XX handshake_outbound: peer_handshake_payload.id_pubkey: "
            f"{id_pubkey_repr}"
        )
        if not verify_handshake_payload_sig(peer_handshake_payload, remote_pubkey):
            logger.error(
                f"Noise XX handshake_outbound: signature verification failed for peer "
                f"{remote_peer}"
            )
            raise InvalidSignature
        logger.debug(
            f"Noise XX handshake_outbound: signature verification successful for peer "
            f"{remote_peer}"
        )
        remote_peer_id_from_pubkey = ID.from_pubkey(peer_handshake_payload.id_pubkey)
        if remote_peer_id_from_pubkey != remote_peer:
            raise PeerIDMismatchesPubkey(

@@ -17,9 +17,6 @@ from libp2p.custom_types import (
from libp2p.peer.id import (
    ID,
)
from libp2p.protocol_muxer.exceptions import (
    MultiselectError,
)
from libp2p.protocol_muxer.multiselect import (
    Multiselect,
)
@@ -107,7 +104,7 @@ class SecurityMultistream(ABC):
        :param is_initiator: true if we are the initiator, false otherwise
        :return: selected secure transport
        """
        protocol: TProtocol | None
        protocol: TProtocol
        communicator = MultiselectCommunicator(conn)
        if is_initiator:
            # Select protocol if initiator
@@ -117,9 +114,5 @@ class SecurityMultistream(ABC):
        else:
            # Select protocol if non-initiator
            protocol, _ = await self.multiselect.negotiate(communicator)
            if protocol is None:
                raise MultiselectError(
                    "Failed to negotiate a security protocol: no protocol selected"
                )
        # Return transport from protocol
        return self.transports[protocol]

@@ -1,3 +1,5 @@
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager
from types import (
    TracebackType,
)
@@ -13,7 +15,6 @@ from libp2p.abc import (
from libp2p.stream_muxer.exceptions import (
    MuxedConnUnavailable,
)
from libp2p.stream_muxer.rw_lock import ReadWriteLock

from .constants import (
    HeaderTags,
@@ -33,6 +34,72 @@ if TYPE_CHECKING:
    )


class ReadWriteLock:
    """
    A read-write lock that allows multiple concurrent readers
    or one exclusive writer, implemented using Trio primitives.
    """

    def __init__(self) -> None:
        self._readers = 0
        self._readers_lock = trio.Lock()  # Protects access to _readers count
        self._writer_lock = trio.Semaphore(1)  # Allows only one writer at a time

    async def acquire_read(self) -> None:
        """Acquire a read lock. Multiple readers can hold it simultaneously."""
        try:
            async with self._readers_lock:
                if self._readers == 0:
                    await self._writer_lock.acquire()
                self._readers += 1
        except trio.Cancelled:
            raise

    async def release_read(self) -> None:
        """Release a read lock."""
        async with self._readers_lock:
            if self._readers == 1:
                self._writer_lock.release()
            self._readers -= 1

    async def acquire_write(self) -> None:
        """Acquire an exclusive write lock."""
        try:
            await self._writer_lock.acquire()
        except trio.Cancelled:
            raise

    def release_write(self) -> None:
        """Release the exclusive write lock."""
        self._writer_lock.release()

    @asynccontextmanager
    async def read_lock(self) -> AsyncGenerator[None, None]:
        """Context manager for acquiring and releasing a read lock safely."""
        acquire = False
        try:
            await self.acquire_read()
            acquire = True
            yield
        finally:
            if acquire:
                with trio.CancelScope() as scope:
                    scope.shield = True
                    await self.release_read()

    @asynccontextmanager
    async def write_lock(self) -> AsyncGenerator[None, None]:
        """Context manager for acquiring and releasing a write lock safely."""
        acquire = False
        try:
            await self.acquire_write()
            acquire = True
            yield
        finally:
            if acquire:
                self.release_write()


class MplexStream(IMuxedStream):
    """
    reference: https://github.com/libp2p/go-mplex/blob/master/stream.go

@@ -17,11 +17,7 @@ from libp2p.custom_types import (
from libp2p.peer.id import (
    ID,
)
from libp2p.protocol_muxer.exceptions import (
    MultiselectError,
)
from libp2p.protocol_muxer.multiselect import (
    DEFAULT_NEGOTIATE_TIMEOUT,
    Multiselect,
)
from libp2p.protocol_muxer.multiselect_client import (
@@ -47,17 +43,11 @@ class MuxerMultistream:
    transports: "OrderedDict[TProtocol, TMuxerClass]"
    multiselect: Multiselect
    multiselect_client: MultiselectClient
    negotiate_timeout: int

    def __init__(
        self,
        muxer_transports_by_protocol: TMuxerOptions,
        negotiate_timeout: int = DEFAULT_NEGOTIATE_TIMEOUT,
    ) -> None:
    def __init__(self, muxer_transports_by_protocol: TMuxerOptions) -> None:
        self.transports = OrderedDict()
        self.multiselect = Multiselect()
        self.multiselect_client = MultiselectClient()
        self.negotiate_timeout = negotiate_timeout
        for protocol, transport in muxer_transports_by_protocol.items():
            self.add_transport(protocol, transport)

@@ -83,26 +73,20 @@ class MuxerMultistream:
        :param conn: conn to choose a transport over
        :return: selected muxer transport
        """
        protocol: TProtocol | None
        protocol: TProtocol
        communicator = MultiselectCommunicator(conn)
        if conn.is_initiator:
            protocol = await self.multiselect_client.select_one_of(
                tuple(self.transports.keys()), communicator, self.negotiate_timeout
                tuple(self.transports.keys()), communicator
            )
        else:
            protocol, _ = await self.multiselect.negotiate(
                communicator, self.negotiate_timeout
            )
            if protocol is None:
                raise MultiselectError(
                    "Failed to negotiate a stream muxer protocol: no protocol selected"
                )
            protocol, _ = await self.multiselect.negotiate(communicator)
        return self.transports[protocol]

    async def new_conn(self, conn: ISecureConn, peer_id: ID) -> IMuxedConn:
        communicator = MultiselectCommunicator(conn)
        protocol = await self.multiselect_client.select_one_of(
            tuple(self.transports.keys()), communicator, self.negotiate_timeout
            tuple(self.transports.keys()), communicator
        )
        transport_class = self.transports[protocol]
        if protocol == PROTOCOL_ID:

@@ -1,70 +0,0 @@
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager

import trio


class ReadWriteLock:
    """
    A read-write lock that allows multiple concurrent readers
    or one exclusive writer, implemented using Trio primitives.
    """

    def __init__(self) -> None:
        self._readers = 0
        self._readers_lock = trio.Lock()  # Protects access to _readers count
        self._writer_lock = trio.Semaphore(1)  # Allows only one writer at a time

    async def acquire_read(self) -> None:
        """Acquire a read lock. Multiple readers can hold it simultaneously."""
        try:
            async with self._readers_lock:
                if self._readers == 0:
                    await self._writer_lock.acquire()
                self._readers += 1
        except trio.Cancelled:
            raise

    async def release_read(self) -> None:
        """Release a read lock."""
        async with self._readers_lock:
            if self._readers == 1:
                self._writer_lock.release()
            self._readers -= 1

    async def acquire_write(self) -> None:
        """Acquire an exclusive write lock."""
        try:
            await self._writer_lock.acquire()
        except trio.Cancelled:
            raise

    def release_write(self) -> None:
        """Release the exclusive write lock."""
        self._writer_lock.release()

    @asynccontextmanager
    async def read_lock(self) -> AsyncGenerator[None, None]:
        """Context manager for acquiring and releasing a read lock safely."""
        acquire = False
        try:
            await self.acquire_read()
            acquire = True
            yield
        finally:
            if acquire:
                with trio.CancelScope() as scope:
                    scope.shield = True
                    await self.release_read()

    @asynccontextmanager
    async def write_lock(self) -> AsyncGenerator[None, None]:
        """Context manager for acquiring and releasing a write lock safely."""
        acquire = False
        try:
            await self.acquire_write()
            acquire = True
            yield
        finally:
            if acquire:
                self.release_write()
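A small usage sketch for the ReadWriteLock deleted above (and inlined into
mplex): several readers run concurrently while a writer gets exclusive
access. Purely illustrative:

import trio


async def main() -> None:
    lock = ReadWriteLock()
    shared: list[int] = []

    async def reader(i: int) -> None:
        async with lock.read_lock():  # many readers may hold this at once
            await trio.sleep(0.01)
            print(f"reader {i} sees {shared}")

    async def writer() -> None:
        async with lock.write_lock():  # excludes readers and other writers
            shared.append(42)

    async with trio.open_nursery() as nursery:
        nursery.start_soon(writer)
        for i in range(3):
            nursery.start_soon(reader, i)


trio.run(main)
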
@@ -44,7 +44,6 @@ from libp2p.stream_muxer.exceptions import (
    MuxedStreamError,
    MuxedStreamReset,
)
from libp2p.stream_muxer.rw_lock import ReadWriteLock

# Configure logger for this module
logger = logging.getLogger("libp2p.stream_muxer.yamux")
@@ -81,8 +80,6 @@ class YamuxStream(IMuxedStream):
        self.send_window = DEFAULT_WINDOW_SIZE
        self.recv_window = DEFAULT_WINDOW_SIZE
        self.window_lock = trio.Lock()
        self.rw_lock = ReadWriteLock()
        self.close_lock = trio.Lock()

    async def __aenter__(self) -> "YamuxStream":
        """Enter the async context manager."""
@@ -98,54 +95,52 @@ class YamuxStream(IMuxedStream):
        await self.close()

    async def write(self, data: bytes) -> None:
        async with self.rw_lock.write_lock():
            if self.send_closed:
                raise MuxedStreamError("Stream is closed for sending")
        if self.send_closed:
            raise MuxedStreamError("Stream is closed for sending")

            # Flow control: Check if we have enough send window
            total_len = len(data)
            sent = 0
            logger.debug(f"Stream {self.stream_id}: Starts writing {total_len} bytes ")
            while sent < total_len:
                # Wait for available window with timeout
                timeout = False
                async with self.window_lock:
                    if self.send_window == 0:
                        logger.debug(
                            f"Stream {self.stream_id}: "
                            "Window is zero, waiting for update"
                        )
                        # Release lock and wait with timeout
                        self.window_lock.release()
                        # To avoid re-acquiring the lock immediately,
                        with trio.move_on_after(5.0) as cancel_scope:
                            while self.send_window == 0 and not self.closed:
                                await trio.sleep(0.01)
                            # If we timed out, cancel the scope
                            timeout = cancel_scope.cancelled_caught
                        # Re-acquire lock
                        await self.window_lock.acquire()

                # If we timed out waiting for window update, raise an error
                if timeout:
                    raise MuxedStreamError(
                        "Timed out waiting for window update after 5 seconds."
                    )

                if self.closed:
                    raise MuxedStreamError("Stream is closed")

                # Calculate how much we can send now
                to_send = min(self.send_window, total_len - sent)
                chunk = data[sent : sent + to_send]
                self.send_window -= to_send

                # Send the data
                header = struct.pack(
                    YAMUX_HEADER_FORMAT, 0, TYPE_DATA, 0, self.stream_id, len(chunk)
        # Flow control: Check if we have enough send window
        total_len = len(data)
        sent = 0
        logger.debug(f"Stream {self.stream_id}: Starts writing {total_len} bytes ")
        while sent < total_len:
            # Wait for available window with timeout
            timeout = False
            async with self.window_lock:
                if self.send_window == 0:
                    logger.debug(
                        f"Stream {self.stream_id}: Window is zero, waiting for update"
                    )
                await self.conn.secured_conn.write(header + chunk)
                sent += to_send
                    # Release lock and wait with timeout
                    self.window_lock.release()
                    # To avoid re-acquiring the lock immediately,
                    with trio.move_on_after(5.0) as cancel_scope:
                        while self.send_window == 0 and not self.closed:
                            await trio.sleep(0.01)
                        # If we timed out, cancel the scope
                        timeout = cancel_scope.cancelled_caught
                    # Re-acquire lock
                    await self.window_lock.acquire()

            # If we timed out waiting for window update, raise an error
            if timeout:
                raise MuxedStreamError(
                    "Timed out waiting for window update after 5 seconds."
                )

            if self.closed:
                raise MuxedStreamError("Stream is closed")

            # Calculate how much we can send now
            to_send = min(self.send_window, total_len - sent)
            chunk = data[sent : sent + to_send]
            self.send_window -= to_send

            # Send the data
            header = struct.pack(
                YAMUX_HEADER_FORMAT, 0, TYPE_DATA, 0, self.stream_id, len(chunk)
            )
            await self.conn.secured_conn.write(header + chunk)
            sent += to_send

    async def send_window_update(self, increment: int, skip_lock: bool = False) -> None:
        """
@@ -262,32 +257,30 @@ class YamuxStream(IMuxedStream):
        return data

    async def close(self) -> None:
        async with self.close_lock:
            if not self.send_closed:
                logger.debug(f"Half-closing stream {self.stream_id} (local end)")
                header = struct.pack(
                    YAMUX_HEADER_FORMAT, 0, TYPE_DATA, FLAG_FIN, self.stream_id, 0
                )
                await self.conn.secured_conn.write(header)
                self.send_closed = True
        if not self.send_closed:
            logger.debug(f"Half-closing stream {self.stream_id} (local end)")
            header = struct.pack(
                YAMUX_HEADER_FORMAT, 0, TYPE_DATA, FLAG_FIN, self.stream_id, 0
            )
            await self.conn.secured_conn.write(header)
            self.send_closed = True

            # Only set fully closed if both directions are closed
            if self.send_closed and self.recv_closed:
                self.closed = True
            else:
                # Stream is half-closed but not fully closed
                self.closed = False
        # Only set fully closed if both directions are closed
        if self.send_closed and self.recv_closed:
            self.closed = True
        else:
            # Stream is half-closed but not fully closed
            self.closed = False

    async def reset(self) -> None:
        if not self.closed:
            async with self.close_lock:
                logger.debug(f"Resetting stream {self.stream_id}")
                header = struct.pack(
                    YAMUX_HEADER_FORMAT, 0, TYPE_DATA, FLAG_RST, self.stream_id, 0
                )
                await self.conn.secured_conn.write(header)
                self.closed = True
                self.reset_received = True  # Mark as reset
            logger.debug(f"Resetting stream {self.stream_id}")
            header = struct.pack(
                YAMUX_HEADER_FORMAT, 0, TYPE_DATA, FLAG_RST, self.stream_id, 0
            )
            await self.conn.secured_conn.write(header)
            self.closed = True
            self.reset_received = True  # Mark as reset

    def set_deadline(self, ttl: int) -> bool:
        """

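A standalone sketch of the send-window chunking in YamuxStream.write above:
each pass sends at most send_window bytes and debits the window. Real yamux
also blocks for a window update between passes; this sketch only shows the
chunking arithmetic, and the names are illustrative:

def chunk_by_window(data: bytes, send_window: int) -> list[bytes]:
    # Split `data` into pieces no larger than the current send window.
    chunks = []
    sent = 0
    while sent < len(data):
        to_send = min(send_window, len(data) - sent)
        chunks.append(data[sent : sent + to_send])
        sent += to_send
    return chunks


# A 10-byte payload against a 4-byte window goes out as 4 + 4 + 2 bytes.
assert [len(c) for c in chunk_by_window(b"0123456789", 4)] == [4, 4, 2]
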
@@ -1,57 +0,0 @@
from typing import Any

from .tcp.tcp import TCP
from .websocket.transport import WebsocketTransport
from .transport_registry import (
    TransportRegistry,
    create_transport_for_multiaddr,
    get_transport_registry,
    register_transport,
    get_supported_transport_protocols,
)
from .upgrader import TransportUpgrader
from libp2p.abc import ITransport


def create_transport(
    protocol: str, upgrader: TransportUpgrader | None = None, **kwargs: Any
) -> ITransport:
    """
    Convenience function to create a transport instance.

    :param protocol: The transport protocol ("tcp", "ws", "wss", or custom)
    :param upgrader: Optional transport upgrader (required for WebSocket)
    :param kwargs: Additional arguments for transport construction (e.g., tls_client_config, tls_server_config)
    :return: Transport instance
    """
    # First check if it's a built-in protocol
    if protocol in ["ws", "wss"]:
        if upgrader is None:
            raise ValueError("WebSocket transport requires an upgrader")
        return WebsocketTransport(
            upgrader,
            tls_client_config=kwargs.get("tls_client_config"),
            tls_server_config=kwargs.get("tls_server_config"),
            handshake_timeout=kwargs.get("handshake_timeout", 15.0),
        )
    elif protocol == "tcp":
        return TCP()
    else:
        # Check if it's a custom registered transport
        registry = get_transport_registry()
        transport_class = registry.get_transport(protocol)
        if transport_class:
            transport = registry.create_transport(protocol, upgrader, **kwargs)
            if transport is None:
                raise ValueError(f"Failed to create transport for protocol: {protocol}")
            return transport
        else:
            raise ValueError(f"Unsupported transport protocol: {protocol}")


__all__ = [
    "TCP",
    "WebsocketTransport",
    "TransportRegistry",
    "create_transport_for_multiaddr",
    "create_transport",
    "get_transport_registry",
    "register_transport",
    "get_supported_transport_protocols",
]

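A usage sketch for the removed create_transport helper. The "tcp" branch
needs no upgrader, the WebSocket branches do, and an unregistered protocol
raises; this mirrors the deleted code above, not a currently exported API:

tcp_transport = create_transport("tcp")

# WebSocket requires an upgrader (construction of one is elided here).
# ws_transport = create_transport("ws", upgrader=my_upgrader)

# Unknown protocols that are not registered raise ValueError.
try:
    create_transport("carrier-pigeon")
except ValueError as exc:
    print(exc)  # Unsupported transport protocol: carrier-pigeon
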
@@ -1,345 +0,0 @@
"""
Configuration classes for QUIC transport.
"""

from dataclasses import (
    dataclass,
    field,
)
import ssl
from typing import Any, Literal, TypedDict

from libp2p.custom_types import TProtocol
from libp2p.network.config import ConnectionConfig


class QUICTransportKwargs(TypedDict, total=False):
    """Type definition for kwargs accepted by new_transport function."""

    # Connection settings
    idle_timeout: float
    max_datagram_size: int
    local_port: int | None

    # Protocol version support
    enable_draft29: bool
    enable_v1: bool

    # TLS settings
    verify_mode: ssl.VerifyMode
    alpn_protocols: list[str]

    # Performance settings
    max_concurrent_streams: int
    connection_window: int
    stream_window: int

    # Logging and debugging
    enable_qlog: bool
    qlog_dir: str | None

    # Connection management
    max_connections: int
    connection_timeout: float

    # Protocol identifiers
    PROTOCOL_QUIC_V1: TProtocol
    PROTOCOL_QUIC_DRAFT29: TProtocol


@dataclass
class QUICTransportConfig(ConnectionConfig):
    """Configuration for QUIC transport."""

    # Connection settings
    idle_timeout: float = 30.0  # Seconds before an idle connection is closed.
    max_datagram_size: int = (
        1200  # Maximum size of UDP datagrams to avoid IP fragmentation.
    )
    local_port: int | None = (
        None  # Local port to bind to. If None, a random port is chosen.
    )

    # Protocol version support
    enable_draft29: bool = True  # Enable QUIC draft-29 for compatibility
    enable_v1: bool = True  # Enable QUIC v1 (RFC 9000)

    # TLS settings
    verify_mode: ssl.VerifyMode = ssl.CERT_NONE
    alpn_protocols: list[str] = field(default_factory=lambda: ["libp2p"])

    # Performance settings
    max_concurrent_streams: int = 100  # Maximum concurrent streams per connection
    connection_window: int = 1024 * 1024  # Connection flow control window
    stream_window: int = 64 * 1024  # Stream flow control window

    # Logging and debugging
    enable_qlog: bool = False  # Enable QUIC logging
    qlog_dir: str | None = None  # Directory for QUIC logs

    # Connection management
    max_connections: int = 1000  # Maximum number of connections
    connection_timeout: float = 10.0  # Connection establishment timeout

    MAX_CONCURRENT_STREAMS: int = 1000
    """Maximum number of concurrent streams per connection."""

    MAX_INCOMING_STREAMS: int = 1000
    """Maximum number of incoming streams per connection."""

    CONNECTION_HANDSHAKE_TIMEOUT: float = 60.0
    """Timeout for connection handshake (seconds)."""

    MAX_OUTGOING_STREAMS: int = 1000
    """Maximum number of outgoing streams per connection."""

    CONNECTION_CLOSE_TIMEOUT: int = 10
    """Timeout for closing a connection (seconds)."""

    # Stream timeouts
    STREAM_OPEN_TIMEOUT: float = 5.0
    """Timeout for opening new streams (seconds)."""

    STREAM_ACCEPT_TIMEOUT: float = 30.0
    """Timeout for accepting incoming streams (seconds)."""

    STREAM_READ_TIMEOUT: float = 30.0
    """Default timeout for stream read operations (seconds)."""

    STREAM_WRITE_TIMEOUT: float = 30.0
    """Default timeout for stream write operations (seconds)."""

    STREAM_CLOSE_TIMEOUT: float = 10.0
    """Timeout for graceful stream close (seconds)."""

    # Flow control configuration
    STREAM_FLOW_CONTROL_WINDOW: int = 1024 * 1024  # 1MB
    """Per-stream flow control window size."""

    CONNECTION_FLOW_CONTROL_WINDOW: int = 1536 * 1024  # 1.5MB
    """Connection-wide flow control window size."""

    # Buffer management
    MAX_STREAM_RECEIVE_BUFFER: int = 2 * 1024 * 1024  # 2MB
    """Maximum receive buffer size per stream."""

    STREAM_RECEIVE_BUFFER_LOW_WATERMARK: int = 64 * 1024  # 64KB
    """Low watermark for stream receive buffer."""

    STREAM_RECEIVE_BUFFER_HIGH_WATERMARK: int = 512 * 1024  # 512KB
    """High watermark for stream receive buffer."""

    # Stream lifecycle configuration
    ENABLE_STREAM_RESET_ON_ERROR: bool = True
    """Whether to automatically reset streams on errors."""

    STREAM_RESET_ERROR_CODE: int = 1
    """Default error code for stream resets."""

    ENABLE_STREAM_KEEP_ALIVE: bool = False
    """Whether to enable stream keep-alive mechanisms."""

    STREAM_KEEP_ALIVE_INTERVAL: float = 30.0
    """Interval for stream keep-alive pings (seconds)."""

    # Resource management
    ENABLE_STREAM_RESOURCE_TRACKING: bool = True
    """Whether to track stream resource usage."""

    STREAM_MEMORY_LIMIT_PER_STREAM: int = 2 * 1024 * 1024  # 2MB
    """Memory limit per individual stream."""

    STREAM_MEMORY_LIMIT_PER_CONNECTION: int = 100 * 1024 * 1024  # 100MB
    """Total memory limit for all streams per connection."""

    # Concurrency and performance
    ENABLE_STREAM_BATCHING: bool = True
    """Whether to batch multiple stream operations."""

    STREAM_BATCH_SIZE: int = 10
    """Number of streams to process in a batch."""

    STREAM_PROCESSING_CONCURRENCY: int = 100
    """Maximum concurrent stream processing tasks."""

    # Debugging and monitoring
    ENABLE_STREAM_METRICS: bool = True
    """Whether to collect stream metrics."""

    ENABLE_STREAM_TIMELINE_TRACKING: bool = True
    """Whether to track stream lifecycle timelines."""

    STREAM_METRICS_COLLECTION_INTERVAL: float = 60.0
    """Interval for collecting stream metrics (seconds)."""

    # Error handling configuration
    STREAM_ERROR_RETRY_ATTEMPTS: int = 3
    """Number of retry attempts for recoverable stream errors."""

    STREAM_ERROR_RETRY_DELAY: float = 1.0
    """Initial delay between stream error retries (seconds)."""

    STREAM_ERROR_RETRY_BACKOFF_FACTOR: float = 2.0
    """Backoff factor for stream error retries."""

    # Protocol identifiers matching go-libp2p
    PROTOCOL_QUIC_V1: TProtocol = TProtocol("quic-v1")  # RFC 9000
    PROTOCOL_QUIC_DRAFT29: TProtocol = TProtocol("quic")  # draft-29

    def __post_init__(self) -> None:
        """Validate configuration after initialization."""
        if not (self.enable_draft29 or self.enable_v1):
            raise ValueError("At least one QUIC version must be enabled")

        if self.idle_timeout <= 0:
            raise ValueError("Idle timeout must be positive")

        if self.max_datagram_size < 1200:
            raise ValueError("Max datagram size must be at least 1200 bytes")

        # Validate timeouts
        timeout_fields = [
            "STREAM_OPEN_TIMEOUT",
            "STREAM_ACCEPT_TIMEOUT",
            "STREAM_READ_TIMEOUT",
            "STREAM_WRITE_TIMEOUT",
            "STREAM_CLOSE_TIMEOUT",
        ]
        for timeout_field in timeout_fields:
            if getattr(self, timeout_field) <= 0:
                raise ValueError(f"{timeout_field} must be positive")

        # Validate flow control windows
        if self.STREAM_FLOW_CONTROL_WINDOW <= 0:
            raise ValueError("STREAM_FLOW_CONTROL_WINDOW must be positive")

        if self.CONNECTION_FLOW_CONTROL_WINDOW < self.STREAM_FLOW_CONTROL_WINDOW:
            raise ValueError(
                "CONNECTION_FLOW_CONTROL_WINDOW must be >= STREAM_FLOW_CONTROL_WINDOW"
            )

        # Validate buffer sizes
        if self.MAX_STREAM_RECEIVE_BUFFER <= 0:
            raise ValueError("MAX_STREAM_RECEIVE_BUFFER must be positive")

        if self.STREAM_RECEIVE_BUFFER_HIGH_WATERMARK > self.MAX_STREAM_RECEIVE_BUFFER:
            raise ValueError(
                "STREAM_RECEIVE_BUFFER_HIGH_WATERMARK cannot "
                "exceed MAX_STREAM_RECEIVE_BUFFER"
            )

        if (
            self.STREAM_RECEIVE_BUFFER_LOW_WATERMARK
            >= self.STREAM_RECEIVE_BUFFER_HIGH_WATERMARK
        ):
            raise ValueError(
                "STREAM_RECEIVE_BUFFER_LOW_WATERMARK must be < HIGH_WATERMARK"
            )

        # Validate memory limits
        if self.STREAM_MEMORY_LIMIT_PER_STREAM <= 0:
            raise ValueError("STREAM_MEMORY_LIMIT_PER_STREAM must be positive")

        if self.STREAM_MEMORY_LIMIT_PER_CONNECTION <= 0:
            raise ValueError("STREAM_MEMORY_LIMIT_PER_CONNECTION must be positive")

        expected_stream_memory = (
            self.MAX_CONCURRENT_STREAMS * self.STREAM_MEMORY_LIMIT_PER_STREAM
        )
        if expected_stream_memory > self.STREAM_MEMORY_LIMIT_PER_CONNECTION * 2:
            # Allow some headroom, but warn if configuration seems inconsistent
            import logging

            logger = logging.getLogger(__name__)
            logger.warning(
                "Stream memory configuration may be inconsistent: "
                f"{self.MAX_CONCURRENT_STREAMS} streams × "
                f"{self.STREAM_MEMORY_LIMIT_PER_STREAM} bytes "
                "could exceed connection limit of "
                f"{self.STREAM_MEMORY_LIMIT_PER_CONNECTION} bytes"
            )

    def get_stream_config_dict(self) -> dict[str, Any]:
        """Get stream-specific configuration as dictionary."""
        stream_config = {}
        for attr_name in dir(self):
            if attr_name.startswith(
                ("STREAM_", "MAX_", "ENABLE_STREAM", "CONNECTION_FLOW")
            ):
                stream_config[attr_name.lower()] = getattr(self, attr_name)
        return stream_config


# Additional configuration classes for specific stream features


class QUICStreamFlowControlConfig:
    """Configuration for QUIC stream flow control."""

    def __init__(
        self,
        initial_window_size: int = 512 * 1024,
        max_window_size: int = 2 * 1024 * 1024,
        window_update_threshold: float = 0.5,
        enable_auto_tuning: bool = True,
    ):
        self.initial_window_size = initial_window_size
        self.max_window_size = max_window_size
        self.window_update_threshold = window_update_threshold
        self.enable_auto_tuning = enable_auto_tuning


def create_stream_config_for_use_case(
    use_case: Literal[
        "high_throughput", "low_latency", "many_streams", "memory_constrained"
    ],
) -> QUICTransportConfig:
    """
    Create optimized stream configuration for specific use cases.

    Args:
        use_case: One of "high_throughput", "low_latency", "many_streams",
            "memory_constrained"

    Returns:
        Optimized QUICTransportConfig

    """
    base_config = QUICTransportConfig()

    if use_case == "high_throughput":
        # Optimize for high throughput
        base_config.STREAM_FLOW_CONTROL_WINDOW = 2 * 1024 * 1024  # 2MB
        base_config.CONNECTION_FLOW_CONTROL_WINDOW = 10 * 1024 * 1024  # 10MB
        base_config.MAX_STREAM_RECEIVE_BUFFER = 4 * 1024 * 1024  # 4MB
        base_config.STREAM_PROCESSING_CONCURRENCY = 200

    elif use_case == "low_latency":
        # Optimize for low latency
        base_config.STREAM_OPEN_TIMEOUT = 1.0
        base_config.STREAM_READ_TIMEOUT = 5.0
        base_config.STREAM_WRITE_TIMEOUT = 5.0
        base_config.ENABLE_STREAM_BATCHING = False
        base_config.STREAM_BATCH_SIZE = 1

    elif use_case == "many_streams":
        # Optimize for many concurrent streams
        base_config.MAX_CONCURRENT_STREAMS = 5000
        base_config.STREAM_FLOW_CONTROL_WINDOW = 128 * 1024  # 128KB
        base_config.MAX_STREAM_RECEIVE_BUFFER = 256 * 1024  # 256KB
        base_config.STREAM_PROCESSING_CONCURRENCY = 500

    elif use_case == "memory_constrained":
        # Optimize for low memory usage
        base_config.MAX_CONCURRENT_STREAMS = 100
        base_config.STREAM_FLOW_CONTROL_WINDOW = 64 * 1024  # 64KB
        base_config.CONNECTION_FLOW_CONTROL_WINDOW = 256 * 1024  # 256KB
        base_config.MAX_STREAM_RECEIVE_BUFFER = 128 * 1024  # 128KB
        base_config.STREAM_MEMORY_LIMIT_PER_STREAM = 512 * 1024  # 512KB
        base_config.STREAM_PROCESSING_CONCURRENCY = 50

    else:
        raise ValueError(f"Unknown use case: {use_case}")

    return base_config
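A usage sketch for the use-case factory above; the names and values come
straight from the deleted module:

cfg = create_stream_config_for_use_case("memory_constrained")
assert cfg.MAX_CONCURRENT_STREAMS == 100
assert cfg.STREAM_FLOW_CONTROL_WINDOW == 64 * 1024

# __post_init__ rejects inconsistent settings, e.g. disabling every version:
try:
    QUICTransportConfig(enable_draft29=False, enable_v1=False)
except ValueError as exc:
    print(exc)  # At least one QUIC version must be enabled
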
File diff suppressed because it is too large
@@ -1,391 +0,0 @@
"""
QUIC Transport exceptions
"""

from typing import Any, Literal


class QUICError(Exception):
    """Base exception for all QUIC transport errors."""

    def __init__(self, message: str, error_code: int | None = None):
        super().__init__(message)
        self.error_code = error_code


# Transport-level exceptions


class QUICTransportError(QUICError):
    """Base exception for QUIC transport operations."""

    pass


class QUICDialError(QUICTransportError):
    """Error occurred during QUIC connection establishment."""

    pass


class QUICListenError(QUICTransportError):
    """Error occurred during QUIC listener operations."""

    pass


class QUICSecurityError(QUICTransportError):
    """Error related to QUIC security/TLS operations."""

    pass


# Connection-level exceptions


class QUICConnectionError(QUICError):
    """Base exception for QUIC connection operations."""

    pass


class QUICConnectionClosedError(QUICConnectionError):
    """QUIC connection has been closed."""

    pass


class QUICConnectionTimeoutError(QUICConnectionError):
    """QUIC connection operation timed out."""

    pass


class QUICHandshakeError(QUICConnectionError):
    """Error during QUIC handshake process."""

    pass


class QUICPeerVerificationError(QUICConnectionError):
    """Error verifying peer identity during handshake."""

    pass


# Stream-level exceptions


class QUICStreamError(QUICError):
    """Base exception for QUIC stream operations."""

    def __init__(
        self,
        message: str,
        stream_id: str | None = None,
        error_code: int | None = None,
    ):
        super().__init__(message, error_code)
        self.stream_id = stream_id


class QUICStreamClosedError(QUICStreamError):
    """Stream is closed and cannot be used for I/O operations."""

    pass


class QUICStreamResetError(QUICStreamError):
    """Stream was reset by local or remote peer."""

    def __init__(
        self,
        message: str,
        stream_id: str | None = None,
        error_code: int | None = None,
        reset_by_peer: bool = False,
    ):
        super().__init__(message, stream_id, error_code)
        self.reset_by_peer = reset_by_peer


class QUICStreamTimeoutError(QUICStreamError):
    """Stream operation timed out."""

    pass


class QUICStreamBackpressureError(QUICStreamError):
    """Stream write blocked due to flow control."""

    pass


class QUICStreamLimitError(QUICStreamError):
    """Stream limit reached (too many concurrent streams)."""

    pass


class QUICStreamStateError(QUICStreamError):
    """Invalid operation for current stream state."""

    def __init__(
        self,
        message: str,
        stream_id: str | None = None,
        current_state: str | None = None,
        attempted_operation: str | None = None,
    ):
        super().__init__(message, stream_id)
        self.current_state = current_state
        self.attempted_operation = attempted_operation


# Flow control exceptions


class QUICFlowControlError(QUICError):
    """Base exception for flow control related errors."""

    pass


class QUICFlowControlViolationError(QUICFlowControlError):
    """Flow control limits were violated."""

    pass


class QUICFlowControlDeadlockError(QUICFlowControlError):
    """Flow control deadlock detected."""

    pass


# Resource management exceptions


class QUICResourceError(QUICError):
    """Base exception for resource management errors."""

    pass


class QUICMemoryLimitError(QUICResourceError):
    """Memory limit exceeded."""

    pass


class QUICConnectionLimitError(QUICResourceError):
    """Connection limit exceeded."""

    pass


# Multiaddr and addressing exceptions


class QUICAddressError(QUICError):
    """Base exception for QUIC addressing errors."""

    pass


class QUICInvalidMultiaddrError(QUICAddressError):
    """Invalid multiaddr format for QUIC transport."""

    pass


class QUICAddressResolutionError(QUICAddressError):
    """Failed to resolve QUIC address."""

    pass


class QUICProtocolError(QUICError):
    """Base exception for QUIC protocol errors."""

    pass


class QUICVersionNegotiationError(QUICProtocolError):
    """QUIC version negotiation failed."""

    pass


class QUICUnsupportedVersionError(QUICProtocolError):
    """Unsupported QUIC version."""

    pass


# Configuration exceptions


class QUICConfigurationError(QUICError):
    """Base exception for QUIC configuration errors."""

    pass


class QUICInvalidConfigError(QUICConfigurationError):
    """Invalid QUIC configuration parameters."""

    pass


class QUICCertificateError(QUICConfigurationError):
    """Error with TLS certificate configuration."""

    pass


def map_quic_error_code(error_code: int) -> str:
    """
    Map QUIC error codes to human-readable descriptions.
    Based on RFC 9000 Transport Error Codes.
    """
    error_codes = {
        0x00: "NO_ERROR",
        0x01: "INTERNAL_ERROR",
        0x02: "CONNECTION_REFUSED",
        0x03: "FLOW_CONTROL_ERROR",
        0x04: "STREAM_LIMIT_ERROR",
        0x05: "STREAM_STATE_ERROR",
        0x06: "FINAL_SIZE_ERROR",
        0x07: "FRAME_ENCODING_ERROR",
        0x08: "TRANSPORT_PARAMETER_ERROR",
        0x09: "CONNECTION_ID_LIMIT_ERROR",
        0x0A: "PROTOCOL_VIOLATION",
        0x0B: "INVALID_TOKEN",
        0x0C: "APPLICATION_ERROR",
        0x0D: "CRYPTO_BUFFER_EXCEEDED",
        0x0E: "KEY_UPDATE_ERROR",
        0x0F: "AEAD_LIMIT_REACHED",
        0x10: "NO_VIABLE_PATH",
    }

    return error_codes.get(error_code, f"UNKNOWN_ERROR_{error_code:02X}")


def create_stream_error(
    error_type: str,
    message: str,
    stream_id: str | None = None,
    error_code: int | None = None,
) -> QUICStreamError:
    """
    Factory function to create appropriate stream error based on type.

    Args:
        error_type: Type of error ("closed", "reset", "timeout", "backpressure", etc.)
        message: Error message
        stream_id: Stream identifier
        error_code: QUIC error code

    Returns:
        Appropriate QUICStreamError subclass

    """
    error_type = error_type.lower()

    if error_type in ("closed", "close"):
        return QUICStreamClosedError(message, stream_id, error_code)
    elif error_type == "reset":
        return QUICStreamResetError(message, stream_id, error_code)
    elif error_type == "timeout":
        return QUICStreamTimeoutError(message, stream_id, error_code)
    elif error_type in ("backpressure", "flow_control"):
        return QUICStreamBackpressureError(message, stream_id, error_code)
    elif error_type in ("limit", "stream_limit"):
        return QUICStreamLimitError(message, stream_id, error_code)
    elif error_type == "state":
        return QUICStreamStateError(message, stream_id)
    else:
        return QUICStreamError(message, stream_id, error_code)


def create_connection_error(
    error_type: str, message: str, error_code: int | None = None
) -> QUICConnectionError:
    """
    Factory function to create appropriate connection error based on type.

    Args:
        error_type: Type of error ("closed", "timeout", "handshake", etc.)
        message: Error message
        error_code: QUIC error code

    Returns:
        Appropriate QUICConnectionError subclass

    """
    error_type = error_type.lower()

    if error_type in ("closed", "close"):
        return QUICConnectionClosedError(message, error_code)
    elif error_type == "timeout":
        return QUICConnectionTimeoutError(message, error_code)
    elif error_type == "handshake":
        return QUICHandshakeError(message, error_code)
    elif error_type in ("peer_verification", "verification"):
        return QUICPeerVerificationError(message, error_code)
    else:
        return QUICConnectionError(message, error_code)


class QUICErrorContext:
    """
    Context manager for handling QUIC errors with automatic error mapping.
    Useful for converting low-level aioquic errors to py-libp2p QUIC errors.
    """

    def __init__(self, operation: str, component: str = "quic") -> None:
        self.operation = operation
        self.component = component

    def __enter__(self) -> "QUICErrorContext":
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: Any,
    ) -> Literal[False]:
        if exc_type is None:
            return False

        if exc_val is None:
            return False

        # Map common aioquic exceptions to our exceptions
        if "ConnectionClosed" in str(exc_type):
            raise QUICConnectionClosedError(
                f"Connection closed during {self.operation}: {exc_val}"
            ) from exc_val
        elif "StreamReset" in str(exc_type):
            raise QUICStreamResetError(
                f"Stream reset during {self.operation}: {exc_val}"
            ) from exc_val
        elif "timeout" in str(exc_val).lower():
            if "stream" in self.component.lower():
                raise QUICStreamTimeoutError(
                    f"Timeout during {self.operation}: {exc_val}"
                ) from exc_val
            else:
                raise QUICConnectionTimeoutError(
                    f"Timeout during {self.operation}: {exc_val}"
                ) from exc_val
        elif "flow control" in str(exc_val).lower():
            raise QUICStreamBackpressureError(
                f"Flow control error during {self.operation}: {exc_val}"
            ) from exc_val

        # Let other exceptions propagate
        return False
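A usage sketch for the helpers above: the factory picks the right subclass
from a string tag, and the context manager remaps foreign exceptions by
inspecting their type and message. The names come from the deleted module
itself:

err = create_stream_error("timeout", "read timed out", stream_id="7")
assert isinstance(err, QUICStreamTimeoutError)
assert map_quic_error_code(0x03) == "FLOW_CONTROL_ERROR"

# A generic timeout raised inside the context is re-raised as the
# stream-flavored QUIC error because component contains "stream".
try:
    with QUICErrorContext("stream read", component="stream"):
        raise TimeoutError("timeout after 30s")
except QUICStreamTimeoutError as exc:
    print(exc)  # Timeout during stream read: timeout after 30s
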
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,656 +0,0 @@
"""
QUIC Stream implementation.
Provides a stream interface over QUIC's native multiplexing.
"""

from enum import Enum
import logging
import time
from types import TracebackType
from typing import TYPE_CHECKING, Any, cast

import trio

from .exceptions import (
    QUICStreamBackpressureError,
    QUICStreamClosedError,
    QUICStreamResetError,
    QUICStreamTimeoutError,
)

if TYPE_CHECKING:
    from libp2p.abc import IMuxedStream
    from libp2p.custom_types import TProtocol

    from .connection import QUICConnection
else:
    IMuxedStream = cast(type, object)
    TProtocol = cast(type, object)

logger = logging.getLogger(__name__)

class StreamState(Enum):
    """Stream lifecycle states following libp2p patterns."""

    OPEN = "open"
    WRITE_CLOSED = "write_closed"
    READ_CLOSED = "read_closed"
    CLOSED = "closed"
    RESET = "reset"


class StreamDirection(Enum):
    """Stream direction for tracking the initiator."""

    INBOUND = "inbound"
    OUTBOUND = "outbound"

class StreamTimeline:
    """Track stream lifecycle events for debugging and monitoring."""

    def __init__(self) -> None:
        self.created_at = time.time()
        self.opened_at: float | None = None
        self.first_data_at: float | None = None
        self.closed_at: float | None = None
        self.reset_at: float | None = None
        self.error_code: int | None = None

    def record_open(self) -> None:
        self.opened_at = time.time()

    def record_first_data(self) -> None:
        if self.first_data_at is None:
            self.first_data_at = time.time()

    def record_close(self) -> None:
        self.closed_at = time.time()

    def record_reset(self, error_code: int) -> None:
        self.reset_at = time.time()
        self.error_code = error_code

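# Usage sketch (illustrative): the timeline gives coarse per-stream latency
# metrics, e.g. time from creation to the first payload byte:
#
#     if timeline.first_data_at is not None:
#         ttfb = timeline.first_data_at - timeline.created_at
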
class QUICStream(IMuxedStream):
    """
    QUIC Stream implementation following the libp2p IMuxedStream interface.

    Based on patterns from go-libp2p and js-libp2p, this implementation:
    - Leverages QUIC's native multiplexing and flow control
    - Integrates with libp2p resource management
    - Provides comprehensive error handling with QUIC-specific codes
    - Supports bidirectional communication with independent close semantics
    - Implements proper stream lifecycle management
    """

    def __init__(
        self,
        connection: "QUICConnection",
        stream_id: int,
        direction: StreamDirection,
        remote_addr: tuple[str, int],
        resource_scope: Any | None = None,
    ):
        """
        Initialize QUIC stream.

        Args:
            connection: Parent QUIC connection
            stream_id: QUIC stream identifier
            direction: Stream direction (inbound/outbound)
            remote_addr: Remote address the stream is connected to
            resource_scope: Resource manager scope for memory accounting

        """
        self._connection = connection
        self._stream_id = stream_id
        self._direction = direction
        self._resource_scope = resource_scope

        # libp2p interface compliance
        self._protocol: TProtocol | None = None
        self._metadata: dict[str, Any] = {}
        self._remote_addr = remote_addr

        # Stream state management
        self._state = StreamState.OPEN
        self._state_lock = trio.Lock()

        # Flow control and buffering
        self._receive_buffer = bytearray()
        self._receive_buffer_lock = trio.Lock()
        self._receive_event = trio.Event()
        self._backpressure_event = trio.Event()
        self._backpressure_event.set()  # Initially no backpressure

        # Close/reset state
        self._write_closed = False
        self._read_closed = False
        self._close_event = trio.Event()
        self._reset_error_code: int | None = None

        # Lifecycle tracking
        self._timeline = StreamTimeline()
        self._timeline.record_open()

        # Resource accounting
        self._memory_reserved = 0

        # Stream constant configurations
        self.READ_TIMEOUT = connection._transport._config.STREAM_READ_TIMEOUT
        self.WRITE_TIMEOUT = connection._transport._config.STREAM_WRITE_TIMEOUT
        self.FLOW_CONTROL_WINDOW_SIZE = (
            connection._transport._config.STREAM_FLOW_CONTROL_WINDOW
        )
        self.MAX_RECEIVE_BUFFER_SIZE = (
            connection._transport._config.MAX_STREAM_RECEIVE_BUFFER
        )

        if self._resource_scope:
            self._reserve_memory(self.FLOW_CONTROL_WINDOW_SIZE)

        logger.debug(
            f"Created QUIC stream {stream_id} "
            f"({direction.value}, connection: {connection.remote_peer_id()})"
        )

    # Properties for libp2p interface compliance

    @property
    def protocol(self) -> TProtocol | None:
        """Get the protocol identifier for this stream."""
        return self._protocol

    @protocol.setter
    def protocol(self, protocol_id: TProtocol) -> None:
        """Set the protocol identifier for this stream."""
        self._protocol = protocol_id
        self._metadata["protocol"] = protocol_id
        logger.debug(f"Stream {self.stream_id} protocol set to: {protocol_id}")

    @property
    def stream_id(self) -> str:
        """Get stream ID as string for libp2p compatibility."""
        return str(self._stream_id)

    @property
    def muxed_conn(self) -> "QUICConnection":  # type: ignore
        """Get the parent muxed connection."""
        return self._connection

    @property
    def state(self) -> StreamState:
        """Get current stream state."""
        return self._state

    @property
    def direction(self) -> StreamDirection:
        """Get stream direction."""
        return self._direction

    @property
    def is_initiator(self) -> bool:
        """Check if this stream was locally initiated."""
        return self._direction == StreamDirection.OUTBOUND

    # Core stream operations

    async def read(self, n: int | None = None) -> bytes:
        """
        Read data from the stream with QUIC flow control.

        Args:
            n: Maximum number of bytes to read. If None or -1, read all available.

        Returns:
            Data read from stream

        Raises:
            QUICStreamClosedError: Stream is closed
            QUICStreamResetError: Stream was reset
            QUICStreamTimeoutError: Read timeout exceeded

        """
        if n is None:
            n = -1

        async with self._state_lock:
            if self._state in (StreamState.CLOSED, StreamState.RESET):
                raise QUICStreamClosedError(f"Stream {self.stream_id} is closed")

            if self._read_closed:
                # Return any remaining buffered data, then EOF
                async with self._receive_buffer_lock:
                    if self._receive_buffer:
                        data = self._extract_data_from_buffer(n)
                        self._timeline.record_first_data()
                        return data
                    return b""

        # Wait for data with timeout
        timeout = self.READ_TIMEOUT
        try:
            with trio.move_on_after(timeout) as cancel_scope:
                while True:
                    async with self._receive_buffer_lock:
                        if self._receive_buffer:
                            data = self._extract_data_from_buffer(n)
                            self._timeline.record_first_data()
                            return data

                    # Check if stream was closed while waiting
                    if self._read_closed:
                        return b""

                    # Wait for more data
                    await self._receive_event.wait()
                    self._receive_event = trio.Event()  # Reset for next wait

            if cancel_scope.cancelled_caught:
                raise QUICStreamTimeoutError(f"Read timeout on stream {self.stream_id}")

            return b""
        except QUICStreamResetError:
            # Stream was reset while reading
            raise
        except Exception as e:
            logger.error(f"Error reading from stream {self.stream_id}: {e}")
            await self._handle_stream_error(e)
            raise

    async def write(self, data: bytes) -> None:
        """
        Write data to the stream with QUIC flow control.

        Args:
            data: Data to write

        Raises:
            QUICStreamClosedError: Stream is closed for writing
            QUICStreamBackpressureError: Flow control window exhausted
            QUICStreamResetError: Stream was reset

        """
        if not data:
            return

        async with self._state_lock:
            if self._state in (StreamState.CLOSED, StreamState.RESET):
                raise QUICStreamClosedError(f"Stream {self.stream_id} is closed")

            if self._write_closed:
                raise QUICStreamClosedError(
                    f"Stream {self.stream_id} write side is closed"
                )

        try:
            # Handle flow control backpressure
            await self._backpressure_event.wait()

            # Send data through QUIC connection
            self._connection._quic.send_stream_data(self._stream_id, data)
            await self._connection._transmit()

            self._timeline.record_first_data()
            logger.debug(f"Wrote {len(data)} bytes to stream {self.stream_id}")

        except Exception as e:
            logger.error(f"Error writing to stream {self.stream_id}: {e}")
            # Convert QUIC-specific errors
            if "flow control" in str(e).lower():
                raise QUICStreamBackpressureError(
                    f"Flow control limit reached: {e}"
                ) from e
            await self._handle_stream_error(e)
            raise

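    # Usage sketch (illustrative, not part of the original module): a minimal
    # echo loop over an open stream using the read/write pair above:
    #
    #     data = await stream.read(4096)
    #     if data:
    #         await stream.write(data)
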
    async def close(self) -> None:
        """
        Close the stream gracefully (both read and write sides).

        This implements proper close semantics where both sides
        are closed and resources are cleaned up.
        """
        async with self._state_lock:
            if self._state in (StreamState.CLOSED, StreamState.RESET):
                return

        logger.debug(f"Closing stream {self.stream_id}")

        # Close both sides
        if not self._write_closed:
            await self.close_write()
        if not self._read_closed:
            await self.close_read()

        # Update state and cleanup
        async with self._state_lock:
            self._state = StreamState.CLOSED

        await self._cleanup_resources()
        self._timeline.record_close()
        self._close_event.set()

        logger.debug(f"Stream {self.stream_id} closed")

    async def close_write(self) -> None:
        """Close the write side of the stream."""
        if self._write_closed:
            return

        try:
            # Send FIN to close write side
            self._connection._quic.send_stream_data(
                self._stream_id, b"", end_stream=True
            )
            await self._connection._transmit()

            self._write_closed = True

            async with self._state_lock:
                if self._read_closed:
                    self._state = StreamState.CLOSED
                else:
                    self._state = StreamState.WRITE_CLOSED

            logger.debug(f"Stream {self.stream_id} write side closed")

        except Exception as e:
            logger.error(f"Error closing write side of stream {self.stream_id}: {e}")

    async def close_read(self) -> None:
        """Close the read side of the stream."""
        if self._read_closed:
            return

        try:
            self._read_closed = True

            async with self._state_lock:
                if self._write_closed:
                    self._state = StreamState.CLOSED
                else:
                    self._state = StreamState.READ_CLOSED

            # Wake up any pending reads
            self._receive_event.set()

            logger.debug(f"Stream {self.stream_id} read side closed")

        except Exception as e:
            logger.error(f"Error closing read side of stream {self.stream_id}: {e}")

    async def reset(self, error_code: int = 0) -> None:
        """
        Reset the stream with the given error code.

        Args:
            error_code: QUIC error code for the reset

        """
        async with self._state_lock:
            if self._state == StreamState.RESET:
                return

            logger.debug(
                f"Resetting stream {self.stream_id} with error code {error_code}"
            )

            self._state = StreamState.RESET
            self._reset_error_code = error_code

        try:
            # Send QUIC reset frame
            self._connection._quic.reset_stream(self._stream_id, error_code)
            await self._connection._transmit()

        except Exception as e:
            logger.error(f"Error sending reset for stream {self.stream_id}: {e}")
        finally:
            # Always cleanup resources
            await self._cleanup_resources()
            self._timeline.record_reset(error_code)
            self._close_event.set()

    def is_closed(self) -> bool:
        """Check if stream is completely closed."""
        return self._state in (StreamState.CLOSED, StreamState.RESET)

    def is_reset(self) -> bool:
        """Check if stream was reset."""
        return self._state == StreamState.RESET

    def can_read(self) -> bool:
        """Check if stream can be read from."""
        return not self._read_closed and self._state not in (
            StreamState.CLOSED,
            StreamState.RESET,
        )

    def can_write(self) -> bool:
        """Check if stream can be written to."""
        return not self._write_closed and self._state not in (
            StreamState.CLOSED,
            StreamState.RESET,
        )

    async def handle_data_received(self, data: bytes, end_stream: bool) -> None:
        """
        Handle data received from the QUIC connection.

        Args:
            data: Received data
            end_stream: Whether this is the last data (FIN received)

        """
        if self._state == StreamState.RESET:
            return

        if data:
            async with self._receive_buffer_lock:
                if len(self._receive_buffer) + len(data) > self.MAX_RECEIVE_BUFFER_SIZE:
                    logger.warning(
                        f"Stream {self.stream_id} receive buffer overflow, "
                        f"dropping {len(data)} bytes"
                    )
                    return

                self._receive_buffer.extend(data)
                self._timeline.record_first_data()

            # Notify waiting readers
            self._receive_event.set()

            logger.debug(f"Stream {self.stream_id} received {len(data)} bytes")

        if end_stream:
            self._read_closed = True
            async with self._state_lock:
                if self._write_closed:
                    self._state = StreamState.CLOSED
                else:
                    self._state = StreamState.READ_CLOSED

            # Wake up readers to process remaining data and EOF
            self._receive_event.set()

            logger.debug(f"Stream {self.stream_id} received FIN")

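    # Flow sketch (illustrative): the parent connection feeds received stream
    # data into handle_data_received(), which buffers it and wakes any reader
    # blocked in read():
    #
    #     await stream.handle_data_received(b"hello", end_stream=False)
    #     assert await stream.read(5) == b"hello"
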
    async def handle_stop_sending(self, error_code: int) -> None:
        """
        Handle STOP_SENDING frame from remote peer.

        When a STOP_SENDING frame is received, the peer is requesting that we
        stop sending data on this stream. We respond by resetting the stream.

        Args:
            error_code: Error code from the STOP_SENDING frame

        """
        logger.debug(
            f"Stream {self.stream_id} handling STOP_SENDING (error_code={error_code})"
        )

        self._write_closed = True

        # Wake up any pending write operations
        self._backpressure_event.set()

        async with self._state_lock:
            if self.direction == StreamDirection.OUTBOUND:
                self._state = StreamState.CLOSED
            elif self._read_closed:
                self._state = StreamState.CLOSED
            else:
                # Only the write side is closed
                self._state = StreamState.WRITE_CLOSED

        # Send RESET_STREAM in response (QUIC protocol requirement)
        try:
            self._connection._quic.reset_stream(int(self.stream_id), error_code)
            await self._connection._transmit()
            logger.debug(f"Sent RESET_STREAM for stream {self.stream_id}")
        except Exception as e:
            logger.warning(
                f"Could not send RESET_STREAM for stream {self.stream_id}: {e}"
            )

    async def handle_reset(self, error_code: int) -> None:
        """
        Handle stream reset from remote peer.

        Args:
            error_code: QUIC error code from reset frame

        """
        logger.debug(
            f"Stream {self.stream_id} reset by peer with error code {error_code}"
        )

        async with self._state_lock:
            self._state = StreamState.RESET
            self._reset_error_code = error_code

        await self._cleanup_resources()
        self._timeline.record_reset(error_code)
        self._close_event.set()

        # Wake up any pending operations
        self._receive_event.set()
        self._backpressure_event.set()

    async def handle_flow_control_update(self, available_window: int) -> None:
        """
        Handle flow control window updates.

        Args:
            available_window: Available flow control window size

        """
        if available_window > 0:
            self._backpressure_event.set()
            logger.debug(
                f"Stream {self.stream_id} flow control "
                f"window updated: {available_window}"
            )
        else:
            self._backpressure_event = trio.Event()  # Reset to blocking state
            logger.debug(f"Stream {self.stream_id} flow control window exhausted")

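    # Backpressure sketch (illustrative): write() blocks on the event that
    # handle_flow_control_update() toggles, so writers pause while the flow
    # control window is exhausted and resume once it reopens:
    #
    #     await stream.handle_flow_control_update(0)      # writers now block
    #     await stream.handle_flow_control_update(65536)  # writers resume
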
    def _extract_data_from_buffer(self, n: int) -> bytes:
        """Extract data from receive buffer with specified limit."""
        if n == -1:
            # Read all available data
            data = bytes(self._receive_buffer)
            self._receive_buffer.clear()
        else:
            # Read up to n bytes
            data = bytes(self._receive_buffer[:n])
            self._receive_buffer = self._receive_buffer[n:]

        return data

    async def _handle_stream_error(self, error: Exception) -> None:
        """Handle errors by resetting the stream."""
        logger.error(f"Stream {self.stream_id} error: {error}")
        await self.reset(error_code=1)  # Generic error code

    def _reserve_memory(self, size: int) -> None:
        """Reserve memory with resource manager."""
        if self._resource_scope:
            try:
                self._resource_scope.reserve_memory(size)
                self._memory_reserved += size
            except Exception as e:
                logger.warning(
                    f"Failed to reserve memory for stream {self.stream_id}: {e}"
                )

    def _release_memory(self, size: int) -> None:
        """Release memory with resource manager."""
        if self._resource_scope and size > 0:
            try:
                self._resource_scope.release_memory(size)
                self._memory_reserved = max(0, self._memory_reserved - size)
            except Exception as e:
                logger.warning(
                    f"Failed to release memory for stream {self.stream_id}: {e}"
                )

    async def _cleanup_resources(self) -> None:
        """Clean up stream resources."""
        # Release all reserved memory
        if self._memory_reserved > 0:
            self._release_memory(self._memory_reserved)

        # Clear receive buffer
        async with self._receive_buffer_lock:
            self._receive_buffer.clear()

        # Remove from connection's stream registry
        self._connection._remove_stream(self._stream_id)

        logger.debug(f"Stream {self.stream_id} resources cleaned up")

    # Abstract method implementations

    def get_remote_address(self) -> tuple[str, int]:
        return self._remote_addr

    async def __aenter__(self) -> "QUICStream":
        """Enter the async context manager."""
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        """Exit the async context manager and close the stream."""
        logger.debug("Exiting the context and closing the stream")
        await self.close()

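    # Usage sketch (illustrative): streams close automatically when used as
    # async context managers:
    #
    #     async with stream:
    #         await stream.write(b"ping")
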
    def set_deadline(self, ttl: int) -> bool:
        """
        Set a deadline for the stream. QUIC does not support deadlines
        natively, so this operation is unsupported.

        :param ttl: Time-to-live in seconds (ignored).
        :raises NotImplementedError: always, as deadlines are not supported.
        """
        raise NotImplementedError("QUIC does not support setting read deadlines")

    # String representation for debugging

    def __repr__(self) -> str:
        return (
            f"QUICStream(id={self.stream_id}, "
            f"state={self._state.value}, "
            f"direction={self._direction.value}, "
            f"protocol={self._protocol})"
        )

    def __str__(self) -> str:
        return f"QUICStream({self.stream_id})"
Some files were not shown because too many files have changed in this diff