Compare commits
63 Commits
79f9ff1596
...
feature/ws
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aa09275015 | ||
|
|
59bf3f6587 | ||
|
|
4fb15fe7a3 | ||
|
|
e595fe6591 | ||
|
|
326aa491cc | ||
|
|
464e95a4bd | ||
|
|
fd95167705 | ||
|
|
9e7fea7633 | ||
|
|
993cf9ab7f | ||
|
|
6f4e8eb9f6 | ||
|
|
634cd40fdc | ||
|
|
6310864b0b | ||
|
|
4d2c9838c5 | ||
|
|
ab8a7f7a96 | ||
|
|
59268f0391 | ||
|
|
a833694568 | ||
|
|
6d5ee55393 | ||
|
|
0dc381e948 | ||
|
|
34cd1017c1 | ||
|
|
a64b79d953 | ||
|
|
216ebf4a25 | ||
|
|
39f6908478 | ||
|
|
3f813cd510 | ||
|
|
59a00d371b | ||
|
|
524d1145bb | ||
|
|
bf56d84ef0 | ||
|
|
59069bfba2 | ||
|
|
26dc848081 | ||
|
|
ad16ddb903 | ||
|
|
d870c9e08a | ||
|
|
616505e8a9 | ||
|
|
12cdfe6c8a | ||
|
|
97402f6e60 | ||
|
|
237adbbf21 | ||
|
|
ac3b997758 | ||
|
|
5425c59e7d | ||
|
|
d8330525ef | ||
|
|
b65f76e4db | ||
|
|
12b6f30f9b | ||
|
|
722bca0c87 | ||
|
|
d38c655e79 | ||
|
|
ce6aacb25f | ||
|
|
38ae62b542 | ||
|
|
709ad1ba7d | ||
|
|
1c91c4a1b5 | ||
|
|
4de72e2d98 | ||
|
|
61d6fb173d | ||
|
|
66f720f1ee | ||
|
|
7fce83be82 | ||
|
|
9ad21182a8 | ||
|
|
a7afe4ff21 | ||
|
|
3f128936c4 | ||
|
|
bddcfb1440 | ||
|
|
a04b8271cc | ||
|
|
d5390db7af | ||
|
|
28d5a3a9ad | ||
|
|
26ed015cca | ||
|
|
0723f52d76 | ||
|
|
b147de5ae9 | ||
|
|
df80ad5343 | ||
|
|
708fb268bc | ||
|
|
85f472d824 | ||
|
|
3c99503eb1 |
188
.gitea/workflows/build.yml
Normal file
188
.gitea/workflows/build.yml
Normal file
@@ -0,0 +1,188 @@
|
||||
name: Build Release Binaries
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
targets:
|
||||
description: 'Targets to build (comma-separated: amd64,arm64,armv7,mac-arm64)'
|
||||
required: false
|
||||
default: 'amd64'
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
# Always builds on push tags. On manual dispatch, reads inputs.
|
||||
build-amd64:
|
||||
if: >-
|
||||
github.event_name == 'push' ||
|
||||
contains(github.event.inputs.targets, 'amd64')
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: rust:1-bookworm
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install dependencies
|
||||
run: apt-get update && apt-get install -y cmake pkg-config libasound2-dev
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: cargo-amd64-${{ hashFiles('Cargo.lock') }}
|
||||
restore-keys: cargo-amd64-
|
||||
|
||||
- name: Build headless binaries
|
||||
run: cargo build --release --bin wzp-relay --bin wzp-client --bin wzp-bench --bin wzp-web
|
||||
|
||||
- name: Build audio client
|
||||
run: |
|
||||
cargo build --release --bin wzp-client --features audio
|
||||
cp target/release/wzp-client target/release/wzp-client-audio
|
||||
cargo build --release --bin wzp-client
|
||||
|
||||
- name: Run tests
|
||||
run: cargo test --workspace --lib
|
||||
|
||||
- name: Package
|
||||
run: |
|
||||
mkdir -p dist/wzp-linux-amd64
|
||||
cp target/release/wzp-relay dist/wzp-linux-amd64/
|
||||
cp target/release/wzp-client dist/wzp-linux-amd64/
|
||||
cp target/release/wzp-client-audio dist/wzp-linux-amd64/
|
||||
cp target/release/wzp-web dist/wzp-linux-amd64/
|
||||
cp target/release/wzp-bench dist/wzp-linux-amd64/
|
||||
cp -r crates/wzp-web/static dist/wzp-linux-amd64/
|
||||
cd dist && tar czf wzp-linux-amd64.tar.gz wzp-linux-amd64/
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: wzp-linux-amd64
|
||||
path: dist/wzp-linux-amd64.tar.gz
|
||||
|
||||
build-arm64:
|
||||
if: >-
|
||||
github.event_name == 'push' ||
|
||||
contains(github.event.inputs.targets, 'arm64')
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: rust:1-bookworm
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install cross-compilation tools
|
||||
run: |
|
||||
dpkg --add-architecture arm64
|
||||
apt-get update
|
||||
apt-get install -y cmake pkg-config gcc-aarch64-linux-gnu libc6-dev-arm64-cross
|
||||
rustup target add aarch64-unknown-linux-gnu
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: cargo-arm64-${{ hashFiles('Cargo.lock') }}
|
||||
restore-keys: cargo-arm64-
|
||||
|
||||
- name: Build
|
||||
env:
|
||||
CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc
|
||||
CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc
|
||||
run: |
|
||||
cargo build --release --target aarch64-unknown-linux-gnu \
|
||||
--bin wzp-relay --bin wzp-client --bin wzp-bench --bin wzp-web
|
||||
|
||||
- name: Package
|
||||
run: |
|
||||
mkdir -p dist/wzp-linux-arm64
|
||||
cp target/aarch64-unknown-linux-gnu/release/wzp-relay dist/wzp-linux-arm64/
|
||||
cp target/aarch64-unknown-linux-gnu/release/wzp-client dist/wzp-linux-arm64/
|
||||
cp target/aarch64-unknown-linux-gnu/release/wzp-web dist/wzp-linux-arm64/
|
||||
cp target/aarch64-unknown-linux-gnu/release/wzp-bench dist/wzp-linux-arm64/
|
||||
cp -r crates/wzp-web/static dist/wzp-linux-arm64/
|
||||
cd dist && tar czf wzp-linux-arm64.tar.gz wzp-linux-arm64/
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: wzp-linux-arm64
|
||||
path: dist/wzp-linux-arm64.tar.gz
|
||||
|
||||
build-armv7:
|
||||
if: >-
|
||||
github.event_name == 'push' ||
|
||||
contains(github.event.inputs.targets, 'armv7')
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: rust:1-bookworm
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install cross-compilation tools
|
||||
run: |
|
||||
dpkg --add-architecture armhf
|
||||
apt-get update
|
||||
apt-get install -y cmake pkg-config gcc-arm-linux-gnueabihf libc6-dev-armhf-cross
|
||||
rustup target add armv7-unknown-linux-gnueabihf
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: cargo-armv7-${{ hashFiles('Cargo.lock') }}
|
||||
restore-keys: cargo-armv7-
|
||||
|
||||
- name: Build
|
||||
env:
|
||||
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc
|
||||
CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc
|
||||
run: |
|
||||
cargo build --release --target armv7-unknown-linux-gnueabihf \
|
||||
--bin wzp-relay --bin wzp-client --bin wzp-bench --bin wzp-web
|
||||
|
||||
- name: Package
|
||||
run: |
|
||||
mkdir -p dist/wzp-linux-armv7
|
||||
cp target/armv7-unknown-linux-gnueabihf/release/wzp-relay dist/wzp-linux-armv7/
|
||||
cp target/armv7-unknown-linux-gnueabihf/release/wzp-client dist/wzp-linux-armv7/
|
||||
cp target/armv7-unknown-linux-gnueabihf/release/wzp-web dist/wzp-linux-armv7/
|
||||
cp target/armv7-unknown-linux-gnueabihf/release/wzp-bench dist/wzp-linux-armv7/
|
||||
cp -r crates/wzp-web/static dist/wzp-linux-armv7/
|
||||
cd dist && tar czf wzp-linux-armv7.tar.gz wzp-linux-armv7/
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: wzp-linux-armv7
|
||||
path: dist/wzp-linux-armv7.tar.gz
|
||||
|
||||
# Release job — creates a release with all artifacts when a tag is pushed
|
||||
release:
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
needs: [build-amd64]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download all artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Create release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: artifacts/**/*.tar.gz
|
||||
generate_release_notes: true
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -1,2 +1,6 @@
|
||||
/target
|
||||
.DS_Store
|
||||
.claude/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
3
.gitmodules
vendored
Normal file
3
.gitmodules
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
[submodule "deps/featherchat"]
|
||||
path = deps/featherchat
|
||||
url = ssh://git@git.manko.yoga:222/manawenuz/featherChat.git
|
||||
2156
Cargo.lock
generated
2156
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -8,6 +8,7 @@ members = [
|
||||
"crates/wzp-transport",
|
||||
"crates/wzp-relay",
|
||||
"crates/wzp-client",
|
||||
"crates/wzp-web",
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
@@ -50,3 +51,4 @@ wzp-codec = { path = "crates/wzp-codec" }
|
||||
wzp-fec = { path = "crates/wzp-fec" }
|
||||
wzp-crypto = { path = "crates/wzp-crypto" }
|
||||
wzp-transport = { path = "crates/wzp-transport" }
|
||||
wzp-client = { path = "crates/wzp-client" }
|
||||
|
||||
87
README.md
Normal file
87
README.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# WarzonePhone
|
||||
|
||||
Custom lossy VoIP protocol built in Rust. E2E encrypted, FEC-protected, adaptive quality, designed for hostile network conditions.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Build
|
||||
cargo build --release
|
||||
|
||||
# Run relay
|
||||
./target/release/wzp-relay --listen 0.0.0.0:4433
|
||||
|
||||
# Send a test tone
|
||||
./target/release/wzp-client --send-tone 5 relay-addr:4433
|
||||
|
||||
# Web bridge (browser calls)
|
||||
./target/release/wzp-web --port 8080 --relay 127.0.0.1:4433 --tls
|
||||
# Open https://localhost:8080/room-name in two browser tabs
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
See [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md) for the full system architecture with Mermaid diagrams covering:
|
||||
|
||||
- System overview and data flow
|
||||
- Crate dependency graph (8 crates)
|
||||
- Wire formats (MediaHeader, MiniHeader, TrunkFrame, SignalMessage)
|
||||
- Cryptographic handshake (X25519 + Ed25519 + ChaCha20-Poly1305)
|
||||
- Identity model (BIP39 seed, featherChat compatible)
|
||||
- Quality profiles (GOOD/DEGRADED/CATASTROPHIC)
|
||||
- FEC protection (RaptorQ with interleaving)
|
||||
- Adaptive jitter buffer (NetEq-inspired)
|
||||
- Telemetry stack (Prometheus + Grafana)
|
||||
- Deployment topology
|
||||
|
||||
## Features
|
||||
|
||||
- **3 quality tiers**: Opus 24k (28.8 kbps) / Opus 6k (9 kbps) / Codec2 1200 (2.4 kbps)
|
||||
- **RaptorQ FEC**: Recovers from 20-100% packet loss depending on tier
|
||||
- **E2E encryption**: ChaCha20-Poly1305 with X25519 key exchange
|
||||
- **Adaptive jitter buffer**: EMA-based playout delay tracking
|
||||
- **Silence suppression**: VAD + comfort noise (~50% bandwidth savings)
|
||||
- **ML noise removal**: RNNoise (nnnoiseless pure Rust port)
|
||||
- **Mini-frames**: 67% header compression for steady-state packets
|
||||
- **Trunking**: Multiplex sessions into batched datagrams
|
||||
- **featherChat integration**: Shared BIP39 identity, token auth, call signaling
|
||||
- **Prometheus metrics**: Relay, web bridge, inter-relay probes
|
||||
- **Grafana dashboard**: Pre-built JSON with 18 panels
|
||||
|
||||
## Documentation
|
||||
|
||||
| Document | Description |
|
||||
|----------|-------------|
|
||||
| [ARCHITECTURE.md](docs/ARCHITECTURE.md) | Full system architecture with diagrams |
|
||||
| [TELEMETRY.md](docs/TELEMETRY.md) | Prometheus metrics specification |
|
||||
| [INTEGRATION_TASKS.md](docs/INTEGRATION_TASKS.md) | featherChat integration tracker |
|
||||
| [WZP-FC-SHARED-CRATES.md](docs/WZP-FC-SHARED-CRATES.md) | Shared crate strategy |
|
||||
| [grafana-dashboard.json](docs/grafana-dashboard.json) | Importable Grafana dashboard |
|
||||
|
||||
## Binaries
|
||||
|
||||
| Binary | Description |
|
||||
|--------|-------------|
|
||||
| `wzp-relay` | Relay daemon (SFU room mode, forward mode, probes) |
|
||||
| `wzp-client` | CLI client (send-tone, record, live mic, echo-test, drift-test, sweep) |
|
||||
| `wzp-web` | Browser bridge (HTTPS + WebSocket + AudioWorklet) |
|
||||
| `wzp-bench` | Component benchmarks |
|
||||
|
||||
## Linux Build
|
||||
|
||||
```bash
|
||||
./scripts/build-linux.sh --prepare # Create Hetzner VM + install deps
|
||||
./scripts/build-linux.sh --build # Build release binaries
|
||||
./scripts/build-linux.sh --transfer # Download to target/linux-x86_64/
|
||||
./scripts/build-linux.sh --destroy # Delete VM
|
||||
```
|
||||
|
||||
## Tests
|
||||
|
||||
```bash
|
||||
cargo test --workspace # 272 tests
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT OR Apache-2.0
|
||||
@@ -18,7 +18,15 @@ tracing-subscriber = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
anyhow = "1"
|
||||
cpal = "0.15"
|
||||
serde = { workspace = true }
|
||||
serde_json = "1"
|
||||
chrono = "0.4"
|
||||
rustls = { version = "0.23", default-features = false, features = ["ring", "std"] }
|
||||
cpal = { version = "0.15", optional = true }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
audio = ["cpal"]
|
||||
|
||||
[[bin]]
|
||||
name = "wzp-client"
|
||||
|
||||
@@ -136,8 +136,13 @@ pub fn bench_fec_recovery(loss_pct: f32) -> FecResult {
|
||||
let profile = QualityProfile::GOOD; // 5 frames/block, 0.2 ratio
|
||||
let frames_per_block = profile.frames_per_block as usize;
|
||||
let num_blocks = 100;
|
||||
// Use a higher FEC ratio for the bench so recovery is possible at higher loss
|
||||
let fec_ratio = if loss_pct > 20.0 { 1.0 } else { 0.5 };
|
||||
// Scale FEC ratio to survive the requested loss rate.
|
||||
// At X% loss, we keep (1-X/100) of packets. We need at least
|
||||
// frames_per_block packets to recover, so total packets needed =
|
||||
// frames_per_block / (1 - loss/100). Ratio = (total - source) / source.
|
||||
let keep_fraction = 1.0 - (loss_pct / 100.0).min(0.95);
|
||||
let total_needed = (frames_per_block as f32 / keep_fraction).ceil();
|
||||
let fec_ratio = ((total_needed / frames_per_block as f32) - 1.0).max(0.2);
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
@@ -313,18 +318,18 @@ pub fn bench_full_pipeline() -> PipelineResult {
|
||||
}
|
||||
let total_encode_pipeline = enc_start.elapsed();
|
||||
|
||||
// Decode pipeline: ingest all packets, then try to decode
|
||||
// Decode pipeline: ingest all packets, then decode one frame per source frame.
|
||||
// We call decode_next once per ingested source frame, matching the real-time
|
||||
// cadence (one decode per frame period).
|
||||
let dec_start = Instant::now();
|
||||
let mut dec_pcm = vec![0i16; frame_samples];
|
||||
for packets in &all_packets {
|
||||
for pkt in packets {
|
||||
decoder.ingest(pkt.clone());
|
||||
}
|
||||
// Attempt to decode after each frame's packets are ingested
|
||||
// Attempt to decode one frame per ingested source frame
|
||||
let _ = decoder.decode_next(&mut dec_pcm);
|
||||
}
|
||||
// Drain any remaining frames
|
||||
while decoder.decode_next(&mut dec_pcm).is_some() {}
|
||||
let total_decode_pipeline = dec_start.elapsed();
|
||||
|
||||
let total_time = total_encode_pipeline + total_decode_pipeline;
|
||||
@@ -378,7 +383,7 @@ mod tests {
|
||||
#[test]
|
||||
fn pipeline_runs() {
|
||||
let result = bench_full_pipeline();
|
||||
assert_eq!(result.frames, 200);
|
||||
assert_eq!(result.frames, 50);
|
||||
assert!(result.wire_bytes_out > 0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,17 +2,21 @@
|
||||
//!
|
||||
//! Pipeline: mic → encode → FEC → encrypt → send / recv → decrypt → FEC → decode → speaker
|
||||
|
||||
use bytes::Bytes;
|
||||
use tracing::{debug, warn};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use bytes::Bytes;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use wzp_codec::{ComfortNoise, NoiseSupressor, SilenceDetector};
|
||||
use wzp_fec::{RaptorQFecDecoder, RaptorQFecEncoder};
|
||||
use wzp_proto::jitter::{JitterBuffer, PlayoutResult};
|
||||
use wzp_proto::packet::{MediaHeader, MediaPacket};
|
||||
use wzp_proto::packet::{MediaHeader, MediaPacket, MiniFrameContext};
|
||||
use wzp_proto::quality::AdaptiveQualityController;
|
||||
use wzp_proto::traits::{
|
||||
AudioDecoder, AudioEncoder, FecDecoder, FecEncoder,
|
||||
};
|
||||
use wzp_proto::QualityProfile;
|
||||
use wzp_proto::packet::QualityReport;
|
||||
use wzp_proto::{CodecId, QualityProfile};
|
||||
|
||||
/// Configuration for a call session.
|
||||
pub struct CallConfig {
|
||||
@@ -24,15 +28,165 @@ pub struct CallConfig {
|
||||
pub jitter_max: usize,
|
||||
/// Jitter buffer min depth before playout.
|
||||
pub jitter_min: usize,
|
||||
/// Enable silence suppression (default: true).
|
||||
pub suppression_enabled: bool,
|
||||
/// RMS threshold for silence detection (default: 100.0 for i16 PCM).
|
||||
pub silence_threshold_rms: f64,
|
||||
/// Hangover frames before suppression begins (default: 5 = 100ms at 20ms frames).
|
||||
pub silence_hangover_frames: u32,
|
||||
/// Comfort noise amplitude (default: 50).
|
||||
pub comfort_noise_level: i16,
|
||||
/// Enable ML-based noise suppression via RNNoise (default: true).
|
||||
pub noise_suppression: bool,
|
||||
/// Enable mini-frame header compression (default: true).
|
||||
/// When enabled, only every 50th frame carries a full 12-byte MediaHeader;
|
||||
/// intermediate frames use a compact 4-byte MiniHeader.
|
||||
pub mini_frames_enabled: bool,
|
||||
/// Enable adaptive jitter buffer (default: true).
|
||||
///
|
||||
/// When true, the jitter buffer target depth is automatically adjusted
|
||||
/// based on observed inter-arrival jitter (NetEq-inspired algorithm).
|
||||
pub adaptive_jitter: bool,
|
||||
}
|
||||
|
||||
impl Default for CallConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
profile: QualityProfile::GOOD,
|
||||
jitter_target: 50,
|
||||
jitter_target: 10,
|
||||
jitter_max: 250,
|
||||
jitter_min: 25,
|
||||
jitter_min: 3, // 60ms — low latency start, still smooths jitter
|
||||
suppression_enabled: true,
|
||||
silence_threshold_rms: 100.0,
|
||||
silence_hangover_frames: 5,
|
||||
comfort_noise_level: 50,
|
||||
noise_suppression: true,
|
||||
mini_frames_enabled: true,
|
||||
adaptive_jitter: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CallConfig {
|
||||
/// Build a `CallConfig` tuned for the given quality profile.
|
||||
pub fn from_profile(profile: QualityProfile) -> Self {
|
||||
let (jitter_target, jitter_max, jitter_min) = if profile == QualityProfile::CATASTROPHIC {
|
||||
// Catastrophic: larger jitter buffer to absorb spikes
|
||||
(20, 500, 8)
|
||||
} else if profile == QualityProfile::DEGRADED {
|
||||
// Degraded: moderately deeper buffer
|
||||
(15, 350, 5)
|
||||
} else {
|
||||
// Good: low-latency defaults
|
||||
(10, 250, 3)
|
||||
};
|
||||
Self {
|
||||
profile,
|
||||
jitter_target,
|
||||
jitter_max,
|
||||
jitter_min,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Sliding-window quality adapter that reacts to relay `QualityReport`s.
|
||||
///
|
||||
/// Thresholds (per-report):
|
||||
/// - loss > 15% OR rtt > 200ms => CATASTROPHIC
|
||||
/// - loss > 5% OR rtt > 100ms => DEGRADED
|
||||
/// - otherwise => GOOD
|
||||
///
|
||||
/// Hysteresis: a profile switch is only recommended after the new profile
|
||||
/// has been the recommendation for 3 or more consecutive reports.
|
||||
pub struct QualityAdapter {
|
||||
/// Sliding window of the last N reports.
|
||||
window: std::collections::VecDeque<QualityReport>,
|
||||
/// Maximum window size.
|
||||
max_window: usize,
|
||||
/// Number of consecutive reports recommending the same (non-current) profile.
|
||||
consecutive_same: u32,
|
||||
/// The profile that the last `consecutive_same` reports recommended.
|
||||
pending_profile: Option<QualityProfile>,
|
||||
}
|
||||
|
||||
/// Number of consecutive reports required before accepting a switch.
|
||||
const HYSTERESIS_COUNT: u32 = 3;
|
||||
/// Default sliding window capacity.
|
||||
const ADAPTER_WINDOW: usize = 10;
|
||||
|
||||
impl QualityAdapter {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
window: std::collections::VecDeque::with_capacity(ADAPTER_WINDOW),
|
||||
max_window: ADAPTER_WINDOW,
|
||||
consecutive_same: 0,
|
||||
pending_profile: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Record a new quality report from the relay.
|
||||
pub fn ingest(&mut self, report: &QualityReport) {
|
||||
if self.window.len() >= self.max_window {
|
||||
self.window.pop_front();
|
||||
}
|
||||
self.window.push_back(*report);
|
||||
}
|
||||
|
||||
/// Classify a single report into a recommended profile.
|
||||
fn classify(report: &QualityReport) -> QualityProfile {
|
||||
let loss = report.loss_percent();
|
||||
let rtt = report.rtt_ms();
|
||||
|
||||
if loss > 15.0 || rtt > 200 {
|
||||
QualityProfile::CATASTROPHIC
|
||||
} else if loss > 5.0 || rtt > 100 {
|
||||
QualityProfile::DEGRADED
|
||||
} else {
|
||||
QualityProfile::GOOD
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the best profile based on the most recent report in the window.
|
||||
pub fn recommended_profile(&self) -> QualityProfile {
|
||||
match self.window.back() {
|
||||
Some(report) => Self::classify(report),
|
||||
None => QualityProfile::GOOD,
|
||||
}
|
||||
}
|
||||
|
||||
/// Determine if a profile switch should happen, applying hysteresis.
|
||||
///
|
||||
/// Returns `Some(new_profile)` only when the recommendation has differed
|
||||
/// from `current` for at least `HYSTERESIS_COUNT` consecutive reports.
|
||||
pub fn should_switch(&mut self, current: &QualityProfile) -> Option<QualityProfile> {
|
||||
let recommended = self.recommended_profile();
|
||||
|
||||
if recommended == *current {
|
||||
// Conditions match current profile — reset pending state.
|
||||
self.consecutive_same = 0;
|
||||
self.pending_profile = None;
|
||||
return None;
|
||||
}
|
||||
|
||||
// Recommended differs from current.
|
||||
match self.pending_profile {
|
||||
Some(pending) if pending == recommended => {
|
||||
self.consecutive_same += 1;
|
||||
}
|
||||
_ => {
|
||||
// New or changed recommendation — restart counter.
|
||||
self.pending_profile = Some(recommended);
|
||||
self.consecutive_same = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if self.consecutive_same >= HYSTERESIS_COUNT {
|
||||
self.consecutive_same = 0;
|
||||
self.pending_profile = None;
|
||||
Some(recommended)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -53,6 +207,24 @@ pub struct CallEncoder {
|
||||
frame_in_block: u8,
|
||||
/// Timestamp counter (ms).
|
||||
timestamp_ms: u32,
|
||||
/// Silence detector for suppression.
|
||||
silence_detector: SilenceDetector,
|
||||
/// Whether silence suppression is enabled.
|
||||
suppression_enabled: bool,
|
||||
/// Total frames suppressed (telemetry).
|
||||
frames_suppressed: u64,
|
||||
/// Frames since last CN packet was sent.
|
||||
cn_counter: u32,
|
||||
/// Comfort noise amplitude level (stored for CN packet payload).
|
||||
cn_level: i16,
|
||||
/// ML-based noise suppressor (RNNoise).
|
||||
denoiser: NoiseSupressor,
|
||||
/// Mini-frame compression context (tracks last full header).
|
||||
mini_context: MiniFrameContext,
|
||||
/// Whether mini-frame header compression is enabled.
|
||||
mini_frames_enabled: bool,
|
||||
/// Frames encoded since the last full header was emitted.
|
||||
frames_since_full: u32,
|
||||
}
|
||||
|
||||
impl CallEncoder {
|
||||
@@ -65,6 +237,35 @@ impl CallEncoder {
|
||||
block_id: 0,
|
||||
frame_in_block: 0,
|
||||
timestamp_ms: 0,
|
||||
silence_detector: SilenceDetector::new(
|
||||
config.silence_threshold_rms,
|
||||
config.silence_hangover_frames,
|
||||
),
|
||||
suppression_enabled: config.suppression_enabled,
|
||||
frames_suppressed: 0,
|
||||
cn_counter: 0,
|
||||
cn_level: config.comfort_noise_level,
|
||||
denoiser: {
|
||||
let mut d = NoiseSupressor::new();
|
||||
d.set_enabled(config.noise_suppression);
|
||||
d
|
||||
},
|
||||
mini_context: MiniFrameContext::default(),
|
||||
mini_frames_enabled: config.mini_frames_enabled,
|
||||
frames_since_full: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize a `MediaPacket` for transmission, applying mini-frame
|
||||
/// compression when enabled.
|
||||
///
|
||||
/// Returns compact wire bytes: either `[FRAME_TYPE_FULL][MediaHeader][payload]`
|
||||
/// or `[FRAME_TYPE_MINI][MiniHeader][payload]`.
|
||||
pub fn serialize_compact(&mut self, packet: &MediaPacket) -> Bytes {
|
||||
if self.mini_frames_enabled {
|
||||
packet.encode_compact(&mut self.mini_context, &mut self.frames_since_full)
|
||||
} else {
|
||||
packet.to_bytes()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,6 +274,55 @@ impl CallEncoder {
|
||||
/// Input: 48kHz mono PCM, frame size depends on profile (960 for 20ms, 1920 for 40ms).
|
||||
/// Output: one or more MediaPackets to send.
|
||||
pub fn encode_frame(&mut self, pcm: &[i16]) -> Result<Vec<MediaPacket>, anyhow::Error> {
|
||||
// Noise suppression: denoise the PCM before silence detection and encoding.
|
||||
let pcm = if self.denoiser.is_enabled() {
|
||||
let mut buf = pcm.to_vec();
|
||||
self.denoiser.process(&mut buf);
|
||||
buf
|
||||
} else {
|
||||
pcm.to_vec()
|
||||
};
|
||||
let pcm = &pcm[..];
|
||||
|
||||
// Silence suppression: skip encoding silent frames, periodically send CN.
|
||||
if self.suppression_enabled && self.silence_detector.is_silent(pcm) {
|
||||
self.frames_suppressed += 1;
|
||||
self.cn_counter += 1;
|
||||
|
||||
// Advance timestamp even for suppressed frames.
|
||||
self.timestamp_ms = self
|
||||
.timestamp_ms
|
||||
.wrapping_add(self.profile.frame_duration_ms as u32);
|
||||
|
||||
// Every 10 frames (~200ms), send a comfort noise packet.
|
||||
if self.cn_counter % 10 == 0 {
|
||||
let cn_pkt = MediaPacket {
|
||||
header: MediaHeader {
|
||||
version: 0,
|
||||
is_repair: false,
|
||||
codec_id: CodecId::ComfortNoise,
|
||||
has_quality_report: false,
|
||||
fec_ratio_encoded: 0,
|
||||
seq: self.seq,
|
||||
timestamp: self.timestamp_ms,
|
||||
fec_block: self.block_id,
|
||||
fec_symbol: 0,
|
||||
reserved: 0,
|
||||
csrc_count: 0,
|
||||
},
|
||||
payload: Bytes::from(vec![self.cn_level as u8]),
|
||||
quality_report: None,
|
||||
};
|
||||
self.seq = self.seq.wrapping_add(1);
|
||||
return Ok(vec![cn_pkt]);
|
||||
}
|
||||
|
||||
return Ok(vec![]);
|
||||
}
|
||||
|
||||
// Not silent — reset CN counter and proceed with normal encoding.
|
||||
self.cn_counter = 0;
|
||||
|
||||
// Encode audio
|
||||
let mut encoded = vec![0u8; self.audio_enc.max_frame_bytes()];
|
||||
let enc_len = self.audio_enc.encode(pcm, &mut encoded)?;
|
||||
@@ -164,19 +414,42 @@ pub struct CallDecoder {
|
||||
pub quality: AdaptiveQualityController,
|
||||
/// Current profile.
|
||||
profile: QualityProfile,
|
||||
/// Comfort noise generator for filling silent gaps.
|
||||
comfort_noise: ComfortNoise,
|
||||
/// Whether the last decoded frame was comfort noise.
|
||||
last_was_cn: bool,
|
||||
/// Mini-frame decompression context (tracks last full header baseline).
|
||||
mini_context: MiniFrameContext,
|
||||
}
|
||||
|
||||
impl CallDecoder {
|
||||
pub fn new(config: &CallConfig) -> Self {
|
||||
let jitter = if config.adaptive_jitter {
|
||||
JitterBuffer::new_adaptive(config.jitter_min, config.jitter_max)
|
||||
} else {
|
||||
JitterBuffer::new(config.jitter_target, config.jitter_max, config.jitter_min)
|
||||
};
|
||||
Self {
|
||||
audio_dec: wzp_codec::create_decoder(config.profile),
|
||||
fec_dec: wzp_fec::create_decoder(&config.profile),
|
||||
jitter: JitterBuffer::new(config.jitter_target, config.jitter_max, config.jitter_min),
|
||||
jitter,
|
||||
quality: AdaptiveQualityController::new(),
|
||||
profile: config.profile,
|
||||
comfort_noise: ComfortNoise::new(50),
|
||||
last_was_cn: false,
|
||||
mini_context: MiniFrameContext::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Deserialize a compact wire-format buffer into a `MediaPacket`,
|
||||
/// auto-detecting full vs mini headers.
|
||||
///
|
||||
/// Returns `None` on malformed data or if a mini-frame arrives before
|
||||
/// any full header baseline has been established.
|
||||
pub fn deserialize_compact(&mut self, buf: &[u8]) -> Option<MediaPacket> {
|
||||
MediaPacket::decode_compact(buf, &mut self.mini_context)
|
||||
}
|
||||
|
||||
/// Feed a received media packet into the decode pipeline.
|
||||
pub fn ingest(&mut self, packet: MediaPacket) {
|
||||
// Feed to FEC decoder
|
||||
@@ -199,25 +472,46 @@ impl CallDecoder {
|
||||
pub fn decode_next(&mut self, pcm: &mut [i16]) -> Option<usize> {
|
||||
match self.jitter.pop() {
|
||||
PlayoutResult::Packet(pkt) => {
|
||||
match self.audio_dec.decode(&pkt.payload, pcm) {
|
||||
// Comfort noise packet: generate CN instead of decoding audio.
|
||||
if pkt.header.codec_id == CodecId::ComfortNoise {
|
||||
self.comfort_noise.generate(pcm);
|
||||
self.last_was_cn = true;
|
||||
self.jitter.record_decode();
|
||||
return Some(pcm.len());
|
||||
}
|
||||
|
||||
self.last_was_cn = false;
|
||||
let result = match self.audio_dec.decode(&pkt.payload, pcm) {
|
||||
Ok(n) => Some(n),
|
||||
Err(e) => {
|
||||
warn!("decode error: {e}, using PLC");
|
||||
self.audio_dec.decode_lost(pcm).ok()
|
||||
}
|
||||
};
|
||||
if result.is_some() {
|
||||
self.jitter.record_decode();
|
||||
}
|
||||
result
|
||||
}
|
||||
PlayoutResult::Missing { seq } => {
|
||||
// Only generate PLC if there are still packets buffered ahead.
|
||||
// Otherwise we've drained everything — return None to stop.
|
||||
if self.jitter.depth() > 0 {
|
||||
debug!(seq, "packet loss, generating PLC");
|
||||
self.audio_dec.decode_lost(pcm).ok()
|
||||
let result = self.audio_dec.decode_lost(pcm).ok();
|
||||
if result.is_some() {
|
||||
self.jitter.record_decode();
|
||||
}
|
||||
result
|
||||
} else {
|
||||
self.jitter.record_underrun();
|
||||
None
|
||||
}
|
||||
}
|
||||
PlayoutResult::NotReady => None,
|
||||
PlayoutResult::NotReady => {
|
||||
self.jitter.record_underrun();
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -225,6 +519,57 @@ impl CallDecoder {
|
||||
pub fn profile(&self) -> QualityProfile {
|
||||
self.profile
|
||||
}
|
||||
|
||||
/// Get jitter buffer statistics.
|
||||
pub fn stats(&self) -> &wzp_proto::jitter::JitterStats {
|
||||
self.jitter.stats()
|
||||
}
|
||||
|
||||
/// Reset jitter buffer statistics counters.
|
||||
pub fn reset_stats(&mut self) {
|
||||
self.jitter.reset_stats();
|
||||
}
|
||||
}
|
||||
|
||||
/// Periodic telemetry logger for jitter buffer statistics.
|
||||
///
|
||||
/// Call `maybe_log` on each decode tick; it will emit a `tracing::info!` event
|
||||
/// no more frequently than the configured interval.
|
||||
pub struct JitterTelemetry {
|
||||
interval: Duration,
|
||||
last_report: Instant,
|
||||
}
|
||||
|
||||
impl JitterTelemetry {
|
||||
/// Create a new telemetry logger that reports at most once per `interval_secs`.
|
||||
pub fn new(interval_secs: u64) -> Self {
|
||||
Self {
|
||||
interval: Duration::from_secs(interval_secs),
|
||||
last_report: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Log jitter statistics if the interval has elapsed. Returns `true` when a
|
||||
/// log line was emitted.
|
||||
pub fn maybe_log(&mut self, stats: &wzp_proto::jitter::JitterStats) -> bool {
|
||||
let now = Instant::now();
|
||||
if now.duration_since(self.last_report) >= self.interval {
|
||||
info!(
|
||||
buffer_depth = stats.current_depth,
|
||||
underruns = stats.underruns,
|
||||
overruns = stats.overruns,
|
||||
late_packets = stats.packets_late,
|
||||
total_received = stats.packets_received,
|
||||
total_decoded = stats.total_decoded,
|
||||
max_depth_seen = stats.max_depth_seen,
|
||||
"jitter buffer telemetry"
|
||||
);
|
||||
self.last_report = now;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -296,4 +641,279 @@ mod tests {
|
||||
let mut pcm = vec![0i16; 960];
|
||||
assert!(dec.decode_next(&mut pcm).is_none());
|
||||
}
|
||||
|
||||
// ---- QualityAdapter tests ----
|
||||
|
||||
/// Helper: build a QualityReport from human-readable loss% and RTT ms.
|
||||
fn make_report(loss_pct_f: f32, rtt_ms: u16) -> QualityReport {
|
||||
QualityReport {
|
||||
loss_pct: (loss_pct_f / 100.0 * 255.0) as u8,
|
||||
rtt_4ms: (rtt_ms / 4) as u8,
|
||||
jitter_ms: 10,
|
||||
bitrate_cap_kbps: 200,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn good_conditions_stays_good() {
|
||||
let mut adapter = QualityAdapter::new();
|
||||
let good = make_report(1.0, 40);
|
||||
for _ in 0..10 {
|
||||
adapter.ingest(&good);
|
||||
}
|
||||
assert_eq!(adapter.recommended_profile(), QualityProfile::GOOD);
|
||||
|
||||
let current = QualityProfile::GOOD;
|
||||
for _ in 0..10 {
|
||||
adapter.ingest(&good);
|
||||
assert!(adapter.should_switch(¤t).is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn high_loss_degrades() {
|
||||
let mut adapter = QualityAdapter::new();
|
||||
// 8% loss, low RTT => DEGRADED
|
||||
let degraded = make_report(8.0, 40);
|
||||
let mut current = QualityProfile::GOOD;
|
||||
|
||||
// Feed 3 consecutive degraded reports to pass hysteresis
|
||||
for _ in 0..3 {
|
||||
adapter.ingest(°raded);
|
||||
if let Some(new) = adapter.should_switch(¤t) {
|
||||
current = new;
|
||||
}
|
||||
}
|
||||
assert_eq!(current, QualityProfile::DEGRADED);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn catastrophic_conditions() {
|
||||
let mut adapter = QualityAdapter::new();
|
||||
// 20% loss => CATASTROPHIC
|
||||
let terrible = make_report(20.0, 50);
|
||||
let mut current = QualityProfile::GOOD;
|
||||
|
||||
for _ in 0..3 {
|
||||
adapter.ingest(&terrible);
|
||||
if let Some(new) = adapter.should_switch(¤t) {
|
||||
current = new;
|
||||
}
|
||||
}
|
||||
assert_eq!(current, QualityProfile::CATASTROPHIC);
|
||||
|
||||
// Also test via high RTT alone (250ms > 200ms threshold)
|
||||
let mut adapter2 = QualityAdapter::new();
|
||||
let high_rtt = make_report(1.0, 252); // rtt_4ms rounds to 63 => 252ms
|
||||
let mut current2 = QualityProfile::GOOD;
|
||||
|
||||
for _ in 0..3 {
|
||||
adapter2.ingest(&high_rtt);
|
||||
if let Some(new) = adapter2.should_switch(¤t2) {
|
||||
current2 = new;
|
||||
}
|
||||
}
|
||||
assert_eq!(current2, QualityProfile::CATASTROPHIC);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hysteresis_prevents_flapping() {
|
||||
let mut adapter = QualityAdapter::new();
|
||||
let good = make_report(1.0, 40);
|
||||
let bad = make_report(8.0, 40); // DEGRADED
|
||||
let current = QualityProfile::GOOD;
|
||||
|
||||
// Alternate good/bad — should never trigger a switch because
|
||||
// we never get 3 consecutive same-recommendation reports.
|
||||
for _ in 0..20 {
|
||||
adapter.ingest(&bad);
|
||||
assert!(adapter.should_switch(¤t).is_none());
|
||||
adapter.ingest(&good);
|
||||
assert!(adapter.should_switch(¤t).is_none());
|
||||
}
|
||||
assert_eq!(current, QualityProfile::GOOD);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recovery_to_good() {
|
||||
let mut adapter = QualityAdapter::new();
|
||||
let bad = make_report(20.0, 50);
|
||||
let good = make_report(1.0, 40);
|
||||
|
||||
// Drive to CATASTROPHIC first
|
||||
let mut current = QualityProfile::GOOD;
|
||||
for _ in 0..3 {
|
||||
adapter.ingest(&bad);
|
||||
if let Some(new) = adapter.should_switch(¤t) {
|
||||
current = new;
|
||||
}
|
||||
}
|
||||
assert_eq!(current, QualityProfile::CATASTROPHIC);
|
||||
|
||||
// Now feed good reports — should recover to GOOD after 3 consecutive
|
||||
for _ in 0..3 {
|
||||
adapter.ingest(&good);
|
||||
if let Some(new) = adapter.should_switch(¤t) {
|
||||
current = new;
|
||||
}
|
||||
}
|
||||
assert_eq!(current, QualityProfile::GOOD);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn call_config_from_profile() {
|
||||
let good = CallConfig::from_profile(QualityProfile::GOOD);
|
||||
assert_eq!(good.profile, QualityProfile::GOOD);
|
||||
assert_eq!(good.jitter_min, 3);
|
||||
|
||||
let degraded = CallConfig::from_profile(QualityProfile::DEGRADED);
|
||||
assert_eq!(degraded.profile, QualityProfile::DEGRADED);
|
||||
assert!(degraded.jitter_target > good.jitter_target);
|
||||
|
||||
let catastrophic = CallConfig::from_profile(QualityProfile::CATASTROPHIC);
|
||||
assert_eq!(catastrophic.profile, QualityProfile::CATASTROPHIC);
|
||||
assert!(catastrophic.jitter_max > degraded.jitter_max);
|
||||
}
|
||||
|
||||
// ---- JitterStats telemetry tests ----
|
||||
|
||||
fn make_test_packet(seq: u16) -> MediaPacket {
|
||||
MediaPacket {
|
||||
header: MediaHeader {
|
||||
version: 0,
|
||||
is_repair: false,
|
||||
codec_id: CodecId::Opus24k,
|
||||
has_quality_report: false,
|
||||
fec_ratio_encoded: 0,
|
||||
seq,
|
||||
timestamp: seq as u32 * 20,
|
||||
fec_block: 0,
|
||||
fec_symbol: seq as u8,
|
||||
reserved: 0,
|
||||
csrc_count: 0,
|
||||
},
|
||||
payload: Bytes::from(vec![0u8; 60]),
|
||||
quality_report: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stats_track_ingestion() {
|
||||
let config = CallConfig::default();
|
||||
let mut dec = CallDecoder::new(&config);
|
||||
|
||||
for i in 0..5u16 {
|
||||
dec.ingest(make_test_packet(i));
|
||||
}
|
||||
|
||||
let stats = dec.stats();
|
||||
assert_eq!(stats.packets_received, 5);
|
||||
assert_eq!(stats.current_depth, 5);
|
||||
assert_eq!(stats.max_depth_seen, 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stats_track_underruns() {
|
||||
let config = CallConfig::default();
|
||||
let mut dec = CallDecoder::new(&config);
|
||||
|
||||
// Empty buffer — decode_next should record underruns
|
||||
let mut pcm = vec![0i16; 960];
|
||||
dec.decode_next(&mut pcm);
|
||||
dec.decode_next(&mut pcm);
|
||||
dec.decode_next(&mut pcm);
|
||||
|
||||
assert_eq!(dec.stats().underruns, 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stats_reset() {
|
||||
let config = CallConfig::default();
|
||||
let mut dec = CallDecoder::new(&config);
|
||||
|
||||
// Generate some stats: ingest packets and trigger underruns on empty buffer
|
||||
for i in 0..3u16 {
|
||||
dec.ingest(make_test_packet(i));
|
||||
}
|
||||
// Also call decode on empty decoder to get underruns
|
||||
let config2 = CallConfig::default();
|
||||
let mut dec2 = CallDecoder::new(&config2);
|
||||
let mut pcm = vec![0i16; 960];
|
||||
dec2.decode_next(&mut pcm); // underrun — nothing in buffer
|
||||
|
||||
assert!(dec.stats().packets_received > 0);
|
||||
assert!(dec2.stats().underruns > 0);
|
||||
|
||||
// Test reset on the decoder with ingested packets
|
||||
dec.reset_stats();
|
||||
let stats = dec.stats();
|
||||
assert_eq!(stats.packets_received, 0);
|
||||
assert_eq!(stats.underruns, 0);
|
||||
assert_eq!(stats.overruns, 0);
|
||||
assert_eq!(stats.total_decoded, 0);
|
||||
assert_eq!(stats.packets_late, 0);
|
||||
assert_eq!(stats.max_depth_seen, 0);
|
||||
|
||||
// Test reset on the decoder with underruns
|
||||
dec2.reset_stats();
|
||||
assert_eq!(dec2.stats().underruns, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn telemetry_respects_interval() {
|
||||
use wzp_proto::jitter::JitterStats;
|
||||
|
||||
let mut telemetry = JitterTelemetry::new(60); // 60-second interval
|
||||
let stats = JitterStats::default();
|
||||
|
||||
// First call right after creation — should not log because no time has passed
|
||||
// (the interval hasn't elapsed since construction)
|
||||
let logged = telemetry.maybe_log(&stats);
|
||||
assert!(!logged, "should not log before interval elapses");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn silence_suppression_skips_silent_frames() {
|
||||
let config = CallConfig {
|
||||
suppression_enabled: true,
|
||||
silence_threshold_rms: 100.0,
|
||||
silence_hangover_frames: 5,
|
||||
comfort_noise_level: 50,
|
||||
..Default::default()
|
||||
};
|
||||
let mut enc = CallEncoder::new(&config);
|
||||
|
||||
let silence = vec![0i16; 960];
|
||||
let mut total_packets = 0;
|
||||
let mut cn_packets = 0;
|
||||
|
||||
for _ in 0..20 {
|
||||
let packets = enc.encode_frame(&silence).unwrap();
|
||||
for p in &packets {
|
||||
if p.header.codec_id == CodecId::ComfortNoise {
|
||||
cn_packets += 1;
|
||||
// CN payload should be a single byte with the noise level.
|
||||
assert_eq!(p.payload.len(), 1);
|
||||
}
|
||||
}
|
||||
total_packets += packets.len();
|
||||
}
|
||||
|
||||
// First 5 frames are hangover (not suppressed) => 5 normal source packets
|
||||
// (plus potential repair packets from FEC block completion).
|
||||
// Remaining 15 frames are suppressed; CN every 10 frames => 1 CN packet
|
||||
// (cn_counter hits 10 on the 10th suppressed frame).
|
||||
assert!(
|
||||
total_packets < 20,
|
||||
"suppression should reduce packet count, got {total_packets}"
|
||||
);
|
||||
assert!(
|
||||
cn_packets >= 1,
|
||||
"should have at least one CN packet, got {cn_packets}"
|
||||
);
|
||||
assert!(
|
||||
enc.frames_suppressed > 0,
|
||||
"frames_suppressed should be > 0"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,58 +1,332 @@
|
||||
//! WarzonePhone CLI test client.
|
||||
//!
|
||||
//! Usage: wzp-client [--live] [relay-addr]
|
||||
//! Usage:
|
||||
//! wzp-client [relay-addr] Send silence frames (connectivity test)
|
||||
//! wzp-client --live [relay-addr] Live mic/speaker mode
|
||||
//! wzp-client --send-tone 10 [relay-addr] Send 10s of 440Hz test tone
|
||||
//! wzp-client --record out.raw [relay-addr] Record received audio to raw PCM file
|
||||
//! wzp-client --send-tone 10 --record out.raw [relay-addr] Both at once
|
||||
//!
|
||||
//! Without `--live`: sends silence frames for testing.
|
||||
//! With `--live`: captures microphone audio and plays received audio through speakers.
|
||||
//! Raw PCM files are 48kHz mono 16-bit signed little-endian.
|
||||
//! Play with: ffplay -f s16le -ar 48000 -ac 1 out.raw
|
||||
//! Or convert: ffmpeg -f s16le -ar 48000 -ac 1 -i out.raw out.wav
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tracing::{error, info};
|
||||
|
||||
use wzp_client::audio_io::{AudioCapture, AudioPlayback, FRAME_SAMPLES};
|
||||
use wzp_client::call::{CallConfig, CallDecoder, CallEncoder};
|
||||
use wzp_proto::MediaTransport;
|
||||
|
||||
const FRAME_SAMPLES: usize = 960; // 20ms @ 48kHz
|
||||
|
||||
/// Generate a sine wave tone.
|
||||
fn generate_sine_frame(freq_hz: f32, sample_rate: u32, frame_offset: u64) -> Vec<i16> {
|
||||
let start_sample = frame_offset * FRAME_SAMPLES as u64;
|
||||
(0..FRAME_SAMPLES)
|
||||
.map(|i| {
|
||||
let t = (start_sample + i as u64) as f32 / sample_rate as f32;
|
||||
(f32::sin(2.0 * std::f32::consts::PI * freq_hz * t) * 16000.0) as i16
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct CliArgs {
|
||||
relay_addr: SocketAddr,
|
||||
live: bool,
|
||||
send_tone_secs: Option<u32>,
|
||||
send_file: Option<String>,
|
||||
record_file: Option<String>,
|
||||
echo_test_secs: Option<u32>,
|
||||
drift_test_secs: Option<u32>,
|
||||
sweep: bool,
|
||||
seed_hex: Option<String>,
|
||||
mnemonic: Option<String>,
|
||||
room: Option<String>,
|
||||
token: Option<String>,
|
||||
_metrics_file: Option<String>,
|
||||
}
|
||||
|
||||
impl CliArgs {
|
||||
/// Resolve the identity seed from --seed, --mnemonic, or generate a new one.
|
||||
pub fn resolve_seed(&self) -> wzp_crypto::Seed {
|
||||
if let Some(ref hex_str) = self.seed_hex {
|
||||
let seed = wzp_crypto::Seed::from_hex(hex_str).expect("invalid --seed hex");
|
||||
let id = seed.derive_identity();
|
||||
let fp = id.public_identity().fingerprint;
|
||||
info!(fingerprint = %fp, "identity from --seed");
|
||||
seed
|
||||
} else if let Some(ref words) = self.mnemonic {
|
||||
let seed = wzp_crypto::Seed::from_mnemonic(words).expect("invalid --mnemonic");
|
||||
let id = seed.derive_identity();
|
||||
let fp = id.public_identity().fingerprint;
|
||||
info!(fingerprint = %fp, "identity from --mnemonic");
|
||||
seed
|
||||
} else {
|
||||
let seed = wzp_crypto::Seed::generate();
|
||||
let id = seed.derive_identity();
|
||||
let fp = id.public_identity().fingerprint;
|
||||
info!(fingerprint = %fp, "generated ephemeral identity");
|
||||
seed
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_args() -> CliArgs {
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
let mut live = false;
|
||||
let mut send_tone_secs = None;
|
||||
let mut send_file = None;
|
||||
let mut record_file = None;
|
||||
let mut echo_test_secs = None;
|
||||
let mut drift_test_secs = None;
|
||||
let mut sweep = false;
|
||||
let mut seed_hex = None;
|
||||
let mut mnemonic = None;
|
||||
let mut room = None;
|
||||
let mut token = None;
|
||||
let mut metrics_file = None;
|
||||
let mut relay_str = None;
|
||||
|
||||
let mut i = 1;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
"--live" => live = true,
|
||||
"--send-tone" => {
|
||||
i += 1;
|
||||
send_tone_secs = Some(
|
||||
args.get(i)
|
||||
.expect("--send-tone requires seconds")
|
||||
.parse()
|
||||
.expect("--send-tone value must be a number"),
|
||||
);
|
||||
}
|
||||
"--send-file" => {
|
||||
i += 1;
|
||||
send_file = Some(
|
||||
args.get(i)
|
||||
.expect("--send-file requires a filename")
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
"--seed" => {
|
||||
i += 1;
|
||||
seed_hex = Some(args.get(i).expect("--seed requires hex string").to_string());
|
||||
}
|
||||
"--mnemonic" => {
|
||||
// Consume all remaining words until next flag or end
|
||||
i += 1;
|
||||
let mut words = Vec::new();
|
||||
while i < args.len() && !args[i].starts_with('-') {
|
||||
words.push(args[i].clone());
|
||||
i += 1;
|
||||
}
|
||||
i -= 1; // back up since outer loop will increment
|
||||
mnemonic = Some(words.join(" "));
|
||||
}
|
||||
"--room" => {
|
||||
i += 1;
|
||||
room = Some(args.get(i).expect("--room requires a name").to_string());
|
||||
}
|
||||
"--token" => {
|
||||
i += 1;
|
||||
token = Some(args.get(i).expect("--token requires a value").to_string());
|
||||
}
|
||||
"--metrics-file" => {
|
||||
i += 1;
|
||||
metrics_file = Some(
|
||||
args.get(i)
|
||||
.expect("--metrics-file requires a path")
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
"--record" => {
|
||||
i += 1;
|
||||
record_file = Some(
|
||||
args.get(i)
|
||||
.expect("--record requires a filename")
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
"--echo-test" => {
|
||||
i += 1;
|
||||
echo_test_secs = Some(
|
||||
args.get(i)
|
||||
.expect("--echo-test requires seconds")
|
||||
.parse()
|
||||
.expect("--echo-test value must be a number"),
|
||||
);
|
||||
}
|
||||
"--drift-test" => {
|
||||
i += 1;
|
||||
drift_test_secs = Some(
|
||||
args.get(i)
|
||||
.expect("--drift-test requires seconds")
|
||||
.parse()
|
||||
.expect("--drift-test value must be a number"),
|
||||
);
|
||||
}
|
||||
"--sweep" => sweep = true,
|
||||
"--help" | "-h" => {
|
||||
eprintln!("Usage: wzp-client [options] [relay-addr]");
|
||||
eprintln!();
|
||||
eprintln!("Options:");
|
||||
eprintln!(" --live Live mic/speaker mode");
|
||||
eprintln!(" --send-tone <secs> Send a 440Hz test tone for N seconds");
|
||||
eprintln!(" --send-file <file> Send a raw PCM file (48kHz mono s16le)");
|
||||
eprintln!(" --record <file.raw> Record received audio to raw PCM file");
|
||||
eprintln!(" --echo-test <secs> Run automated echo quality test");
|
||||
eprintln!(" --drift-test <secs> Run automated clock-drift measurement");
|
||||
eprintln!(" --sweep Run jitter buffer parameter sweep (local, no network)");
|
||||
eprintln!(" --seed <hex> Identity seed (64 hex chars, featherChat compatible)");
|
||||
eprintln!(" --mnemonic <words...> Identity seed as BIP39 mnemonic (24 words)");
|
||||
eprintln!(" --room <name> Room name (hashed for privacy before sending)");
|
||||
eprintln!(" --token <token> featherChat bearer token for relay auth");
|
||||
eprintln!(" --metrics-file <path> Write JSONL telemetry to file (1 line/sec)");
|
||||
eprintln!(" (48kHz mono s16le, play with ffplay -f s16le -ar 48000 -ch_layout mono file.raw)");
|
||||
eprintln!();
|
||||
eprintln!("Default relay: 127.0.0.1:4433");
|
||||
std::process::exit(0);
|
||||
}
|
||||
other => {
|
||||
if relay_str.is_none() && !other.starts_with('-') {
|
||||
relay_str = Some(other.to_string());
|
||||
} else {
|
||||
eprintln!("unknown argument: {other}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
let relay_addr: SocketAddr = relay_str
|
||||
.unwrap_or_else(|| "127.0.0.1:4433".to_string())
|
||||
.parse()
|
||||
.expect("invalid relay address");
|
||||
|
||||
CliArgs {
|
||||
relay_addr,
|
||||
live,
|
||||
send_tone_secs,
|
||||
send_file,
|
||||
record_file,
|
||||
echo_test_secs,
|
||||
drift_test_secs,
|
||||
sweep,
|
||||
seed_hex,
|
||||
mnemonic,
|
||||
room,
|
||||
token,
|
||||
_metrics_file: metrics_file,
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt().init();
|
||||
rustls::crypto::ring::default_provider()
|
||||
.install_default()
|
||||
.expect("failed to install rustls crypto provider");
|
||||
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
let live = args.iter().any(|a| a == "--live");
|
||||
let relay_addr: SocketAddr = args
|
||||
.iter()
|
||||
.skip(1)
|
||||
.find(|a| *a != "--live")
|
||||
.cloned()
|
||||
.unwrap_or_else(|| "127.0.0.1:4433".to_string())
|
||||
.parse()?;
|
||||
let cli = parse_args();
|
||||
|
||||
info!(%relay_addr, live, "WarzonePhone client connecting");
|
||||
// --sweep runs locally (no network), so handle it before connecting.
|
||||
if cli.sweep {
|
||||
wzp_client::sweep::run_and_print_default_sweep();
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let seed = cli.resolve_seed();
|
||||
|
||||
info!(
|
||||
relay = %cli.relay_addr,
|
||||
live = cli.live,
|
||||
send_tone = ?cli.send_tone_secs,
|
||||
record = ?cli.record_file,
|
||||
room = ?cli.room,
|
||||
"WarzonePhone client"
|
||||
);
|
||||
|
||||
// Hash room name for SNI privacy (or "default" if none specified)
|
||||
let sni = match &cli.room {
|
||||
Some(name) => {
|
||||
let hashed = wzp_crypto::hash_room_name(name);
|
||||
info!(room = %name, hashed = %hashed, "room name hashed for SNI");
|
||||
hashed
|
||||
}
|
||||
None => "default".to_string(),
|
||||
};
|
||||
|
||||
let client_config = wzp_transport::client_config();
|
||||
let endpoint = wzp_transport::create_endpoint("0.0.0.0:0".parse()?, None)?;
|
||||
let bind_addr = if cli.relay_addr.is_ipv6() {
|
||||
"[::]:0".parse()?
|
||||
} else {
|
||||
"0.0.0.0:0".parse()?
|
||||
};
|
||||
let endpoint = wzp_transport::create_endpoint(bind_addr, None)?;
|
||||
let connection =
|
||||
wzp_transport::connect(&endpoint, relay_addr, "localhost", client_config).await?;
|
||||
wzp_transport::connect(&endpoint, cli.relay_addr, &sni, client_config).await?;
|
||||
|
||||
info!("Connected to relay");
|
||||
|
||||
let transport = Arc::new(wzp_transport::QuinnTransport::new(connection));
|
||||
|
||||
if live {
|
||||
run_live(transport).await
|
||||
// Send auth token if provided (relay with --auth-url expects this first)
|
||||
if let Some(ref token) = cli.token {
|
||||
let auth = wzp_proto::SignalMessage::AuthToken {
|
||||
token: token.clone(),
|
||||
};
|
||||
transport.send_signal(&auth).await?;
|
||||
info!("auth token sent");
|
||||
}
|
||||
|
||||
// Crypto handshake — establishes verified identity + session key
|
||||
let _crypto_session = wzp_client::handshake::perform_handshake(
|
||||
&*transport,
|
||||
&seed.0,
|
||||
).await?;
|
||||
info!("crypto handshake complete");
|
||||
|
||||
if cli.live {
|
||||
#[cfg(feature = "audio")]
|
||||
{
|
||||
return run_live(transport).await;
|
||||
}
|
||||
#[cfg(not(feature = "audio"))]
|
||||
{
|
||||
anyhow::bail!("--live requires the 'audio' feature (build with: cargo build --features audio)");
|
||||
}
|
||||
} else if let Some(secs) = cli.echo_test_secs {
|
||||
let result = wzp_client::echo_test::run_echo_test(&*transport, secs, 5.0).await?;
|
||||
wzp_client::echo_test::print_report(&result);
|
||||
transport.close().await?;
|
||||
Ok(())
|
||||
} else if let Some(secs) = cli.drift_test_secs {
|
||||
let config = wzp_client::drift_test::DriftTestConfig {
|
||||
duration_secs: secs,
|
||||
tone_freq_hz: 440.0,
|
||||
};
|
||||
let result = wzp_client::drift_test::run_drift_test(&*transport, &config).await?;
|
||||
wzp_client::drift_test::print_drift_report(&result);
|
||||
transport.close().await?;
|
||||
Ok(())
|
||||
} else if cli.send_tone_secs.is_some() || cli.send_file.is_some() || cli.record_file.is_some() {
|
||||
run_file_mode(transport, cli.send_tone_secs, cli.send_file, cli.record_file).await
|
||||
} else {
|
||||
run_silence(transport).await
|
||||
}
|
||||
}
|
||||
|
||||
/// Original test mode: send silence frames.
|
||||
/// Send silence frames (connectivity test).
|
||||
async fn run_silence(transport: Arc<wzp_transport::QuinnTransport>) -> anyhow::Result<()> {
|
||||
let config = CallConfig::default();
|
||||
let mut encoder = CallEncoder::new(&config);
|
||||
|
||||
let frame_duration = tokio::time::Duration::from_millis(20);
|
||||
let pcm = vec![0i16; FRAME_SAMPLES]; // 20ms @ 48kHz silence
|
||||
let pcm = vec![0i16; FRAME_SAMPLES];
|
||||
|
||||
let mut total_source = 0u64;
|
||||
let mut total_repair = 0u64;
|
||||
@@ -84,25 +358,204 @@ async fn run_silence(transport: Arc<wzp_transport::QuinnTransport>) -> anyhow::R
|
||||
tokio::time::sleep(frame_duration).await;
|
||||
}
|
||||
|
||||
info!(
|
||||
total_source,
|
||||
total_repair,
|
||||
total_bytes,
|
||||
"done — closing"
|
||||
);
|
||||
info!(total_source, total_repair, total_bytes, "done — closing");
|
||||
let hangup = wzp_proto::SignalMessage::Hangup {
|
||||
reason: wzp_proto::HangupReason::Normal,
|
||||
};
|
||||
transport.send_signal(&hangup).await.ok();
|
||||
transport.close().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// File/tone mode: send a test tone or audio file, and/or record received audio.
|
||||
async fn run_file_mode(
|
||||
transport: Arc<wzp_transport::QuinnTransport>,
|
||||
send_tone_secs: Option<u32>,
|
||||
send_file: Option<String>,
|
||||
record_file: Option<String>,
|
||||
) -> anyhow::Result<()> {
|
||||
let config = CallConfig::default();
|
||||
|
||||
// --- Send task: generate tone or play file ---
|
||||
let send_transport = transport.clone();
|
||||
let send_handle = tokio::spawn(async move {
|
||||
// Load PCM frames from file or generate tone
|
||||
let pcm_frames: Vec<Vec<i16>> = if let Some(ref path) = send_file {
|
||||
// Read raw PCM file (48kHz mono s16le)
|
||||
let bytes = match std::fs::read(path) {
|
||||
Ok(b) => b,
|
||||
Err(e) => { error!("read {path}: {e}"); return; }
|
||||
};
|
||||
let samples: Vec<i16> = bytes.chunks_exact(2)
|
||||
.map(|c| i16::from_le_bytes([c[0], c[1]]))
|
||||
.collect();
|
||||
let duration = samples.len() as f64 / 48_000.0;
|
||||
info!(file = %path, duration = format!("{:.1}s", duration), "sending audio file");
|
||||
samples.chunks(FRAME_SAMPLES)
|
||||
.filter(|c| c.len() == FRAME_SAMPLES)
|
||||
.map(|c| c.to_vec())
|
||||
.collect()
|
||||
} else if let Some(secs) = send_tone_secs {
|
||||
let total = (secs as u64) * 50;
|
||||
info!(seconds = secs, frames = total, "sending 440Hz tone");
|
||||
(0..total).map(|i| generate_sine_frame(440.0, 48_000, i)).collect()
|
||||
} else {
|
||||
// No sending, just wait
|
||||
tokio::signal::ctrl_c().await.ok();
|
||||
return;
|
||||
};
|
||||
|
||||
let mut encoder = CallEncoder::new(&config);
|
||||
let _total_frames = pcm_frames.len() as u64;
|
||||
let frame_duration = tokio::time::Duration::from_millis(20);
|
||||
|
||||
let mut total_source = 0u64;
|
||||
let mut total_repair = 0u64;
|
||||
|
||||
for (frame_idx, pcm) in pcm_frames.iter().enumerate() {
|
||||
let frame_idx = frame_idx as u64;
|
||||
let packets = match encoder.encode_frame(&pcm) {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
error!("encode error: {e}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
for pkt in &packets {
|
||||
if pkt.header.is_repair {
|
||||
total_repair += 1;
|
||||
} else {
|
||||
total_source += 1;
|
||||
}
|
||||
if let Err(e) = send_transport.send_media(pkt).await {
|
||||
error!("send error: {e}");
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (frame_idx + 1) % 250 == 0 {
|
||||
info!(
|
||||
frame = frame_idx + 1,
|
||||
source = total_source,
|
||||
repair = total_repair,
|
||||
"send progress"
|
||||
);
|
||||
}
|
||||
tokio::time::sleep(frame_duration).await;
|
||||
}
|
||||
info!(total_source, total_repair, "tone send complete");
|
||||
});
|
||||
|
||||
// --- Recv task: decode and write to file ---
|
||||
let recv_transport = transport.clone();
|
||||
let record_path = record_file.clone();
|
||||
let recv_handle = tokio::spawn(async move {
|
||||
let record_path = match record_path {
|
||||
Some(p) => p,
|
||||
None => {
|
||||
// No recording, just wait for send to finish or Ctrl+C
|
||||
tokio::signal::ctrl_c().await.ok();
|
||||
return Vec::new();
|
||||
}
|
||||
};
|
||||
|
||||
let mut decoder = CallDecoder::new(&CallConfig::default());
|
||||
let mut pcm_buf = vec![0i16; FRAME_SAMPLES];
|
||||
let mut all_pcm: Vec<i16> = Vec::new();
|
||||
let mut frames_received = 0u64;
|
||||
|
||||
info!(file = %record_path, "recording received audio (Ctrl+C to stop and save)");
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
result = recv_transport.recv_media() => {
|
||||
match result {
|
||||
Ok(Some(pkt)) => {
|
||||
let is_repair = pkt.header.is_repair;
|
||||
decoder.ingest(pkt);
|
||||
if !is_repair {
|
||||
if let Some(n) = decoder.decode_next(&mut pcm_buf) {
|
||||
all_pcm.extend_from_slice(&pcm_buf[..n]);
|
||||
frames_received += 1;
|
||||
if frames_received % 250 == 0 {
|
||||
info!(
|
||||
frames = frames_received,
|
||||
samples = all_pcm.len(),
|
||||
"recv progress"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(None) => {
|
||||
info!("connection closed by remote");
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("recv error: {e}");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
info!("Ctrl+C received, saving recording...");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
all_pcm
|
||||
});
|
||||
|
||||
// Wait for send to finish (or ctrl+c in recv)
|
||||
let _ = send_handle.await;
|
||||
|
||||
// Send Hangup signal so the relay knows we're done
|
||||
let hangup = wzp_proto::SignalMessage::Hangup {
|
||||
reason: wzp_proto::HangupReason::Normal,
|
||||
};
|
||||
transport.send_signal(&hangup).await.ok();
|
||||
|
||||
let all_pcm = if record_file.is_some() {
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
|
||||
transport.close().await?;
|
||||
recv_handle.await.unwrap_or_default()
|
||||
} else {
|
||||
transport.close().await?;
|
||||
recv_handle.abort();
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
// Write recorded audio to file
|
||||
if let Some(ref path) = record_file {
|
||||
if !all_pcm.is_empty() {
|
||||
let bytes: Vec<u8> = all_pcm.iter().flat_map(|s| s.to_le_bytes()).collect();
|
||||
std::fs::write(path, &bytes)?;
|
||||
let duration_secs = all_pcm.len() as f64 / 48_000.0;
|
||||
info!(
|
||||
file = %path,
|
||||
samples = all_pcm.len(),
|
||||
duration = format!("{:.1}s", duration_secs),
|
||||
bytes = bytes.len(),
|
||||
"recording saved"
|
||||
);
|
||||
info!("play with: ffplay -f s16le -ar 48000 -ac 1 {path}");
|
||||
} else {
|
||||
info!("no audio received, nothing to write");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Live mode: capture from mic, encode, send; receive, decode, play.
|
||||
#[cfg(feature = "audio")]
|
||||
async fn run_live(transport: Arc<wzp_transport::QuinnTransport>) -> anyhow::Result<()> {
|
||||
use wzp_client::audio_io::{AudioCapture, AudioPlayback};
|
||||
|
||||
let capture = AudioCapture::start()?;
|
||||
let playback = AudioPlayback::start()?;
|
||||
info!("Audio I/O started — press Ctrl+C to stop");
|
||||
|
||||
// --- Send task: mic -> encode -> transport ---
|
||||
// AudioCapture::read_frame() is blocking, so we run this on a dedicated
|
||||
// OS thread. We use the tokio Handle to call the async send_media.
|
||||
let send_transport = transport.clone();
|
||||
let rt_handle = tokio::runtime::Handle::current();
|
||||
let send_handle = std::thread::Builder::new()
|
||||
@@ -113,7 +566,7 @@ async fn run_live(transport: Arc<wzp_transport::QuinnTransport>) -> anyhow::Resu
|
||||
loop {
|
||||
let frame = match capture.read_frame() {
|
||||
Some(f) => f,
|
||||
None => break, // channel closed / stopped
|
||||
None => break,
|
||||
};
|
||||
let packets = match encoder.encode_frame(&frame) {
|
||||
Ok(p) => p,
|
||||
@@ -131,7 +584,6 @@ async fn run_live(transport: Arc<wzp_transport::QuinnTransport>) -> anyhow::Resu
|
||||
}
|
||||
})?;
|
||||
|
||||
// --- Recv task: transport -> decode -> speaker ---
|
||||
let recv_transport = transport.clone();
|
||||
let recv_handle = tokio::spawn(async move {
|
||||
let config = CallConfig::default();
|
||||
@@ -140,14 +592,19 @@ async fn run_live(transport: Arc<wzp_transport::QuinnTransport>) -> anyhow::Resu
|
||||
loop {
|
||||
match recv_transport.recv_media().await {
|
||||
Ok(Some(pkt)) => {
|
||||
let is_repair = pkt.header.is_repair;
|
||||
decoder.ingest(pkt);
|
||||
while let Some(_n) = decoder.decode_next(&mut pcm_buf) {
|
||||
playback.write_frame(&pcm_buf);
|
||||
// Only decode for source packets (1 source = 1 audio frame).
|
||||
// Repair packets feed the FEC decoder but don't produce audio.
|
||||
if !is_repair {
|
||||
if let Some(_n) = decoder.decode_next(&mut pcm_buf) {
|
||||
playback.write_frame(&pcm_buf);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(None) => {
|
||||
// No packet available right now, yield briefly.
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(1)).await;
|
||||
info!("connection closed");
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("recv error: {e}");
|
||||
@@ -157,14 +614,10 @@ async fn run_live(transport: Arc<wzp_transport::QuinnTransport>) -> anyhow::Resu
|
||||
}
|
||||
});
|
||||
|
||||
// Wait for Ctrl+C
|
||||
tokio::signal::ctrl_c()
|
||||
.await
|
||||
.expect("failed to listen for Ctrl+C");
|
||||
tokio::signal::ctrl_c().await?;
|
||||
info!("Shutting down...");
|
||||
|
||||
recv_handle.abort();
|
||||
// The send thread will exit once capture is dropped / stopped.
|
||||
drop(send_handle);
|
||||
transport.close().await?;
|
||||
info!("done");
|
||||
|
||||
293
crates/wzp-client/src/drift_test.rs
Normal file
293
crates/wzp-client/src/drift_test.rs
Normal file
@@ -0,0 +1,293 @@
|
||||
//! Automated clock-drift measurement tool.
|
||||
//!
|
||||
//! Sends N seconds of a known 440 Hz tone through the transport, records
|
||||
//! received frame timestamps on the other side, and compares actual received
|
||||
//! duration vs expected duration to quantify timing drift and packet loss.
|
||||
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use tracing::info;
|
||||
|
||||
use wzp_proto::MediaTransport;
|
||||
|
||||
use crate::call::{CallConfig, CallDecoder, CallEncoder};
|
||||
|
||||
const FRAME_SAMPLES: usize = 960; // 20ms @ 48kHz
|
||||
const SAMPLE_RATE: u32 = 48_000;
|
||||
|
||||
/// Configuration for a drift measurement run.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DriftTestConfig {
|
||||
/// How many seconds of tone to send.
|
||||
pub duration_secs: u32,
|
||||
/// Frequency of the test tone (Hz).
|
||||
pub tone_freq_hz: f32,
|
||||
}
|
||||
|
||||
impl Default for DriftTestConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
duration_secs: 10,
|
||||
tone_freq_hz: 440.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Results from a drift measurement run.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DriftResult {
|
||||
/// Expected duration in milliseconds (`duration_secs * 1000`).
|
||||
pub expected_duration_ms: u64,
|
||||
/// Actual measured duration in milliseconds (last_recv - first_recv).
|
||||
pub actual_duration_ms: u64,
|
||||
/// Drift: `actual - expected` (positive = receiver clock ran slow / packets delayed).
|
||||
pub drift_ms: i64,
|
||||
/// Drift as a percentage of expected duration.
|
||||
pub drift_pct: f64,
|
||||
/// Total frames sent by the sender.
|
||||
pub frames_sent: u64,
|
||||
/// Total frames successfully received and decoded.
|
||||
pub frames_received: u64,
|
||||
/// Packet loss percentage: `(1 - frames_received / frames_sent) * 100`.
|
||||
pub loss_pct: f64,
|
||||
}
|
||||
|
||||
impl DriftResult {
|
||||
/// Compute a `DriftResult` from raw counters and timestamps.
|
||||
pub fn compute(
|
||||
expected_duration_ms: u64,
|
||||
actual_duration_ms: u64,
|
||||
frames_sent: u64,
|
||||
frames_received: u64,
|
||||
) -> Self {
|
||||
let drift_ms = actual_duration_ms as i64 - expected_duration_ms as i64;
|
||||
let drift_pct = if expected_duration_ms > 0 {
|
||||
drift_ms as f64 / expected_duration_ms as f64 * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
let loss_pct = if frames_sent > 0 {
|
||||
(1.0 - frames_received as f64 / frames_sent as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
Self {
|
||||
expected_duration_ms,
|
||||
actual_duration_ms,
|
||||
drift_ms,
|
||||
drift_pct,
|
||||
frames_sent,
|
||||
frames_received,
|
||||
loss_pct,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate a sine wave frame at a given frequency.
|
||||
fn sine_frame(freq_hz: f32, frame_offset: u64) -> Vec<i16> {
|
||||
let start = frame_offset * FRAME_SAMPLES as u64;
|
||||
(0..FRAME_SAMPLES)
|
||||
.map(|i| {
|
||||
let t = (start + i as u64) as f32 / SAMPLE_RATE as f32;
|
||||
(f32::sin(2.0 * std::f32::consts::PI * freq_hz * t) * 16000.0) as i16
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Run the drift measurement test.
|
||||
///
|
||||
/// 1. Spawns a send task that encodes `duration_secs` of tone at 20 ms intervals.
|
||||
/// 2. Spawns a recv task that counts decoded frames and tracks first/last timestamps.
|
||||
/// 3. After the sender finishes, waits 2 seconds for trailing packets.
|
||||
/// 4. Computes and returns the `DriftResult`.
|
||||
pub async fn run_drift_test(
|
||||
transport: &(dyn MediaTransport + Send + Sync),
|
||||
config: &DriftTestConfig,
|
||||
) -> anyhow::Result<DriftResult> {
|
||||
let call_config = CallConfig::default();
|
||||
let mut encoder = CallEncoder::new(&call_config);
|
||||
let mut decoder = CallDecoder::new(&call_config);
|
||||
|
||||
let total_frames: u64 = config.duration_secs as u64 * 50; // 50 frames/s at 20 ms
|
||||
let frame_duration = Duration::from_millis(20);
|
||||
let mut pcm_buf = vec![0i16; FRAME_SAMPLES];
|
||||
|
||||
let mut frames_sent: u64 = 0;
|
||||
let mut frames_received: u64 = 0;
|
||||
let mut first_recv_time: Option<Instant> = None;
|
||||
let mut last_recv_time: Option<Instant> = None;
|
||||
|
||||
info!(
|
||||
duration_secs = config.duration_secs,
|
||||
tone_hz = config.tone_freq_hz,
|
||||
total_frames = total_frames,
|
||||
"starting drift measurement"
|
||||
);
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
// Send + interleaved receive loop (same pattern as echo_test)
|
||||
for frame_idx in 0..total_frames {
|
||||
// --- send ---
|
||||
let pcm = sine_frame(config.tone_freq_hz, frame_idx);
|
||||
let packets = encoder.encode_frame(&pcm)?;
|
||||
for pkt in &packets {
|
||||
transport.send_media(pkt).await?;
|
||||
}
|
||||
frames_sent += 1;
|
||||
|
||||
// --- try to receive (short window so we don't block the sender) ---
|
||||
let recv_deadline = Instant::now() + Duration::from_millis(5);
|
||||
loop {
|
||||
if Instant::now() >= recv_deadline {
|
||||
break;
|
||||
}
|
||||
match tokio::time::timeout(Duration::from_millis(2), transport.recv_media()).await {
|
||||
Ok(Ok(Some(pkt))) => {
|
||||
let is_repair = pkt.header.is_repair;
|
||||
decoder.ingest(pkt);
|
||||
if !is_repair {
|
||||
if let Some(_n) = decoder.decode_next(&mut pcm_buf) {
|
||||
let now = Instant::now();
|
||||
if first_recv_time.is_none() {
|
||||
first_recv_time = Some(now);
|
||||
}
|
||||
last_recv_time = Some(now);
|
||||
frames_received += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
|
||||
if (frame_idx + 1) % 250 == 0 {
|
||||
info!(
|
||||
frame = frame_idx + 1,
|
||||
sent = frames_sent,
|
||||
recv = frames_received,
|
||||
elapsed = format!("{:.1}s", start.elapsed().as_secs_f64()),
|
||||
"drift-test progress"
|
||||
);
|
||||
}
|
||||
|
||||
tokio::time::sleep(frame_duration).await;
|
||||
}
|
||||
|
||||
// Drain trailing packets for 2 seconds
|
||||
info!("sender done, draining trailing packets for 2s...");
|
||||
let drain_deadline = Instant::now() + Duration::from_secs(2);
|
||||
while Instant::now() < drain_deadline {
|
||||
match tokio::time::timeout(Duration::from_millis(100), transport.recv_media()).await {
|
||||
Ok(Ok(Some(pkt))) => {
|
||||
let is_repair = pkt.header.is_repair;
|
||||
decoder.ingest(pkt);
|
||||
if !is_repair {
|
||||
if let Some(_n) = decoder.decode_next(&mut pcm_buf) {
|
||||
let now = Instant::now();
|
||||
if first_recv_time.is_none() {
|
||||
first_recv_time = Some(now);
|
||||
}
|
||||
last_recv_time = Some(now);
|
||||
frames_received += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
|
||||
// Compute result
|
||||
let expected_duration_ms = config.duration_secs as u64 * 1000;
|
||||
let actual_duration_ms = match (first_recv_time, last_recv_time) {
|
||||
(Some(first), Some(last)) => last.duration_since(first).as_millis() as u64,
|
||||
_ => 0,
|
||||
};
|
||||
|
||||
let result = DriftResult::compute(
|
||||
expected_duration_ms,
|
||||
actual_duration_ms,
|
||||
frames_sent,
|
||||
frames_received,
|
||||
);
|
||||
|
||||
info!(
|
||||
expected_ms = result.expected_duration_ms,
|
||||
actual_ms = result.actual_duration_ms,
|
||||
drift_ms = result.drift_ms,
|
||||
drift_pct = format!("{:.4}%", result.drift_pct),
|
||||
loss_pct = format!("{:.1}%", result.loss_pct),
|
||||
"drift measurement complete"
|
||||
);
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Pretty-print the drift measurement results.
|
||||
pub fn print_drift_report(result: &DriftResult) {
|
||||
println!();
|
||||
println!("=== Drift Measurement Report ===");
|
||||
println!();
|
||||
println!("Frames sent: {}", result.frames_sent);
|
||||
println!("Frames received: {}", result.frames_received);
|
||||
println!("Packet loss: {:.1}%", result.loss_pct);
|
||||
println!();
|
||||
println!("Expected duration: {} ms", result.expected_duration_ms);
|
||||
println!("Actual duration: {} ms", result.actual_duration_ms);
|
||||
println!("Drift: {} ms ({:+.4}%)", result.drift_ms, result.drift_pct);
|
||||
println!();
|
||||
|
||||
// Interpretation
|
||||
let abs_drift = result.drift_ms.unsigned_abs();
|
||||
if result.frames_received == 0 {
|
||||
println!("WARNING: No frames received. Transport may be non-functional.");
|
||||
} else if abs_drift < 5 {
|
||||
println!("Result: EXCELLENT -- drift is negligible (<5 ms).");
|
||||
} else if abs_drift < 20 {
|
||||
println!("Result: GOOD -- drift is within acceptable bounds (<20 ms).");
|
||||
} else if abs_drift < 100 {
|
||||
println!("Result: FAIR -- noticeable drift ({} ms). Clock sync may be needed.", abs_drift);
|
||||
} else {
|
||||
println!("Result: POOR -- significant drift ({} ms). Investigate clock sources.", abs_drift);
|
||||
}
|
||||
println!();
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn drift_result_calculations() {
|
||||
// Perfect case: no drift, no loss
|
||||
let r = DriftResult::compute(10_000, 10_000, 500, 500);
|
||||
assert_eq!(r.drift_ms, 0);
|
||||
assert!((r.drift_pct - 0.0).abs() < f64::EPSILON);
|
||||
assert!((r.loss_pct - 0.0).abs() < f64::EPSILON);
|
||||
|
||||
// Positive drift (receiver duration longer than expected)
|
||||
let r = DriftResult::compute(10_000, 10_050, 500, 490);
|
||||
assert_eq!(r.drift_ms, 50);
|
||||
assert!((r.drift_pct - 0.5).abs() < 1e-9); // 50/10000 * 100 = 0.5%
|
||||
assert!((r.loss_pct - 2.0).abs() < 1e-9); // (1 - 490/500) * 100 = 2.0%
|
||||
|
||||
// Negative drift (receiver duration shorter than expected)
|
||||
let r = DriftResult::compute(10_000, 9_900, 500, 450);
|
||||
assert_eq!(r.drift_ms, -100);
|
||||
assert!((r.drift_pct - (-1.0)).abs() < 1e-9); // -100/10000 * 100 = -1.0%
|
||||
assert!((r.loss_pct - 10.0).abs() < 1e-9); // (1 - 450/500) * 100 = 10.0%
|
||||
|
||||
// Edge: zero frames sent (avoid division by zero)
|
||||
let r = DriftResult::compute(0, 0, 0, 0);
|
||||
assert_eq!(r.drift_ms, 0);
|
||||
assert!((r.drift_pct - 0.0).abs() < f64::EPSILON);
|
||||
assert!((r.loss_pct - 0.0).abs() < f64::EPSILON);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn drift_config_defaults() {
|
||||
let cfg = DriftTestConfig::default();
|
||||
assert_eq!(cfg.duration_secs, 10);
|
||||
assert!((cfg.tone_freq_hz - 440.0).abs() < f32::EPSILON);
|
||||
}
|
||||
}
|
||||
342
crates/wzp-client/src/echo_test.rs
Normal file
342
crates/wzp-client/src/echo_test.rs
Normal file
@@ -0,0 +1,342 @@
|
||||
//! Automated echo quality test.
|
||||
//!
|
||||
//! Sends a known test signal through a relay (echo mode), records the return,
|
||||
//! and analyzes quality over time to detect degradation, jitter buffer drift,
|
||||
//! and packet loss patterns.
|
||||
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use tracing::info;
|
||||
|
||||
use wzp_proto::MediaTransport;
|
||||
|
||||
use crate::call::{CallConfig, CallDecoder, CallEncoder};
|
||||
|
||||
const FRAME_SAMPLES: usize = 960; // 20ms @ 48kHz
|
||||
const SAMPLE_RATE: u32 = 48_000;
|
||||
|
||||
/// Results from one analysis window.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct WindowResult {
|
||||
/// Window index (0-based).
|
||||
pub index: usize,
|
||||
/// Time offset from start (seconds).
|
||||
pub time_offset_secs: f64,
|
||||
/// Number of frames sent in this window.
|
||||
pub frames_sent: u32,
|
||||
/// Number of frames received (decoded) in this window.
|
||||
pub frames_received: u32,
|
||||
/// Packet loss percentage for this window.
|
||||
pub loss_pct: f32,
|
||||
/// Signal-to-noise ratio (dB) — higher is better.
|
||||
pub snr_db: f32,
|
||||
/// Cross-correlation with original signal (0.0-1.0).
|
||||
pub correlation: f32,
|
||||
/// Max absolute sample value in received audio.
|
||||
pub peak_amplitude: i16,
|
||||
/// Whether the window contains silence (no signal detected).
|
||||
pub is_silent: bool,
|
||||
}
|
||||
|
||||
/// Full echo test results.
|
||||
#[derive(Debug)]
|
||||
pub struct EchoTestResult {
|
||||
pub duration_secs: f64,
|
||||
pub total_frames_sent: u64,
|
||||
pub total_frames_received: u64,
|
||||
pub total_packets_sent: u64,
|
||||
pub total_packets_received: u64,
|
||||
pub overall_loss_pct: f32,
|
||||
pub windows: Vec<WindowResult>,
|
||||
/// Jitter buffer stats at end.
|
||||
pub jitter_depth_final: usize,
|
||||
pub jitter_packets_lost: u64,
|
||||
pub jitter_packets_late: u64,
|
||||
}
|
||||
|
||||
/// Generate a sine wave frame at a given frequency.
|
||||
fn sine_frame(freq_hz: f32, frame_offset: u64) -> Vec<i16> {
|
||||
let start = frame_offset * FRAME_SAMPLES as u64;
|
||||
(0..FRAME_SAMPLES)
|
||||
.map(|i| {
|
||||
let t = (start + i as u64) as f32 / SAMPLE_RATE as f32;
|
||||
(f32::sin(2.0 * std::f32::consts::PI * freq_hz * t) * 16000.0) as i16
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Compute signal-to-noise ratio between original and received PCM.
|
||||
fn compute_snr(original: &[i16], received: &[i16]) -> f32 {
|
||||
if original.is_empty() || received.is_empty() {
|
||||
return 0.0;
|
||||
}
|
||||
let len = original.len().min(received.len());
|
||||
let mut signal_power: f64 = 0.0;
|
||||
let mut noise_power: f64 = 0.0;
|
||||
for i in 0..len {
|
||||
let s = original[i] as f64;
|
||||
let n = (received[i] as f64) - s;
|
||||
signal_power += s * s;
|
||||
noise_power += n * n;
|
||||
}
|
||||
if noise_power < 1.0 {
|
||||
return 99.0; // essentially perfect
|
||||
}
|
||||
(10.0 * (signal_power / noise_power).log10()) as f32
|
||||
}
|
||||
|
||||
/// Compute normalized cross-correlation between two signals.
|
||||
fn cross_correlation(a: &[i16], b: &[i16]) -> f32 {
|
||||
if a.is_empty() || b.is_empty() {
|
||||
return 0.0;
|
||||
}
|
||||
let len = a.len().min(b.len());
|
||||
let mut sum_ab: f64 = 0.0;
|
||||
let mut sum_aa: f64 = 0.0;
|
||||
let mut sum_bb: f64 = 0.0;
|
||||
for i in 0..len {
|
||||
let x = a[i] as f64;
|
||||
let y = b[i] as f64;
|
||||
sum_ab += x * y;
|
||||
sum_aa += x * x;
|
||||
sum_bb += y * y;
|
||||
}
|
||||
let denom = (sum_aa * sum_bb).sqrt();
|
||||
if denom < 1.0 {
|
||||
return 0.0;
|
||||
}
|
||||
(sum_ab / denom) as f32
|
||||
}
|
||||
|
||||
/// Run an automated echo quality test.
|
||||
///
|
||||
/// Sends `duration_secs` of 440Hz tone through the transport (expects echo mode relay),
|
||||
/// records the response, and analyzes quality in `window_secs`-second windows.
|
||||
pub async fn run_echo_test(
|
||||
transport: &(dyn MediaTransport + Send + Sync),
|
||||
duration_secs: u32,
|
||||
window_secs: f64,
|
||||
) -> anyhow::Result<EchoTestResult> {
|
||||
let config = CallConfig::default();
|
||||
let mut encoder = CallEncoder::new(&config);
|
||||
let mut decoder = CallDecoder::new(&config);
|
||||
|
||||
let total_frames = (duration_secs as u64) * 50; // 50 fps at 20ms
|
||||
let frames_per_window = ((window_secs * 50.0) as u64).max(1);
|
||||
|
||||
// Storage for sent and received PCM per window
|
||||
let mut sent_pcm: Vec<i16> = Vec::new();
|
||||
let mut recv_pcm: Vec<i16> = Vec::new();
|
||||
let mut windows: Vec<WindowResult> = Vec::new();
|
||||
let mut pcm_buf = vec![0i16; FRAME_SAMPLES];
|
||||
|
||||
let mut total_packets_sent = 0u64;
|
||||
let mut total_packets_received = 0u64;
|
||||
let mut window_frames_sent = 0u32;
|
||||
let mut window_frames_received = 0u32;
|
||||
let mut window_idx = 0usize;
|
||||
|
||||
let start = Instant::now();
|
||||
let frame_duration = Duration::from_millis(20);
|
||||
|
||||
info!(
|
||||
duration = duration_secs,
|
||||
window = format!("{window_secs}s"),
|
||||
"starting echo quality test"
|
||||
);
|
||||
|
||||
for frame_idx in 0..total_frames {
|
||||
// Generate and send tone
|
||||
let pcm = sine_frame(440.0, frame_idx);
|
||||
sent_pcm.extend_from_slice(&pcm);
|
||||
|
||||
let packets = encoder.encode_frame(&pcm)?;
|
||||
for pkt in &packets {
|
||||
transport.send_media(pkt).await?;
|
||||
total_packets_sent += 1;
|
||||
}
|
||||
window_frames_sent += 1;
|
||||
|
||||
// Try to receive echo (non-blocking-ish: short timeout)
|
||||
let recv_deadline = Instant::now() + Duration::from_millis(5);
|
||||
loop {
|
||||
if Instant::now() >= recv_deadline {
|
||||
break;
|
||||
}
|
||||
match tokio::time::timeout(Duration::from_millis(2), transport.recv_media()).await {
|
||||
Ok(Ok(Some(pkt))) => {
|
||||
total_packets_received += 1;
|
||||
let is_repair = pkt.header.is_repair;
|
||||
decoder.ingest(pkt);
|
||||
if !is_repair {
|
||||
if let Some(n) = decoder.decode_next(&mut pcm_buf) {
|
||||
recv_pcm.extend_from_slice(&pcm_buf[..n]);
|
||||
window_frames_received += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
|
||||
// Analyze window
|
||||
if (frame_idx + 1) % frames_per_window == 0 || frame_idx == total_frames - 1 {
|
||||
let time_offset = start.elapsed().as_secs_f64();
|
||||
|
||||
// Compare sent vs received for this window
|
||||
let sent_start = (window_idx as u64 * frames_per_window * FRAME_SAMPLES as u64) as usize;
|
||||
let sent_end = sent_start + (window_frames_sent as usize * FRAME_SAMPLES);
|
||||
let sent_window = if sent_end <= sent_pcm.len() {
|
||||
&sent_pcm[sent_start..sent_end]
|
||||
} else {
|
||||
&sent_pcm[sent_start..]
|
||||
};
|
||||
|
||||
let recv_start = recv_pcm.len().saturating_sub(window_frames_received as usize * FRAME_SAMPLES);
|
||||
let recv_window = &recv_pcm[recv_start..];
|
||||
|
||||
let peak = recv_window.iter().map(|s| s.abs()).max().unwrap_or(0);
|
||||
let is_silent = peak < 100;
|
||||
|
||||
let snr = if !is_silent && !sent_window.is_empty() && !recv_window.is_empty() {
|
||||
compute_snr(sent_window, recv_window)
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let corr = if !is_silent && !sent_window.is_empty() && !recv_window.is_empty() {
|
||||
cross_correlation(sent_window, recv_window)
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let loss = if window_frames_sent > 0 {
|
||||
(1.0 - window_frames_received as f32 / window_frames_sent as f32) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let result = WindowResult {
|
||||
index: window_idx,
|
||||
time_offset_secs: time_offset,
|
||||
frames_sent: window_frames_sent,
|
||||
frames_received: window_frames_received,
|
||||
loss_pct: loss.max(0.0),
|
||||
snr_db: snr,
|
||||
correlation: corr,
|
||||
peak_amplitude: peak,
|
||||
is_silent,
|
||||
};
|
||||
|
||||
info!(
|
||||
window = window_idx,
|
||||
time = format!("{:.1}s", time_offset),
|
||||
sent = window_frames_sent,
|
||||
recv = window_frames_received,
|
||||
loss = format!("{:.1}%", result.loss_pct),
|
||||
snr = format!("{:.1}dB", snr),
|
||||
corr = format!("{:.3}", corr),
|
||||
peak = peak,
|
||||
"window analysis"
|
||||
);
|
||||
|
||||
windows.push(result);
|
||||
window_idx += 1;
|
||||
window_frames_sent = 0;
|
||||
window_frames_received = 0;
|
||||
}
|
||||
|
||||
tokio::time::sleep(frame_duration).await;
|
||||
}
|
||||
|
||||
// Drain remaining received packets
|
||||
info!("draining remaining packets...");
|
||||
let drain_deadline = Instant::now() + Duration::from_secs(3);
|
||||
while Instant::now() < drain_deadline {
|
||||
match tokio::time::timeout(Duration::from_millis(100), transport.recv_media()).await {
|
||||
Ok(Ok(Some(pkt))) => {
|
||||
total_packets_received += 1;
|
||||
let is_repair = pkt.header.is_repair;
|
||||
decoder.ingest(pkt);
|
||||
if !is_repair {
|
||||
decoder.decode_next(&mut pcm_buf);
|
||||
}
|
||||
}
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
|
||||
let jitter_stats = decoder.stats().clone();
|
||||
let total_frames_received = recv_pcm.len() as u64 / FRAME_SAMPLES as u64;
|
||||
let overall_loss = if total_frames > 0 {
|
||||
(1.0 - total_frames_received as f32 / total_frames as f32) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
Ok(EchoTestResult {
|
||||
duration_secs: start.elapsed().as_secs_f64(),
|
||||
total_frames_sent: total_frames,
|
||||
total_frames_received,
|
||||
total_packets_sent,
|
||||
total_packets_received,
|
||||
overall_loss_pct: overall_loss.max(0.0),
|
||||
windows,
|
||||
jitter_depth_final: jitter_stats.current_depth,
|
||||
jitter_packets_lost: jitter_stats.packets_lost,
|
||||
jitter_packets_late: jitter_stats.packets_late,
|
||||
})
|
||||
}
|
||||
|
||||
/// Print a summary report of the echo test.
|
||||
pub fn print_report(result: &EchoTestResult) {
|
||||
println!();
|
||||
println!("=== Echo Quality Test Report ===");
|
||||
println!();
|
||||
println!("Duration: {:.1}s", result.duration_secs);
|
||||
println!("Frames sent: {}", result.total_frames_sent);
|
||||
println!("Frames received: {}", result.total_frames_received);
|
||||
println!("Packets sent: {}", result.total_packets_sent);
|
||||
println!("Packets received: {}", result.total_packets_received);
|
||||
println!("Overall loss: {:.1}%", result.overall_loss_pct);
|
||||
println!("Jitter buf depth: {}", result.jitter_depth_final);
|
||||
println!("Jitter buf lost: {}", result.jitter_packets_lost);
|
||||
println!("Jitter buf late: {}", result.jitter_packets_late);
|
||||
println!();
|
||||
println!("┌───────┬─────────┬──────┬──────┬─────────┬───────┬───────┐");
|
||||
println!("│ Win │ Time │ Sent │ Recv │ Loss │ SNR │ Corr │");
|
||||
println!("├───────┼─────────┼──────┼──────┼─────────┼───────┼───────┤");
|
||||
for w in &result.windows {
|
||||
let status = if w.is_silent { " !" } else { " " };
|
||||
println!(
|
||||
"│ {:>3}{} │ {:>5.1}s │ {:>4} │ {:>4} │ {:>5.1}% │ {:>5.1} │ {:.3} │",
|
||||
w.index, status, w.time_offset_secs, w.frames_sent, w.frames_received,
|
||||
w.loss_pct, w.snr_db, w.correlation
|
||||
);
|
||||
}
|
||||
println!("└───────┴─────────┴──────┴──────┴─────────┴───────┴───────┘");
|
||||
|
||||
// Detect degradation trend
|
||||
if result.windows.len() >= 4 {
|
||||
let first_half: Vec<_> = result.windows[..result.windows.len() / 2].to_vec();
|
||||
let second_half: Vec<_> = result.windows[result.windows.len() / 2..].to_vec();
|
||||
|
||||
let avg_loss_first = first_half.iter().map(|w| w.loss_pct).sum::<f32>() / first_half.len() as f32;
|
||||
let avg_loss_second = second_half.iter().map(|w| w.loss_pct).sum::<f32>() / second_half.len() as f32;
|
||||
let avg_corr_first = first_half.iter().map(|w| w.correlation).sum::<f32>() / first_half.len() as f32;
|
||||
let avg_corr_second = second_half.iter().map(|w| w.correlation).sum::<f32>() / second_half.len() as f32;
|
||||
|
||||
println!();
|
||||
if avg_loss_second > avg_loss_first + 5.0 {
|
||||
println!("WARNING: Quality degradation detected!");
|
||||
println!(" Loss increased from {:.1}% to {:.1}% over time", avg_loss_first, avg_loss_second);
|
||||
}
|
||||
if avg_corr_second < avg_corr_first - 0.1 {
|
||||
println!("WARNING: Signal correlation dropped from {:.3} to {:.3}", avg_corr_first, avg_corr_second);
|
||||
}
|
||||
if avg_loss_second <= avg_loss_first + 5.0 && avg_corr_second >= avg_corr_first - 0.1 {
|
||||
println!("Quality is STABLE over the test duration.");
|
||||
}
|
||||
}
|
||||
println!();
|
||||
}
|
||||
162
crates/wzp-client/src/featherchat.rs
Normal file
162
crates/wzp-client/src/featherchat.rs
Normal file
@@ -0,0 +1,162 @@
|
||||
//! featherChat signaling bridge.
|
||||
//!
|
||||
//! Sends WZP call signaling (Offer/Answer/Hangup) through featherChat's
|
||||
//! E2E encrypted WebSocket channel as `WireMessage::CallSignal`.
|
||||
//!
|
||||
//! Flow:
|
||||
//! 1. Client connects to featherChat WS with bearer token
|
||||
//! 2. Sends CallOffer as CallSignal(signal_type=Offer, payload=JSON SignalMessage)
|
||||
//! 3. Receives CallAnswer as CallSignal(signal_type=Answer, payload=JSON SignalMessage)
|
||||
//! 4. Extracts relay address from the answer
|
||||
//! 5. Connects QUIC to relay for media
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use wzp_proto::packet::SignalMessage;
|
||||
|
||||
/// featherChat CallSignal types (mirrors warzone-protocol::message::CallSignalType).
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub enum CallSignalType {
|
||||
Offer,
|
||||
Answer,
|
||||
IceCandidate,
|
||||
Hangup,
|
||||
Reject,
|
||||
Ringing,
|
||||
Busy,
|
||||
Hold,
|
||||
Unhold,
|
||||
Mute,
|
||||
Unmute,
|
||||
Transfer,
|
||||
}
|
||||
|
||||
/// A CallSignal as sent through featherChat's WireMessage.
|
||||
/// This is what goes in the `payload` field of `WireMessage::CallSignal`.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct WzpCallPayload {
|
||||
/// The WZP SignalMessage (CallOffer, CallAnswer, etc.) serialized as JSON.
|
||||
pub signal: SignalMessage,
|
||||
/// The relay address to connect to for media (host:port).
|
||||
pub relay_addr: Option<String>,
|
||||
/// Room name on the relay.
|
||||
pub room: Option<String>,
|
||||
}
|
||||
|
||||
/// Parameters for initiating a call through featherChat.
|
||||
pub struct CallInitParams {
|
||||
/// featherChat server URL (e.g., "wss://chat.example.com/ws").
|
||||
pub server_url: String,
|
||||
/// Bearer token for authentication.
|
||||
pub token: String,
|
||||
/// Target peer fingerprint (who to call).
|
||||
pub target_fingerprint: String,
|
||||
/// Relay address for media transport.
|
||||
pub relay_addr: String,
|
||||
/// Room name on the relay.
|
||||
pub room: String,
|
||||
/// Our identity seed for crypto.
|
||||
pub seed: [u8; 32],
|
||||
}
|
||||
|
||||
/// Result of a successful call setup.
|
||||
pub struct CallSetupResult {
|
||||
/// Relay address to connect to.
|
||||
pub relay_addr: String,
|
||||
/// Room name.
|
||||
pub room: String,
|
||||
/// The peer's CallAnswer signal (contains ephemeral key, etc.)
|
||||
pub answer: SignalMessage,
|
||||
}
|
||||
|
||||
/// Serialize a WZP SignalMessage into a featherChat CallSignal payload string.
|
||||
pub fn encode_call_payload(
|
||||
signal: &SignalMessage,
|
||||
relay_addr: Option<&str>,
|
||||
room: Option<&str>,
|
||||
) -> String {
|
||||
let payload = WzpCallPayload {
|
||||
signal: signal.clone(),
|
||||
relay_addr: relay_addr.map(|s| s.to_string()),
|
||||
room: room.map(|s| s.to_string()),
|
||||
};
|
||||
serde_json::to_string(&payload).unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Deserialize a featherChat CallSignal payload back to WZP types.
|
||||
pub fn decode_call_payload(payload: &str) -> Result<WzpCallPayload, String> {
|
||||
serde_json::from_str(payload).map_err(|e| format!("invalid call payload: {e}"))
|
||||
}
|
||||
|
||||
/// Map WZP SignalMessage type to featherChat CallSignalType.
|
||||
pub fn signal_to_call_type(signal: &SignalMessage) -> CallSignalType {
|
||||
match signal {
|
||||
SignalMessage::CallOffer { .. } => CallSignalType::Offer,
|
||||
SignalMessage::CallAnswer { .. } => CallSignalType::Answer,
|
||||
SignalMessage::IceCandidate { .. } => CallSignalType::IceCandidate,
|
||||
SignalMessage::Hangup { .. } => CallSignalType::Hangup,
|
||||
SignalMessage::Rekey { .. } => CallSignalType::Offer, // reuse
|
||||
SignalMessage::QualityUpdate { .. } => CallSignalType::Offer, // reuse
|
||||
SignalMessage::Ping { .. } | SignalMessage::Pong { .. } => CallSignalType::Offer,
|
||||
SignalMessage::AuthToken { .. } => CallSignalType::Offer,
|
||||
SignalMessage::Hold => CallSignalType::Hold,
|
||||
SignalMessage::Unhold => CallSignalType::Unhold,
|
||||
SignalMessage::Mute => CallSignalType::Mute,
|
||||
SignalMessage::Unmute => CallSignalType::Unmute,
|
||||
SignalMessage::Transfer { .. } => CallSignalType::Transfer,
|
||||
SignalMessage::TransferAck => CallSignalType::Offer, // reuse
|
||||
SignalMessage::PresenceUpdate { .. } => CallSignalType::Offer, // reuse
|
||||
SignalMessage::RouteQuery { .. } => CallSignalType::Offer, // reuse
|
||||
SignalMessage::RouteResponse { .. } => CallSignalType::Offer, // reuse
|
||||
SignalMessage::SessionForward { .. } => CallSignalType::Offer, // reuse
|
||||
SignalMessage::SessionForwardAck { .. } => CallSignalType::Offer, // reuse
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn payload_roundtrip() {
|
||||
let signal = SignalMessage::CallOffer {
|
||||
identity_pub: [1u8; 32],
|
||||
ephemeral_pub: [2u8; 32],
|
||||
signature: vec![3u8; 64],
|
||||
supported_profiles: vec![QualityProfile::GOOD],
|
||||
};
|
||||
|
||||
let encoded = encode_call_payload(&signal, Some("relay.example.com:4433"), Some("myroom"));
|
||||
let decoded = decode_call_payload(&encoded).unwrap();
|
||||
|
||||
assert_eq!(decoded.relay_addr.unwrap(), "relay.example.com:4433");
|
||||
assert_eq!(decoded.room.unwrap(), "myroom");
|
||||
assert!(matches!(decoded.signal, SignalMessage::CallOffer { .. }));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn signal_type_mapping() {
|
||||
let offer = SignalMessage::CallOffer {
|
||||
identity_pub: [0; 32],
|
||||
ephemeral_pub: [0; 32],
|
||||
signature: vec![],
|
||||
supported_profiles: vec![],
|
||||
};
|
||||
assert!(matches!(signal_to_call_type(&offer), CallSignalType::Offer));
|
||||
|
||||
let hangup = SignalMessage::Hangup {
|
||||
reason: wzp_proto::HangupReason::Normal,
|
||||
};
|
||||
assert!(matches!(signal_to_call_type(&hangup), CallSignalType::Hangup));
|
||||
|
||||
assert!(matches!(signal_to_call_type(&SignalMessage::Hold), CallSignalType::Hold));
|
||||
assert!(matches!(signal_to_call_type(&SignalMessage::Unhold), CallSignalType::Unhold));
|
||||
assert!(matches!(signal_to_call_type(&SignalMessage::Mute), CallSignalType::Mute));
|
||||
assert!(matches!(signal_to_call_type(&SignalMessage::Unmute), CallSignalType::Unmute));
|
||||
|
||||
let transfer = SignalMessage::Transfer {
|
||||
target_fingerprint: "abc".to_string(),
|
||||
relay_addr: None,
|
||||
};
|
||||
assert!(matches!(signal_to_call_type(&transfer), CallSignalType::Transfer));
|
||||
}
|
||||
}
|
||||
@@ -6,11 +6,18 @@
|
||||
//!
|
||||
//! Targets: Android (JNI), Windows desktop, macOS/Linux (testing)
|
||||
|
||||
#[cfg(feature = "audio")]
|
||||
pub mod audio_io;
|
||||
pub mod bench;
|
||||
pub mod call;
|
||||
pub mod drift_test;
|
||||
pub mod echo_test;
|
||||
pub mod featherchat;
|
||||
pub mod handshake;
|
||||
pub mod metrics;
|
||||
pub mod sweep;
|
||||
|
||||
#[cfg(feature = "audio")]
|
||||
pub use audio_io::{AudioCapture, AudioPlayback};
|
||||
pub use call::{CallConfig, CallDecoder, CallEncoder};
|
||||
pub use handshake::perform_handshake;
|
||||
|
||||
186
crates/wzp-client/src/metrics.rs
Normal file
186
crates/wzp-client/src/metrics.rs
Normal file
@@ -0,0 +1,186 @@
|
||||
//! Client-side JSONL metrics export.
|
||||
//!
|
||||
//! When `--metrics-file <path>` is passed, the client writes one JSON object
|
||||
//! per second to the specified file. Each line is a self-contained JSON object
|
||||
//! (JSONL format) containing jitter buffer stats, loss, and quality profile.
|
||||
|
||||
use std::fs::{File, OpenOptions};
|
||||
use std::io::Write;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use wzp_proto::jitter::JitterStats;
|
||||
|
||||
/// A single metrics snapshot written as one JSONL line.
|
||||
#[derive(Serialize)]
|
||||
pub struct ClientMetricsSnapshot {
|
||||
pub ts: String,
|
||||
pub buffer_depth: usize,
|
||||
pub underruns: u64,
|
||||
pub overruns: u64,
|
||||
pub loss_pct: f64,
|
||||
pub rtt_ms: u64,
|
||||
pub jitter_ms: u64,
|
||||
pub frames_sent: u64,
|
||||
pub frames_received: u64,
|
||||
pub quality_profile: String,
|
||||
}
|
||||
|
||||
/// Periodic JSONL writer that respects a configurable interval.
|
||||
pub struct MetricsWriter {
|
||||
file: File,
|
||||
interval: Duration,
|
||||
last_write: Instant,
|
||||
}
|
||||
|
||||
impl MetricsWriter {
|
||||
/// Create a new `MetricsWriter` that appends JSONL to the given path.
|
||||
///
|
||||
/// The file is created (or truncated) immediately.
|
||||
pub fn new(path: &str, interval_secs: u64) -> Result<Self, anyhow::Error> {
|
||||
let file = OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.truncate(true)
|
||||
.open(path)?;
|
||||
Ok(Self {
|
||||
file,
|
||||
interval: Duration::from_secs(interval_secs),
|
||||
// Set last_write far in the past so the first call writes immediately.
|
||||
last_write: Instant::now() - Duration::from_secs(interval_secs + 1),
|
||||
})
|
||||
}
|
||||
|
||||
/// Write a JSONL line if the interval has elapsed since the last write.
|
||||
///
|
||||
/// Returns `Ok(true)` when a line was written, `Ok(false)` when skipped.
|
||||
pub fn maybe_write(&mut self, snapshot: &ClientMetricsSnapshot) -> Result<bool, anyhow::Error> {
|
||||
let now = Instant::now();
|
||||
if now.duration_since(self.last_write) >= self.interval {
|
||||
let line = serde_json::to_string(snapshot)?;
|
||||
writeln!(self.file, "{}", line)?;
|
||||
self.file.flush()?;
|
||||
self.last_write = now;
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a `ClientMetricsSnapshot` from jitter buffer stats and a quality profile name.
|
||||
///
|
||||
/// Fields not available from `JitterStats` alone (rtt_ms, jitter_ms, frames_sent)
|
||||
/// are set to zero — the caller can override them if the data is available.
|
||||
pub fn snapshot_from_stats(stats: &JitterStats, profile: &str) -> ClientMetricsSnapshot {
|
||||
let loss_pct = if stats.packets_received > 0 {
|
||||
(stats.packets_lost as f64 / stats.packets_received as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
ClientMetricsSnapshot {
|
||||
ts: chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true),
|
||||
buffer_depth: stats.current_depth,
|
||||
underruns: stats.underruns,
|
||||
overruns: stats.overruns,
|
||||
loss_pct,
|
||||
rtt_ms: 0,
|
||||
jitter_ms: 0,
|
||||
frames_sent: 0,
|
||||
frames_received: stats.total_decoded,
|
||||
quality_profile: profile.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Fixed fixture: 100 packets received, 5 lost → exactly 5% loss.
    fn make_test_stats() -> JitterStats {
        JitterStats {
            packets_received: 100,
            packets_played: 95,
            packets_lost: 5,
            packets_late: 2,
            packets_duplicate: 0,
            current_depth: 8,
            total_decoded: 93,
            underruns: 1,
            overruns: 0,
            max_depth_seen: 12,
        }
    }

    #[test]
    fn snapshot_serializes_to_json() {
        let stats = make_test_stats();
        let snap = snapshot_from_stats(&stats, "GOOD");
        let json = serde_json::to_string(&snap).unwrap();

        // Verify expected fields are present in the JSON string.
        assert!(json.contains("\"ts\""));
        assert!(json.contains("\"buffer_depth\":8"));
        assert!(json.contains("\"underruns\":1"));
        assert!(json.contains("\"overruns\":0"));
        // 5 lost / 100 received → "5." prefix matches 5.0 however it's printed.
        assert!(json.contains("\"loss_pct\":5."));
        assert!(json.contains("\"rtt_ms\":0"));
        assert!(json.contains("\"jitter_ms\":0"));
        assert!(json.contains("\"frames_sent\":0"));
        assert!(json.contains("\"frames_received\":93"));
        assert!(json.contains("\"quality_profile\":\"GOOD\""));

        // Verify it round-trips as valid JSON.
        let value: serde_json::Value = serde_json::from_str(&json).unwrap();
        assert_eq!(value["buffer_depth"], 8);
        assert_eq!(value["quality_profile"], "GOOD");
    }

    #[test]
    fn metrics_writer_creates_file() {
        // NOTE(review): fixed temp-dir filename; could collide if this test
        // runs concurrently in multiple processes — consider a unique name.
        let dir = std::env::temp_dir();
        let path = dir.join("wzp_metrics_test.jsonl");
        let path_str = path.to_str().unwrap();

        let mut writer = MetricsWriter::new(path_str, 1).unwrap();
        let stats = make_test_stats();
        let snap = snapshot_from_stats(&stats, "DEGRADED");

        let wrote = writer.maybe_write(&snap).unwrap();
        assert!(wrote, "first write should succeed immediately");

        // Read the file back and verify it contains valid JSONL.
        let contents = std::fs::read_to_string(&path).unwrap();
        let lines: Vec<&str> = contents.lines().collect();
        assert_eq!(lines.len(), 1, "should have exactly one JSONL line");

        let value: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
        assert_eq!(value["quality_profile"], "DEGRADED");
        assert_eq!(value["buffer_depth"], 8);

        // Clean up.
        let _ = std::fs::remove_file(&path);
    }

    #[test]
    fn metrics_writer_respects_interval() {
        let dir = std::env::temp_dir();
        let path = dir.join("wzp_metrics_interval_test.jsonl");
        let path_str = path.to_str().unwrap();

        let mut writer = MetricsWriter::new(path_str, 60).unwrap();
        let stats = make_test_stats();
        let snap = snapshot_from_stats(&stats, "GOOD");

        // First write succeeds (last_write is set far in the past).
        let first = writer.maybe_write(&snap).unwrap();
        assert!(first, "first write should succeed");

        // Immediate second write should be skipped (60s interval).
        let second = writer.maybe_write(&snap).unwrap();
        assert!(!second, "second write should be skipped — interval not elapsed");

        // Clean up.
        let _ = std::fs::remove_file(&path);
    }
}
|
||||
254
crates/wzp-client/src/sweep.rs
Normal file
254
crates/wzp-client/src/sweep.rs
Normal file
@@ -0,0 +1,254 @@
|
||||
//! Parameter sweep tool for jitter buffer configurations.
|
||||
//!
|
||||
//! Tests different (target_depth, max_depth) combinations in a local
|
||||
//! encoder-to-decoder pipeline (no network) and reports frame loss,
|
||||
//! estimated latency, underruns, and overruns for each configuration.
|
||||
|
||||
use crate::call::{CallConfig, CallDecoder, CallEncoder};
|
||||
use wzp_proto::QualityProfile;
|
||||
|
||||
// Audio framing constants shared by the sweep pipeline.
const FRAME_SAMPLES: usize = 960; // 20ms @ 48kHz
const SAMPLE_RATE: u32 = 48_000;
const FRAME_DURATION_MS: u32 = 20;
|
||||
|
||||
/// Configuration for a parameter sweep.
///
/// The sweep runs every pairing of `target_depths` × `max_depths`; pairs
/// where target exceeds max are skipped by `run_local_sweep`.
pub struct SweepConfig {
    /// Target jitter buffer depths to test (in packets).
    pub target_depths: Vec<usize>,
    /// Maximum jitter buffer depths to test (in packets).
    pub max_depths: Vec<usize>,
    /// Duration in seconds to run each configuration.
    pub test_duration_secs: u32,
    /// Frequency of the test tone in Hz.
    pub tone_freq_hz: f32,
}
|
||||
|
||||
impl Default for SweepConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
target_depths: vec![10, 25, 50, 100, 200],
|
||||
max_depths: vec![50, 100, 250, 500],
|
||||
test_duration_secs: 2,
|
||||
tone_freq_hz: 440.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Result from one (target_depth, max_depth) configuration.
#[derive(Debug, Clone)]
pub struct SweepResult {
    /// Jitter buffer target depth used.
    pub target_depth: usize,
    /// Jitter buffer max depth used.
    pub max_depth: usize,
    /// Total frames sent into the encoder.
    pub frames_sent: u64,
    /// Total frames successfully decoded.
    pub frames_received: u64,
    /// Frame loss percentage (0.0–100.0).
    pub loss_pct: f64,
    /// Estimated latency in ms (target_depth * frame_duration).
    pub avg_latency_ms: f64,
    /// Number of jitter buffer underruns.
    pub underruns: u64,
    /// Number of jitter buffer overruns (packets dropped due to full buffer).
    pub overruns: u64,
}
|
||||
|
||||
/// Generate a sine wave frame at the given frequency and frame offset.
|
||||
fn sine_frame(freq_hz: f32, frame_offset: u64) -> Vec<i16> {
|
||||
let start = frame_offset * FRAME_SAMPLES as u64;
|
||||
(0..FRAME_SAMPLES)
|
||||
.map(|i| {
|
||||
let t = (start + i as u64) as f32 / SAMPLE_RATE as f32;
|
||||
(f32::sin(2.0 * std::f32::consts::PI * freq_hz * t) * 16000.0) as i16
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Run a local parameter sweep (no network).
///
/// For each (target_depth, max_depth) combination, creates an encoder and
/// decoder, pushes frames through the pipeline, and collects statistics.
/// Combinations where `target_depth > max_depth` are skipped.
pub fn run_local_sweep(config: &SweepConfig) -> Vec<SweepResult> {
    // 50 frames per second at 20 ms per frame.
    let frames_per_config =
        (config.test_duration_secs as u64) * (1000 / FRAME_DURATION_MS as u64);

    let mut results = Vec::new();

    for &target in &config.target_depths {
        for &max in &config.max_depths {
            // Skip invalid combinations where target exceeds max.
            if target > max {
                continue;
            }

            let call_cfg = CallConfig {
                profile: QualityProfile::GOOD,
                jitter_target: target,
                jitter_max: max,
                // Clamp the minimum depth into [1, 3].
                jitter_min: target.min(3).max(1),
                ..Default::default()
            };

            let mut encoder = CallEncoder::new(&call_cfg);
            let mut decoder = CallDecoder::new(&call_cfg);

            let mut pcm_out = vec![0i16; FRAME_SAMPLES];
            let mut frames_decoded = 0u64;

            for frame_idx in 0..frames_per_config {
                // Encode a tone frame.
                let pcm_in = sine_frame(config.tone_freq_hz, frame_idx);
                let packets = match encoder.encode_frame(&pcm_in) {
                    Ok(p) => p,
                    // Encode failures simply skip this tick (counted as loss).
                    Err(_) => continue,
                };

                // Feed all packets (source + repair) into the decoder.
                for pkt in packets {
                    decoder.ingest(pkt);
                }

                // Attempt to decode one frame (mirrors real-time cadence).
                if decoder.decode_next(&mut pcm_out).is_some() {
                    frames_decoded += 1;
                }
            }

            // Drain: keep decoding until the jitter buffer is empty
            // (bounded by `max` so this cannot loop forever).
            for _ in 0..max {
                if decoder.decode_next(&mut pcm_out).is_some() {
                    frames_decoded += 1;
                } else {
                    break;
                }
            }

            let stats = decoder.stats().clone();

            let loss_pct = if frames_per_config > 0 {
                (1.0 - frames_decoded as f64 / frames_per_config as f64) * 100.0
            } else {
                0.0
            };

            results.push(SweepResult {
                target_depth: target,
                max_depth: max,
                frames_sent: frames_per_config,
                frames_received: frames_decoded,
                // The drain loop can make decoded > sent, yielding a negative
                // raw loss; clamp at 0.
                loss_pct: loss_pct.max(0.0),
                avg_latency_ms: target as f64 * FRAME_DURATION_MS as f64,
                underruns: stats.underruns,
                overruns: stats.overruns,
            });
        }
    }

    results
}
|
||||
|
||||
/// Print a formatted ASCII table of sweep results.
///
/// Column widths in the header, separator, and rows are kept in sync by hand;
/// change all three format strings together.
pub fn print_sweep_table(results: &[SweepResult]) {
    println!();
    println!("=== Jitter Buffer Parameter Sweep ===");
    println!();
    println!(
        " {:>6} | {:>4} | {:>6} | {:>6} | {:>6} | {:>10} | {:>9} | {:>8}",
        "target", "max", "sent", "recv", "loss%", "latency_ms", "underruns", "overruns"
    );
    // Separator row: dashes padded to each column width.
    println!(
        " {:-<6}-+-{:-<4}-+-{:-<6}-+-{:-<6}-+-{:-<6}-+-{:-<10}-+-{:-<9}-+-{:-<8}",
        "", "", "", "", "", "", "", ""
    );
    for r in results {
        println!(
            " {:>6} | {:>4} | {:>6} | {:>6} | {:>5.1}% | {:>10.0} | {:>9} | {:>8}",
            r.target_depth,
            r.max_depth,
            r.frames_sent,
            r.frames_received,
            r.loss_pct,
            r.avg_latency_ms,
            r.underruns,
            r.overruns,
        );
    }
    println!();
}
|
||||
|
||||
/// Run a default sweep and print the results.
|
||||
///
|
||||
/// This is the entry point for the `--sweep` CLI flag.
|
||||
pub fn run_and_print_default_sweep() {
|
||||
let config = SweepConfig::default();
|
||||
let results = run_local_sweep(&config);
|
||||
print_sweep_table(&results);
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn sweep_config_default() {
        let cfg = SweepConfig::default();
        assert_eq!(cfg.target_depths.len(), 5);
        assert_eq!(cfg.max_depths.len(), 4);
        assert!(cfg.test_duration_secs > 0);
        assert!(cfg.tone_freq_hz > 0.0);
        // All default targets should be positive.
        assert!(cfg.target_depths.iter().all(|&d| d > 0));
        assert!(cfg.max_depths.iter().all(|&d| d > 0));
    }

    #[test]
    fn local_sweep_runs() {
        // Small sweep (1s per config) to keep the test fast.
        let cfg = SweepConfig {
            target_depths: vec![3, 10],
            max_depths: vec![50, 100],
            test_duration_secs: 1,
            tone_freq_hz: 440.0,
        };
        let results = run_local_sweep(&cfg);
        // 2 targets x 2 maxes = 4 configs (all valid since targets < maxes).
        assert_eq!(results.len(), 4);
        for r in &results {
            assert!(r.frames_sent > 0, "frames_sent should be > 0");
            assert!(r.frames_received > 0, "frames_received should be > 0");
            assert!(r.avg_latency_ms > 0.0, "latency should be > 0");
        }
    }

    #[test]
    fn sweep_table_formats() {
        // Verify print_sweep_table doesn't panic with various inputs.
        print_sweep_table(&[]);

        let results = vec![
            SweepResult {
                target_depth: 10,
                max_depth: 50,
                frames_sent: 100,
                frames_received: 98,
                loss_pct: 2.0,
                avg_latency_ms: 200.0,
                underruns: 2,
                overruns: 0,
            },
            SweepResult {
                target_depth: 25,
                max_depth: 100,
                frames_sent: 100,
                frames_received: 100,
                loss_pct: 0.0,
                avg_latency_ms: 500.0,
                underruns: 0,
                overruns: 0,
            },
        ];
        print_sweep_table(&results);
    }
}
|
||||
190
crates/wzp-client/tests/long_session.rs
Normal file
190
crates/wzp-client/tests/long_session.rs
Normal file
@@ -0,0 +1,190 @@
|
||||
//! WZP-P2-T1-S5: 60-second long-session regression tests.
|
||||
//!
|
||||
//! Verifies that the full codec + FEC + jitter buffer pipeline does not drift
|
||||
//! or degrade over a sustained 60-second (3000-frame) session. Runs entirely
|
||||
//! in-process with no network — packets flow directly from encoder to decoder.
|
||||
|
||||
use wzp_client::call::{CallConfig, CallDecoder, CallEncoder};
|
||||
use wzp_proto::QualityProfile;
|
||||
|
||||
// Framing constants for the 60-second long-session tests.
const FRAME_SAMPLES: usize = 960; // 20ms @ 48kHz
const SAMPLE_RATE: f32 = 48_000.0;
const TOTAL_FRAMES: u64 = 3_000; // 60 seconds at 50 fps
|
||||
|
||||
/// Build a CallConfig tuned for direct-loopback testing (no network).
///
/// Disables silence suppression and noise suppression (which would mangle
/// or squelch the synthetic tone), uses a fixed (non-adaptive) jitter buffer
/// with min_depth=1 so that packets are played out as soon as they arrive.
fn test_config() -> CallConfig {
    CallConfig {
        profile: QualityProfile::GOOD,
        jitter_target: 4,
        jitter_max: 500,
        jitter_min: 1,
        suppression_enabled: false,
        noise_suppression: false,
        adaptive_jitter: false,
        ..Default::default()
    }
}
|
||||
|
||||
/// Generate a 20ms frame of 440 Hz sine tone.
|
||||
fn sine_frame(frame_offset: u64) -> Vec<i16> {
|
||||
let start_sample = frame_offset * FRAME_SAMPLES as u64;
|
||||
(0..FRAME_SAMPLES)
|
||||
.map(|i| {
|
||||
let t = (start_sample + i as u64) as f32 / SAMPLE_RATE;
|
||||
(f32::sin(2.0 * std::f32::consts::PI * 440.0 * t) * 16000.0) as i16
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// 60-second session with a perfect (lossless, in-order) channel.
///
/// Encodes 3000 frames of 440 Hz tone, feeds every packet directly into the
/// decoder, and verifies:
/// - frame loss < 5% (>2850 of 3000 source frames decoded or PLC'd)
/// - no panics
///
/// Note: the encoder shares a single sequence counter between source and
/// repair packets. Since repair packets are NOT pushed into the jitter
/// buffer, each FEC block creates a gap in the playout sequence. GOOD
/// profile (5 frames/block, fec_ratio=0.2) generates 1 repair per block,
/// so every 6th seq number is a "phantom" Missing in the jitter buffer.
/// The jitter buffer correctly fills these gaps with PLC. We call
/// `decode_next` once per encode tick; the buffer stays shallow because
/// PLC frames consume the phantom seqs at the same rate they're created.
#[test]
fn long_session_no_drift() {
    let config = test_config();
    let mut encoder = CallEncoder::new(&config);
    let mut decoder = CallDecoder::new(&config);

    let mut frames_decoded = 0u64;
    let mut pcm_buf = vec![0i16; FRAME_SAMPLES];

    for i in 0..TOTAL_FRAMES {
        let pcm = sine_frame(i);
        let packets = encoder.encode_frame(&pcm).expect("encode should not fail");

        // Lossless channel: every packet (source + repair) reaches the decoder.
        for pkt in packets {
            decoder.ingest(pkt);
        }

        // Decode one frame per tick (mirrors real-time 50 fps cadence).
        if decoder.decode_next(&mut pcm_buf).is_some() {
            frames_decoded += 1;
        }
    }

    let stats = decoder.stats();

    // Diagnostic dump — visible with `cargo test -- --nocapture`.
    println!(
        "long_session_no_drift: decoded={frames_decoded}/{TOTAL_FRAMES}, \
         underruns={}, overruns={}, depth={}, max_depth={}, late={}, lost={}",
        stats.underruns, stats.overruns, stats.current_depth, stats.max_depth_seen,
        stats.packets_late, stats.packets_lost,
    );

    // With 1 decode per tick over 3000 ticks, we expect ~3000 decoded frames
    // (some via PLC for repair-seq gaps). Allow up to 5% gap.
    assert!(
        frames_decoded > 2850,
        "frame loss too high: decoded {frames_decoded}/3000 (need >2850 = <5% loss)"
    );
}
|
||||
|
||||
/// 60-second session with simulated 5% packet loss and reordering.
///
/// Every 20th source packet is dropped; pairs of adjacent packets are swapped
/// every 7 frames. Verifies that FEC + jitter buffer recover gracefully:
/// - frame loss < 10% (FEC should recover some of the 5% artificial loss)
/// - no panics
#[test]
fn long_session_with_simulated_loss() {
    let config = test_config();
    let mut encoder = CallEncoder::new(&config);
    let mut decoder = CallDecoder::new(&config);

    let mut frames_decoded = 0u64;
    let mut pcm_buf = vec![0i16; FRAME_SAMPLES];

    for i in 0..TOTAL_FRAMES {
        let pcm = sine_frame(i);
        let packets = encoder.encode_frame(&pcm).expect("encode should not fail");

        let mut batch: Vec<_> = packets.into_iter().collect();

        // Simulate reordering: swap first two packets in the batch every 7 frames.
        if i % 7 == 0 && batch.len() >= 2 {
            batch.swap(0, 1);
        }

        for (j, pkt) in batch.into_iter().enumerate() {
            // Drop every 20th *source* (non-repair) packet to simulate ~5% loss.
            // (The is_repair guard means a repair packet swapped into slot 0
            // is never the one dropped.)
            if !pkt.header.is_repair && i % 20 == 0 && j == 0 {
                continue; // drop this packet
            }
            decoder.ingest(pkt);
        }

        if decoder.decode_next(&mut pcm_buf).is_some() {
            frames_decoded += 1;
        }
    }

    let stats = decoder.stats();

    // Diagnostic dump — visible with `cargo test -- --nocapture`.
    println!(
        "long_session_with_simulated_loss: decoded={frames_decoded}/{TOTAL_FRAMES}, \
         underruns={}, overruns={}, depth={}, max_depth={}, late={}, lost={}",
        stats.underruns, stats.overruns, stats.current_depth, stats.max_depth_seen,
        stats.packets_late, stats.packets_lost,
    );

    // With 5% artificial loss + FEC recovery + PLC, we should still get >90% decoded.
    assert!(
        frames_decoded > 2700,
        "frame loss too high under simulated loss: decoded {frames_decoded}/3000 (need >2700 = <10%)"
    );
}
|
||||
|
||||
/// Verify that the jitter buffer's decoded-frame count is consistent with its
/// own internal statistics over a long session.
#[test]
fn long_session_stats_consistency() {
    let config = test_config();
    let mut encoder = CallEncoder::new(&config);
    let mut decoder = CallDecoder::new(&config);

    let mut frames_decoded = 0u64;
    let mut pcm_buf = vec![0i16; FRAME_SAMPLES];

    for i in 0..TOTAL_FRAMES {
        let pcm = sine_frame(i);
        let packets = encoder.encode_frame(&pcm).expect("encode");

        for pkt in packets {
            decoder.ingest(pkt);
        }
        if decoder.decode_next(&mut pcm_buf).is_some() {
            frames_decoded += 1;
        }
    }

    let stats = decoder.stats();

    // total_decoded should match our manual counter.
    assert_eq!(
        stats.total_decoded, frames_decoded,
        "stats.total_decoded ({}) != manually counted frames_decoded ({frames_decoded})",
        stats.total_decoded,
    );

    // packets_received should be > 0.
    assert!(
        stats.packets_received > 0,
        "stats.packets_received should be > 0"
    );
}
|
||||
@@ -16,4 +16,10 @@ audiopus = { workspace = true }
|
||||
# Pure-Rust Codec2 implementation
|
||||
codec2 = { workspace = true }
|
||||
|
||||
# RNG for comfort noise generation
|
||||
rand = { workspace = true }
|
||||
|
||||
# ML-based noise suppression (pure-Rust port of RNNoise)
|
||||
nnnoiseless = "0.5"
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
183
crates/wzp-codec/src/denoise.rs
Normal file
183
crates/wzp-codec/src/denoise.rs
Normal file
@@ -0,0 +1,183 @@
|
||||
//! ML-based noise suppression using nnnoiseless (pure-Rust RNNoise port).
|
||||
//!
|
||||
//! RNNoise operates on 480-sample frames at 48 kHz (10 ms). Our codec pipeline
|
||||
//! uses 960-sample frames (20 ms), so each call processes two halves.
|
||||
|
||||
use nnnoiseless::DenoiseState;
|
||||
|
||||
/// Wraps [`DenoiseState`] to provide noise suppression on 960-sample (20 ms) PCM
/// frames at 48 kHz.
// NOTE(review): "Supressor" is a typo for "Suppressor", but the name is public
// API — renaming would break downstream callers (e.g. the lib.rs re-export).
pub struct NoiseSupressor {
    // RNNoise model state from nnnoiseless.
    state: Box<DenoiseState<'static>>,
    // When false, `process` is a no-op passthrough.
    enabled: bool,
}
|
||||
|
||||
impl NoiseSupressor {
|
||||
/// Create a new noise suppressor (enabled by default).
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
state: DenoiseState::new(),
|
||||
enabled: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Process a 960-sample frame of 48 kHz mono PCM **in place**.
|
||||
///
|
||||
/// nnnoiseless expects f32 samples in the range roughly [-32768, 32767].
|
||||
/// We convert i16 → f32, process two 480-sample halves, then convert back.
|
||||
pub fn process(&mut self, pcm: &mut [i16]) {
|
||||
if !self.enabled {
|
||||
return;
|
||||
}
|
||||
|
||||
debug_assert!(
|
||||
pcm.len() >= 960,
|
||||
"NoiseSupressor::process expects at least 960 samples, got {}",
|
||||
pcm.len()
|
||||
);
|
||||
|
||||
// Process in two 480-sample halves.
|
||||
for half in 0..2 {
|
||||
let offset = half * 480;
|
||||
let end = offset + 480;
|
||||
if end > pcm.len() {
|
||||
break;
|
||||
}
|
||||
|
||||
// i16 → f32
|
||||
let mut float_buf = [0.0f32; 480];
|
||||
for (i, &sample) in pcm[offset..end].iter().enumerate() {
|
||||
float_buf[i] = sample as f32;
|
||||
}
|
||||
|
||||
// nnnoiseless processes in-place, returns VAD probability (unused here).
|
||||
let mut output = [0.0f32; 480];
|
||||
let _vad = self.state.process_frame(&mut output, &float_buf);
|
||||
|
||||
// f32 → i16 with clamping
|
||||
for (i, &val) in output.iter().enumerate() {
|
||||
let clamped = val.max(-32768.0).min(32767.0);
|
||||
pcm[offset + i] = clamped as i16;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Enable or disable noise suppression.
|
||||
pub fn set_enabled(&mut self, enabled: bool) {
|
||||
self.enabled = enabled;
|
||||
}
|
||||
|
||||
/// Returns `true` if noise suppression is currently enabled.
|
||||
pub fn is_enabled(&self) -> bool {
|
||||
self.enabled
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for NoiseSupressor {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn denoiser_creates() {
        let ns = NoiseSupressor::new();
        assert!(ns.is_enabled());
    }

    #[test]
    fn denoiser_processes_frame() {
        let mut ns = NoiseSupressor::new();
        let mut pcm = vec![0i16; 960];
        // Fill with a simple pattern so we have something to process.
        for (i, s) in pcm.iter_mut().enumerate() {
            *s = ((i % 100) as i16).wrapping_mul(100);
        }
        let original_len = pcm.len();
        ns.process(&mut pcm);
        assert_eq!(pcm.len(), original_len, "output length must match input length");
    }

    #[test]
    fn denoiser_reduces_noise() {
        let mut ns = NoiseSupressor::new();

        // Generate a 440 Hz sine tone + white noise at 48 kHz.
        // We need multiple frames for the RNN to converge.
        let sample_rate = 48000.0f64;
        let freq = 440.0f64;
        let amplitude = 10000.0f64;
        let noise_amplitude = 3000.0f64;

        // Use a simple PRNG for reproducibility (no seed-dependent flakiness).
        let mut rng_state: u32 = 12345;
        let mut next_noise = || -> f64 {
            // xorshift32
            rng_state ^= rng_state << 13;
            rng_state ^= rng_state >> 17;
            rng_state ^= rng_state << 5;
            // Map to [-1, 1]
            (rng_state as f64 / u32::MAX as f64) * 2.0 - 1.0
        };

        // Feed several frames to let the RNN warm up, then measure the last one.
        let num_warmup_frames = 20;
        let mut last_input = vec![0i16; 960];
        let mut last_output = vec![0i16; 960];

        for frame_idx in 0..=num_warmup_frames {
            let mut pcm = vec![0i16; 960];
            for (i, s) in pcm.iter_mut().enumerate() {
                let t = (frame_idx * 960 + i) as f64 / sample_rate;
                let sine = amplitude * (2.0 * std::f64::consts::PI * freq * t).sin();
                let noise = noise_amplitude * next_noise();
                *s = (sine + noise).max(-32768.0).min(32767.0) as i16;
            }

            if frame_idx == num_warmup_frames {
                last_input = pcm.clone();
            }

            ns.process(&mut pcm);

            if frame_idx == num_warmup_frames {
                last_output = pcm;
            }
        }

        // Compute RMS of input and output.
        let rms = |buf: &[i16]| -> f64 {
            let sum: f64 = buf.iter().map(|&s| (s as f64) * (s as f64)).sum();
            (sum / buf.len() as f64).sqrt()
        };

        let input_rms = rms(&last_input);
        let output_rms = rms(&last_output);

        // The denoiser should not amplify the signal beyond input.
        // More importantly, the output should have measurably lower noise.
        // We verify the output RMS is less than the input RMS (noise was reduced).
        assert!(
            output_rms < input_rms,
            "expected output RMS ({output_rms:.1}) < input RMS ({input_rms:.1}); \
             denoiser should reduce noise"
        );
    }

    #[test]
    fn denoiser_passthrough_when_disabled() {
        let mut ns = NoiseSupressor::new();
        ns.set_enabled(false);
        assert!(!ns.is_enabled());

        let original: Vec<i16> = (0..960).map(|i| (i * 10) as i16).collect();
        let mut pcm = original.clone();
        ns.process(&mut pcm);

        // Disabled state must be a strict no-op.
        assert_eq!(pcm, original, "disabled denoiser must not alter input");
    }
}
|
||||
@@ -12,11 +12,15 @@
|
||||
pub mod adaptive;
|
||||
pub mod codec2_dec;
|
||||
pub mod codec2_enc;
|
||||
pub mod denoise;
|
||||
pub mod opus_dec;
|
||||
pub mod opus_enc;
|
||||
pub mod resample;
|
||||
pub mod silence;
|
||||
|
||||
pub use adaptive::{AdaptiveDecoder, AdaptiveEncoder};
|
||||
pub use denoise::NoiseSupressor;
|
||||
pub use silence::{ComfortNoise, SilenceDetector};
|
||||
pub use wzp_proto::{AudioDecoder, AudioEncoder, CodecId, QualityProfile};
|
||||
|
||||
/// Create an adaptive encoder starting at the given quality profile.
|
||||
|
||||
191
crates/wzp-codec/src/silence.rs
Normal file
191
crates/wzp-codec/src/silence.rs
Normal file
@@ -0,0 +1,191 @@
|
||||
//! Silence suppression and comfort noise generation.
|
||||
//!
|
||||
//! During silent periods (~50% of a typical call), full encoded frames waste
|
||||
//! bandwidth. [`SilenceDetector`] detects silent audio based on RMS energy,
|
||||
//! and [`ComfortNoise`] generates low-level background noise to fill gaps on
|
||||
//! the decoder side.
|
||||
|
||||
use rand::Rng;
|
||||
|
||||
/// Detects silence in PCM audio using RMS energy with a hangover period.
///
/// The hangover prevents clipping the onset of speech: after silence is first
/// detected, the detector continues reporting "not silent" for `hangover_frames`
/// additional frames before transitioning to suppression.
pub struct SilenceDetector {
    /// RMS threshold below which audio is considered silent (for i16 samples).
    threshold_rms: f64,
    /// Number of frames to keep sending after silence starts (prevents speech clipping).
    hangover_frames: u32,
    /// Count of consecutive frames whose RMS is below the threshold.
    silent_frames: u32,
    /// Whether suppression is currently active.
    is_suppressing: bool,
}
|
||||
|
||||
impl SilenceDetector {
|
||||
/// Create a new silence detector.
|
||||
///
|
||||
/// * `threshold_rms` — RMS energy below which a frame is silent (default: 100.0 for i16).
|
||||
/// * `hangover_frames` — frames to keep sending after silence onset (default: 5 = 100ms at 20ms frames).
|
||||
pub fn new(threshold_rms: f64, hangover_frames: u32) -> Self {
|
||||
Self {
|
||||
threshold_rms,
|
||||
hangover_frames,
|
||||
silent_frames: 0,
|
||||
is_suppressing: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the RMS (root mean square) energy of a PCM buffer.
|
||||
pub fn rms(pcm: &[i16]) -> f64 {
|
||||
if pcm.is_empty() {
|
||||
return 0.0;
|
||||
}
|
||||
let sum_sq: f64 = pcm.iter().map(|&s| (s as f64) * (s as f64)).sum();
|
||||
(sum_sq / pcm.len() as f64).sqrt()
|
||||
}
|
||||
|
||||
/// Returns `true` if the frame should be suppressed (i.e. is silence past
|
||||
/// the hangover period).
|
||||
///
|
||||
/// Call once per frame. The detector tracks consecutive silent frames
|
||||
/// internally and only reports suppression after the hangover expires.
|
||||
pub fn is_silent(&mut self, pcm: &[i16]) -> bool {
|
||||
let energy = Self::rms(pcm);
|
||||
|
||||
if energy < self.threshold_rms {
|
||||
self.silent_frames = self.silent_frames.saturating_add(1);
|
||||
|
||||
if self.silent_frames > self.hangover_frames {
|
||||
self.is_suppressing = true;
|
||||
}
|
||||
} else {
|
||||
// Speech detected — reset.
|
||||
self.silent_frames = 0;
|
||||
self.is_suppressing = false;
|
||||
}
|
||||
|
||||
self.is_suppressing
|
||||
}
|
||||
|
||||
/// Whether the detector is currently in the suppressing state.
|
||||
pub fn suppressing(&self) -> bool {
|
||||
self.is_suppressing
|
||||
}
|
||||
}
|
||||
|
||||
/// Generates low-level comfort noise to fill silent periods.
///
/// When the decoder receives a comfort-noise descriptor (or detects a gap
/// caused by silence suppression), it uses this to produce a natural-sounding
/// background hiss instead of dead silence.
pub struct ComfortNoise {
    /// Peak amplitude of the generated noise (default: 50).
    level: i16,
}
|
||||
|
||||
impl ComfortNoise {
|
||||
/// Create a comfort noise generator with the given amplitude level.
|
||||
pub fn new(level: i16) -> Self {
|
||||
Self { level }
|
||||
}
|
||||
|
||||
/// Fill `pcm` with low-level random noise in the range `[-level, level]`.
|
||||
pub fn generate(&self, pcm: &mut [i16]) {
|
||||
let mut rng = rand::thread_rng();
|
||||
for sample in pcm.iter_mut() {
|
||||
*sample = rng.gen_range(-self.level..=self.level);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn silence_detector_detects_silence() {
        let mut det = SilenceDetector::new(100.0, 5);
        let silence = vec![0i16; 960];

        // First 5 frames are hangover — should NOT suppress yet.
        for _ in 0..5 {
            assert!(!det.is_silent(&silence));
        }
        // Frame 6 onward: past hangover, should suppress.
        assert!(det.is_silent(&silence));
        assert!(det.is_silent(&silence));
    }

    #[test]
    fn silence_detector_detects_speech() {
        let mut det = SilenceDetector::new(100.0, 5);

        // Generate a 1kHz sine wave at decent amplitude (RMS ≈ 7071, well
        // above the 100.0 threshold).
        let pcm: Vec<i16> = (0..960)
            .map(|i| {
                let t = i as f64 / 48000.0;
                (10000.0 * (2.0 * std::f64::consts::PI * 1000.0 * t).sin()) as i16
            })
            .collect();

        // Should never report silent.
        for _ in 0..20 {
            assert!(!det.is_silent(&pcm));
        }
    }

    #[test]
    fn silence_detector_hangover() {
        let mut det = SilenceDetector::new(100.0, 3);
        let silence = vec![0i16; 960];
        let speech: Vec<i16> = (0..960)
            .map(|i| {
                let t = i as f64 / 48000.0;
                (5000.0 * (2.0 * std::f64::consts::PI * 440.0 * t).sin()) as i16
            })
            .collect();

        // Feed silence past hangover to enter suppression.
        for _ in 0..4 {
            det.is_silent(&silence);
        }
        assert!(det.is_silent(&silence), "should be suppressing after hangover");

        // Speech arrives — should immediately stop suppressing.
        assert!(!det.is_silent(&speech));
        assert!(!det.is_silent(&speech));
    }

    #[test]
    fn comfort_noise_generates_nonzero() {
        let cn = ComfortNoise::new(50);
        let mut pcm = vec![0i16; 960];
        cn.generate(&mut pcm);

        // At least some samples should be non-zero.
        // (Probabilistic but overwhelmingly likely for 960 samples in [-50, 50].)
        assert!(pcm.iter().any(|&s| s != 0), "CN output should not be all zeros");

        // All samples should be within [-50, 50].
        assert!(pcm.iter().all(|&s| s.abs() <= 50), "CN samples out of range");
    }

    #[test]
    fn rms_calculation() {
        // All zeros → RMS 0.
        assert_eq!(SilenceDetector::rms(&[0i16; 100]), 0.0);

        // Constant value: RMS of [v, v, v, ...] = |v|.
        let pcm = vec![100i16; 100];
        let rms = SilenceDetector::rms(&pcm);
        assert!((rms - 100.0).abs() < 0.01, "RMS of constant 100 should be 100, got {rms}");

        // Known pattern: [3, 4] → sqrt((9+16)/2) = sqrt(12.5) ≈ 3.5355
        let rms2 = SilenceDetector::rms(&[3, 4]);
        assert!((rms2 - 3.5355).abs() < 0.01, "RMS of [3,4] should be ~3.5355, got {rms2}");

        // Empty buffer → 0.
        assert_eq!(SilenceDetector::rms(&[]), 0.0);
    }
}
|
||||
@@ -15,5 +15,18 @@ hkdf = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
bip39 = "2"
|
||||
hex = "0.4"
|
||||
|
||||
# featherChat identity — the source of truth for Seed, IdentityKeyPair, Fingerprint
|
||||
warzone-protocol = { path = "../../deps/featherchat/warzone/crates/warzone-protocol" }
|
||||
|
||||
[dev-dependencies]
|
||||
ed25519-dalek = { workspace = true }
|
||||
warzone-protocol = { path = "../../deps/featherchat/warzone/crates/warzone-protocol" }
|
||||
wzp-proto = { workspace = true }
|
||||
wzp-client = { path = "../wzp-client" }
|
||||
wzp-relay = { path = "../wzp-relay" }
|
||||
serde_json = "1"
|
||||
serde = { workspace = true }
|
||||
bincode = "1"
|
||||
|
||||
@@ -33,13 +33,13 @@ impl KeyExchange for WarzoneKeyExchange {
|
||||
// Derive Ed25519 signing key via HKDF
|
||||
let hk = Hkdf::<Sha256>::new(None, seed);
|
||||
let mut ed25519_bytes = [0u8; 32];
|
||||
hk.expand(b"warzone-ed25519-identity", &mut ed25519_bytes)
|
||||
hk.expand(b"warzone-ed25519", &mut ed25519_bytes)
|
||||
.expect("HKDF expand for Ed25519 should not fail");
|
||||
let signing_key = SigningKey::from_bytes(&ed25519_bytes);
|
||||
|
||||
// Derive X25519 static key via HKDF
|
||||
let mut x25519_bytes = [0u8; 32];
|
||||
hk.expand(b"warzone-x25519-identity", &mut x25519_bytes)
|
||||
hk.expand(b"warzone-x25519", &mut x25519_bytes)
|
||||
.expect("HKDF expand for X25519 should not fail");
|
||||
let x25519_static_secret = StaticSecret::from(x25519_bytes);
|
||||
let x25519_static_public = X25519PublicKey::from(&x25519_static_secret);
|
||||
|
||||
281
crates/wzp-crypto/src/identity.rs
Normal file
281
crates/wzp-crypto/src/identity.rs
Normal file
@@ -0,0 +1,281 @@
|
||||
//! featherChat-compatible identity module.
|
||||
//!
|
||||
//! Mirrors `warzone-protocol/src/identity.rs` and `warzone-protocol/src/mnemonic.rs`
|
||||
//! from featherChat. Same seed → same keys → same fingerprint in both codebases.
|
||||
//!
|
||||
//! Source of truth: deps/featherchat/warzone/crates/warzone-protocol/src/identity.rs
|
||||
|
||||
use ed25519_dalek::{SigningKey, VerifyingKey};
|
||||
use hkdf::Hkdf;
|
||||
use sha2::{Digest, Sha256};
|
||||
use x25519_dalek::StaticSecret;
|
||||
|
||||
/// The root secret — 32 bytes from which all keys are derived.
|
||||
/// Displayed to users as a BIP39 mnemonic (24 words).
|
||||
///
|
||||
/// Mirrors: `warzone-protocol::identity::Seed`
|
||||
pub struct Seed(pub [u8; 32]);
|
||||
|
||||
impl Seed {
|
||||
/// Generate a new random seed.
|
||||
pub fn generate() -> Self {
|
||||
let mut bytes = [0u8; 32];
|
||||
rand::RngCore::fill_bytes(&mut rand::rngs::OsRng, &mut bytes);
|
||||
Seed(bytes)
|
||||
}
|
||||
|
||||
/// Create seed from raw bytes.
|
||||
pub fn from_bytes(bytes: [u8; 32]) -> Self {
|
||||
Seed(bytes)
|
||||
}
|
||||
|
||||
/// Create seed from hex string (64 hex chars).
|
||||
pub fn from_hex(hex_str: &str) -> Result<Self, String> {
|
||||
let bytes = hex::decode(hex_str).map_err(|e| format!("invalid hex: {e}"))?;
|
||||
if bytes.len() != 32 {
|
||||
return Err(format!("expected 32 bytes, got {}", bytes.len()));
|
||||
}
|
||||
let mut seed = [0u8; 32];
|
||||
seed.copy_from_slice(&bytes);
|
||||
Ok(Seed(seed))
|
||||
}
|
||||
|
||||
/// Derive the full identity keypair from this seed.
|
||||
///
|
||||
/// Uses identical HKDF derivation as featherChat:
|
||||
/// - Ed25519: `HKDF(seed, salt=None, info="warzone-ed25519")`
|
||||
/// - X25519: `HKDF(seed, salt=None, info="warzone-x25519")`
|
||||
pub fn derive_identity(&self) -> IdentityKeyPair {
|
||||
let hk = Hkdf::<Sha256>::new(None, &self.0);
|
||||
|
||||
let mut ed_bytes = [0u8; 32];
|
||||
hk.expand(b"warzone-ed25519", &mut ed_bytes)
|
||||
.expect("HKDF expand for Ed25519");
|
||||
let signing = SigningKey::from_bytes(&ed_bytes);
|
||||
ed_bytes.fill(0);
|
||||
|
||||
let mut x_bytes = [0u8; 32];
|
||||
hk.expand(b"warzone-x25519", &mut x_bytes)
|
||||
.expect("HKDF expand for X25519");
|
||||
let encryption = StaticSecret::from(x_bytes);
|
||||
x_bytes.fill(0);
|
||||
|
||||
IdentityKeyPair {
|
||||
signing,
|
||||
encryption,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert to BIP39 mnemonic (24 words).
|
||||
///
|
||||
/// Mirrors: `warzone-protocol::mnemonic::seed_to_mnemonic`
|
||||
pub fn to_mnemonic(&self) -> String {
|
||||
let mnemonic =
|
||||
bip39::Mnemonic::from_entropy(&self.0).expect("32 bytes is valid BIP39 entropy");
|
||||
mnemonic.to_string()
|
||||
}
|
||||
|
||||
/// Recover seed from BIP39 mnemonic (24 words).
|
||||
///
|
||||
/// Mirrors: `warzone-protocol::mnemonic::mnemonic_to_seed`
|
||||
pub fn from_mnemonic(words: &str) -> Result<Self, String> {
|
||||
let mnemonic: bip39::Mnemonic = words.parse().map_err(|e| format!("invalid mnemonic: {e}"))?;
|
||||
let entropy = mnemonic.to_entropy();
|
||||
if entropy.len() != 32 {
|
||||
return Err(format!("expected 32 bytes entropy, got {}", entropy.len()));
|
||||
}
|
||||
let mut seed = [0u8; 32];
|
||||
seed.copy_from_slice(&entropy);
|
||||
Ok(Seed(seed))
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Seed {
|
||||
fn drop(&mut self) {
|
||||
self.0.fill(0); // zeroize on drop
|
||||
}
|
||||
}
|
||||
|
||||
/// The full identity keypair derived from a seed.
|
||||
///
|
||||
/// Mirrors: `warzone-protocol::identity::IdentityKeyPair`
|
||||
pub struct IdentityKeyPair {
|
||||
pub signing: SigningKey,
|
||||
pub encryption: StaticSecret,
|
||||
}
|
||||
|
||||
impl IdentityKeyPair {
|
||||
/// Get the public identity (safe to share).
|
||||
pub fn public_identity(&self) -> PublicIdentity {
|
||||
let verifying = self.signing.verifying_key();
|
||||
let encryption_pub = x25519_dalek::PublicKey::from(&self.encryption);
|
||||
let fingerprint = Fingerprint::from_verifying_key(&verifying);
|
||||
|
||||
PublicIdentity {
|
||||
signing: verifying,
|
||||
encryption: encryption_pub,
|
||||
fingerprint,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Truncated SHA-256 hash of the Ed25519 public key (16 bytes).
|
||||
/// Displayed as `xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx`.
|
||||
///
|
||||
/// Mirrors: `warzone-protocol::types::Fingerprint`
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub struct Fingerprint(pub [u8; 16]);
|
||||
|
||||
impl Fingerprint {
|
||||
pub fn from_verifying_key(key: &VerifyingKey) -> Self {
|
||||
let hash = Sha256::digest(key.as_bytes());
|
||||
let mut fp = [0u8; 16];
|
||||
fp.copy_from_slice(&hash[..16]);
|
||||
Fingerprint(fp)
|
||||
}
|
||||
|
||||
/// Parse from hex string (with or without colons).
|
||||
pub fn from_hex(s: &str) -> Result<Self, String> {
|
||||
let clean: String = s.chars().filter(|c| c.is_ascii_hexdigit()).collect();
|
||||
let bytes = hex::decode(&clean).map_err(|e| format!("invalid hex: {e}"))?;
|
||||
if bytes.len() < 16 {
|
||||
return Err("fingerprint too short".to_string());
|
||||
}
|
||||
let mut fp = [0u8; 16];
|
||||
fp.copy_from_slice(&bytes[..16]);
|
||||
Ok(Fingerprint(fp))
|
||||
}
|
||||
|
||||
/// As raw bytes.
|
||||
pub fn as_bytes(&self) -> &[u8; 16] {
|
||||
&self.0
|
||||
}
|
||||
|
||||
/// As hex string without colons.
|
||||
pub fn to_hex(&self) -> String {
|
||||
hex::encode(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Fingerprint {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{:04x}:{:04x}:{:04x}:{:04x}:{:04x}:{:04x}:{:04x}:{:04x}",
|
||||
u16::from_be_bytes([self.0[0], self.0[1]]),
|
||||
u16::from_be_bytes([self.0[2], self.0[3]]),
|
||||
u16::from_be_bytes([self.0[4], self.0[5]]),
|
||||
u16::from_be_bytes([self.0[6], self.0[7]]),
|
||||
u16::from_be_bytes([self.0[8], self.0[9]]),
|
||||
u16::from_be_bytes([self.0[10], self.0[11]]),
|
||||
u16::from_be_bytes([self.0[12], self.0[13]]),
|
||||
u16::from_be_bytes([self.0[14], self.0[15]]),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for Fingerprint {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "Fingerprint({})", self)
|
||||
}
|
||||
}
|
||||
|
||||
/// The public portion of an identity — safe to share with anyone.
|
||||
pub struct PublicIdentity {
|
||||
pub signing: VerifyingKey,
|
||||
pub encryption: x25519_dalek::PublicKey,
|
||||
pub fingerprint: Fingerprint,
|
||||
}
|
||||
|
||||
/// Hash a human-readable room/group name into an opaque hex string.
|
||||
/// Used as QUIC SNI to prevent leaking group names to network observers.
|
||||
///
|
||||
/// `hash_room_name("my-group")` → 32 hex chars (16 bytes of SHA-256).
|
||||
///
|
||||
/// Mirrors the convention in featherChat WZP-FC-5:
|
||||
/// `SHA-256("featherchat-group:" + group_name)[:16]`
|
||||
pub fn hash_room_name(group_name: &str) -> String {
|
||||
use sha2::{Digest, Sha256};
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(b"featherchat-group:");
|
||||
hasher.update(group_name.as_bytes());
|
||||
let hash = hasher.finalize();
|
||||
hex::encode(&hash[..16])
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn deterministic_derivation() {
|
||||
let seed = Seed::from_bytes([42u8; 32]);
|
||||
let id1 = seed.derive_identity();
|
||||
let id2 = seed.derive_identity();
|
||||
assert_eq!(
|
||||
id1.signing.verifying_key().as_bytes(),
|
||||
id2.signing.verifying_key().as_bytes(),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mnemonic_roundtrip() {
|
||||
let seed = Seed::generate();
|
||||
let words = seed.to_mnemonic();
|
||||
let word_count = words.split_whitespace().count();
|
||||
assert_eq!(word_count, 24);
|
||||
let recovered = Seed::from_mnemonic(&words).unwrap();
|
||||
assert_eq!(seed.0, recovered.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hex_roundtrip() {
|
||||
let seed = Seed::generate();
|
||||
let hex_str = hex::encode(seed.0);
|
||||
let recovered = Seed::from_hex(&hex_str).unwrap();
|
||||
assert_eq!(seed.0, recovered.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fingerprint_format() {
|
||||
let seed = Seed::generate();
|
||||
let id = seed.derive_identity();
|
||||
let pub_id = id.public_identity();
|
||||
let fp_str = pub_id.fingerprint.to_string();
|
||||
// Format: xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx
|
||||
assert_eq!(fp_str.len(), 39);
|
||||
assert_eq!(fp_str.chars().filter(|c| *c == ':').count(), 7);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hash_room_name_deterministic() {
|
||||
let h1 = hash_room_name("my-group");
|
||||
let h2 = hash_room_name("my-group");
|
||||
assert_eq!(h1, h2);
|
||||
assert_eq!(h1.len(), 32); // 16 bytes = 32 hex chars
|
||||
assert!(h1.chars().all(|c| c.is_ascii_hexdigit()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hash_room_name_different_inputs() {
|
||||
assert_ne!(hash_room_name("alpha"), hash_room_name("beta"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn matches_handshake_derivation() {
|
||||
use wzp_proto::KeyExchange;
|
||||
// Verify identity module matches the KeyExchange trait implementation
|
||||
let seed = [99u8; 32];
|
||||
let id = Seed::from_bytes(seed).derive_identity();
|
||||
let kx = crate::WarzoneKeyExchange::from_identity_seed(&seed);
|
||||
|
||||
assert_eq!(
|
||||
id.signing.verifying_key().as_bytes(),
|
||||
&kx.identity_public_key(),
|
||||
);
|
||||
assert_eq!(
|
||||
id.public_identity().fingerprint.as_bytes(),
|
||||
&kx.fingerprint(),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -9,12 +9,14 @@
|
||||
|
||||
pub mod anti_replay;
|
||||
pub mod handshake;
|
||||
pub mod identity;
|
||||
pub mod nonce;
|
||||
pub mod rekey;
|
||||
pub mod session;
|
||||
|
||||
pub use anti_replay::AntiReplayWindow;
|
||||
pub use handshake::WarzoneKeyExchange;
|
||||
pub use identity::{hash_room_name, Fingerprint, IdentityKeyPair, PublicIdentity, Seed};
|
||||
pub use nonce::{build_nonce, Direction};
|
||||
pub use rekey::RekeyManager;
|
||||
pub use session::ChaChaSession;
|
||||
|
||||
571
crates/wzp-crypto/tests/featherchat_compat.rs
Normal file
571
crates/wzp-crypto/tests/featherchat_compat.rs
Normal file
@@ -0,0 +1,571 @@
|
||||
//! Cross-project compatibility tests between WZP and featherChat.
|
||||
//!
|
||||
//! Verifies:
|
||||
//! 1. Identity: same seed → same keys → same fingerprints (WZP-FC-8)
|
||||
//! 2. CallSignal: WZP SignalMessage serializes into FC CallSignal.payload correctly
|
||||
//! 3. Auth: WZP auth module request/response matches FC's /v1/auth/validate contract
|
||||
//! 4. Mnemonic: BIP39 interop between both implementations
|
||||
|
||||
use wzp_proto::KeyExchange;
|
||||
|
||||
// ─── Identity Compatibility (WZP-FC-8) ──────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn same_seed_same_ed25519_key() {
|
||||
let seed = [42u8; 32];
|
||||
|
||||
let wzp_kx = wzp_crypto::WarzoneKeyExchange::from_identity_seed(&seed);
|
||||
let wzp_pub = wzp_kx.identity_public_key();
|
||||
|
||||
let fc_seed = warzone_protocol::identity::Seed::from_bytes(seed);
|
||||
let fc_id = fc_seed.derive_identity();
|
||||
let fc_pub = fc_id.signing.verifying_key();
|
||||
|
||||
assert_eq!(&wzp_pub, fc_pub.as_bytes(), "Ed25519 keys must match");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn same_seed_same_fingerprint() {
|
||||
let seed = [99u8; 32];
|
||||
|
||||
let wzp_kx = wzp_crypto::WarzoneKeyExchange::from_identity_seed(&seed);
|
||||
let wzp_fp = wzp_kx.fingerprint();
|
||||
|
||||
let fc_seed = warzone_protocol::identity::Seed::from_bytes(seed);
|
||||
let fc_fp = fc_seed.derive_identity().public_identity().fingerprint.0;
|
||||
|
||||
assert_eq!(wzp_fp, fc_fp, "Fingerprints must match");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wzp_identity_module_matches_featherchat() {
|
||||
let seed = [0xAB; 32];
|
||||
|
||||
let wzp_pub = wzp_crypto::Seed::from_bytes(seed)
|
||||
.derive_identity()
|
||||
.public_identity();
|
||||
|
||||
let fc_pub = warzone_protocol::identity::Seed::from_bytes(seed)
|
||||
.derive_identity()
|
||||
.public_identity();
|
||||
|
||||
assert_eq!(wzp_pub.signing.as_bytes(), fc_pub.signing.as_bytes());
|
||||
assert_eq!(wzp_pub.encryption.as_bytes(), fc_pub.encryption.as_bytes());
|
||||
assert_eq!(wzp_pub.fingerprint.0, fc_pub.fingerprint.0);
|
||||
assert_eq!(wzp_pub.fingerprint.to_string(), fc_pub.fingerprint.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn random_seed_identity_match() {
|
||||
let fc_seed = warzone_protocol::identity::Seed::generate();
|
||||
let raw = fc_seed.0;
|
||||
|
||||
let fc_fp = fc_seed.derive_identity().public_identity().fingerprint.0;
|
||||
let wzp_fp = wzp_crypto::WarzoneKeyExchange::from_identity_seed(&raw).fingerprint();
|
||||
|
||||
assert_eq!(wzp_fp, fc_fp);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hkdf_derive_matches() {
|
||||
let seed = [0x55; 32];
|
||||
|
||||
let fc_ed = warzone_protocol::crypto::hkdf_derive(&seed, b"", b"warzone-ed25519", 32);
|
||||
let fc_signing = ed25519_dalek::SigningKey::from_bytes(&fc_ed.try_into().unwrap());
|
||||
let fc_pub = fc_signing.verifying_key();
|
||||
|
||||
let wzp_pub = wzp_crypto::WarzoneKeyExchange::from_identity_seed(&seed).identity_public_key();
|
||||
|
||||
assert_eq!(&wzp_pub, fc_pub.as_bytes());
|
||||
}
|
||||
|
||||
// ─── BIP39 Mnemonic Interop ─────────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn mnemonic_roundtrip_fc_to_wzp() {
|
||||
let seed = [0x77; 32];
|
||||
let fc_mnemonic = warzone_protocol::identity::Seed::from_bytes(seed).to_mnemonic();
|
||||
let wzp_recovered = wzp_crypto::Seed::from_mnemonic(&fc_mnemonic).unwrap();
|
||||
assert_eq!(wzp_recovered.0, seed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mnemonic_roundtrip_wzp_to_fc() {
|
||||
let seed = [0x33; 32];
|
||||
let wzp_mnemonic = wzp_crypto::Seed::from_bytes(seed).to_mnemonic();
|
||||
let fc_recovered = warzone_protocol::identity::Seed::from_mnemonic(&wzp_mnemonic).unwrap();
|
||||
assert_eq!(fc_recovered.0, seed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mnemonic_strings_identical() {
|
||||
let seed = [0xDE; 32];
|
||||
let fc_words = warzone_protocol::identity::Seed::from_bytes(seed).to_mnemonic();
|
||||
let wzp_words = wzp_crypto::Seed::from_bytes(seed).to_mnemonic();
|
||||
assert_eq!(fc_words, wzp_words);
|
||||
}
|
||||
|
||||
// ─── CallSignal Payload Interop ─────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn wzp_signal_serializes_into_fc_callsignal_payload() {
|
||||
// WZP creates a CallOffer SignalMessage
|
||||
let offer = wzp_proto::SignalMessage::CallOffer {
|
||||
identity_pub: [1u8; 32],
|
||||
ephemeral_pub: [2u8; 32],
|
||||
signature: vec![3u8; 64],
|
||||
supported_profiles: vec![wzp_proto::QualityProfile::GOOD],
|
||||
};
|
||||
|
||||
// Encode as featherChat CallSignal payload
|
||||
let payload = wzp_client::featherchat::encode_call_payload(
|
||||
&offer,
|
||||
Some("relay.example.com:4433"),
|
||||
Some("myroom"),
|
||||
);
|
||||
|
||||
// Verify it's valid JSON
|
||||
let parsed: serde_json::Value = serde_json::from_str(&payload).unwrap();
|
||||
assert!(parsed.get("signal").is_some());
|
||||
assert_eq!(parsed["relay_addr"], "relay.example.com:4433");
|
||||
assert_eq!(parsed["room"], "myroom");
|
||||
|
||||
// featherChat would put this in WireMessage::CallSignal { payload, ... }
|
||||
// Verify the FC side can create a CallSignal with this payload
|
||||
let fc_msg = warzone_protocol::message::WireMessage::CallSignal {
|
||||
id: "call-123".to_string(),
|
||||
sender_fingerprint: "abcd1234".to_string(),
|
||||
signal_type: warzone_protocol::message::CallSignalType::Offer,
|
||||
payload: payload.clone(),
|
||||
target: "peer-fingerprint".to_string(),
|
||||
};
|
||||
|
||||
// Verify it serializes with bincode (FC's wire format)
|
||||
let encoded = bincode::serialize(&fc_msg).unwrap();
|
||||
assert!(!encoded.is_empty());
|
||||
|
||||
// And deserializes back
|
||||
let decoded: warzone_protocol::message::WireMessage = bincode::deserialize(&encoded).unwrap();
|
||||
if let warzone_protocol::message::WireMessage::CallSignal {
|
||||
id, payload: p, signal_type, ..
|
||||
} = decoded
|
||||
{
|
||||
assert_eq!(id, "call-123");
|
||||
assert!(matches!(signal_type, warzone_protocol::message::CallSignalType::Offer));
|
||||
|
||||
// Decode the WZP payload back
|
||||
let wzp_payload = wzp_client::featherchat::decode_call_payload(&p).unwrap();
|
||||
assert_eq!(wzp_payload.relay_addr.unwrap(), "relay.example.com:4433");
|
||||
assert!(matches!(wzp_payload.signal, wzp_proto::SignalMessage::CallOffer { .. }));
|
||||
} else {
|
||||
panic!("expected CallSignal");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wzp_answer_round_trips_through_fc_callsignal() {
|
||||
let answer = wzp_proto::SignalMessage::CallAnswer {
|
||||
identity_pub: [10u8; 32],
|
||||
ephemeral_pub: [20u8; 32],
|
||||
signature: vec![30u8; 64],
|
||||
chosen_profile: wzp_proto::QualityProfile::DEGRADED,
|
||||
};
|
||||
|
||||
let payload = wzp_client::featherchat::encode_call_payload(&answer, None, None);
|
||||
|
||||
let fc_msg = warzone_protocol::message::WireMessage::CallSignal {
|
||||
id: "call-456".to_string(),
|
||||
sender_fingerprint: "efgh5678".to_string(),
|
||||
signal_type: warzone_protocol::message::CallSignalType::Answer,
|
||||
payload,
|
||||
target: "caller-fp".to_string(),
|
||||
};
|
||||
|
||||
let bytes = bincode::serialize(&fc_msg).unwrap();
|
||||
let decoded: warzone_protocol::message::WireMessage = bincode::deserialize(&bytes).unwrap();
|
||||
|
||||
if let warzone_protocol::message::WireMessage::CallSignal { payload, .. } = decoded {
|
||||
let wzp = wzp_client::featherchat::decode_call_payload(&payload).unwrap();
|
||||
if let wzp_proto::SignalMessage::CallAnswer { chosen_profile, .. } = wzp.signal {
|
||||
assert_eq!(chosen_profile.codec, wzp_proto::CodecId::Opus6k);
|
||||
} else {
|
||||
panic!("expected CallAnswer");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wzp_hangup_round_trips_through_fc_callsignal() {
|
||||
let hangup = wzp_proto::SignalMessage::Hangup {
|
||||
reason: wzp_proto::HangupReason::Normal,
|
||||
};
|
||||
|
||||
let payload = wzp_client::featherchat::encode_call_payload(&hangup, None, None);
|
||||
let signal_type = wzp_client::featherchat::signal_to_call_type(&hangup);
|
||||
assert!(matches!(signal_type, wzp_client::featherchat::CallSignalType::Hangup));
|
||||
|
||||
let fc_msg = warzone_protocol::message::WireMessage::CallSignal {
|
||||
id: "call-789".to_string(),
|
||||
sender_fingerprint: "xyz".to_string(),
|
||||
signal_type: warzone_protocol::message::CallSignalType::Hangup,
|
||||
payload,
|
||||
target: "peer".to_string(),
|
||||
};
|
||||
|
||||
let bytes = bincode::serialize(&fc_msg).unwrap();
|
||||
let decoded: warzone_protocol::message::WireMessage = bincode::deserialize(&bytes).unwrap();
|
||||
|
||||
if let warzone_protocol::message::WireMessage::CallSignal { payload, .. } = decoded {
|
||||
let wzp = wzp_client::featherchat::decode_call_payload(&payload).unwrap();
|
||||
assert!(matches!(wzp.signal, wzp_proto::SignalMessage::Hangup { .. }));
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Auth Token Contract ────────────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn auth_validate_request_matches_fc_contract() {
|
||||
// WZP sends: { "token": "..." }
|
||||
// FC expects: ValidateRequest { token: String }
|
||||
let wzp_request = serde_json::json!({ "token": "test-token-123" });
|
||||
let json_str = wzp_request.to_string();
|
||||
|
||||
// FC can deserialize this (same shape as their ValidateRequest)
|
||||
#[derive(serde::Deserialize)]
|
||||
struct FcValidateRequest {
|
||||
token: String,
|
||||
}
|
||||
let fc_req: FcValidateRequest = serde_json::from_str(&json_str).unwrap();
|
||||
assert_eq!(fc_req.token, "test-token-123");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn auth_validate_response_matches_wzp_expectations() {
|
||||
// FC returns: { "valid": true, "fingerprint": "...", "alias": "..." }
|
||||
// WZP expects: wzp_relay::auth::ValidateResponse
|
||||
let fc_response = serde_json::json!({
|
||||
"valid": true,
|
||||
"fingerprint": "a3f8:1b2c:3d4e:5f60:7182:93a4:b5c6:d7e8",
|
||||
"alias": "manwe",
|
||||
"eth_address": null
|
||||
});
|
||||
|
||||
let wzp_resp: wzp_relay::auth::ValidateResponse =
|
||||
serde_json::from_value(fc_response).unwrap();
|
||||
assert!(wzp_resp.valid);
|
||||
assert_eq!(
|
||||
wzp_resp.fingerprint.unwrap(),
|
||||
"a3f8:1b2c:3d4e:5f60:7182:93a4:b5c6:d7e8"
|
||||
);
|
||||
assert_eq!(wzp_resp.alias.unwrap(), "manwe");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn auth_invalid_response_matches() {
|
||||
let fc_response = serde_json::json!({ "valid": false });
|
||||
let wzp_resp: wzp_relay::auth::ValidateResponse =
|
||||
serde_json::from_value(fc_response).unwrap();
|
||||
assert!(!wzp_resp.valid);
|
||||
assert!(wzp_resp.fingerprint.is_none());
|
||||
}
|
||||
|
||||
// ─── Signal Type Mapping ────────────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn all_signal_types_map_correctly() {
|
||||
use wzp_client::featherchat::{signal_to_call_type, CallSignalType};
|
||||
|
||||
let cases: Vec<(wzp_proto::SignalMessage, &str)> = vec![
|
||||
(
|
||||
wzp_proto::SignalMessage::CallOffer {
|
||||
identity_pub: [0; 32], ephemeral_pub: [0; 32],
|
||||
signature: vec![], supported_profiles: vec![],
|
||||
},
|
||||
"Offer",
|
||||
),
|
||||
(
|
||||
wzp_proto::SignalMessage::CallAnswer {
|
||||
identity_pub: [0; 32], ephemeral_pub: [0; 32],
|
||||
signature: vec![],
|
||||
chosen_profile: wzp_proto::QualityProfile::GOOD,
|
||||
},
|
||||
"Answer",
|
||||
),
|
||||
(
|
||||
wzp_proto::SignalMessage::IceCandidate {
|
||||
candidate: "candidate:1".to_string(),
|
||||
},
|
||||
"IceCandidate",
|
||||
),
|
||||
(
|
||||
wzp_proto::SignalMessage::Hangup {
|
||||
reason: wzp_proto::HangupReason::Normal,
|
||||
},
|
||||
"Hangup",
|
||||
),
|
||||
];
|
||||
|
||||
for (signal, expected_name) in cases {
|
||||
let ct = signal_to_call_type(&signal);
|
||||
let name = format!("{ct:?}");
|
||||
assert_eq!(name, expected_name, "signal type mapping for {expected_name}");
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Room Hashing + Access Control ─────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn hash_room_name_deterministic() {
|
||||
let h1 = wzp_crypto::hash_room_name("ops-channel");
|
||||
let h2 = wzp_crypto::hash_room_name("ops-channel");
|
||||
assert_eq!(h1, h2, "same input must produce same hash");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hash_room_name_is_32_hex_chars() {
|
||||
let h = wzp_crypto::hash_room_name("test-room");
|
||||
assert_eq!(h.len(), 32, "hash must be 32 hex chars (16 bytes)");
|
||||
assert!(
|
||||
h.chars().all(|c| c.is_ascii_hexdigit()),
|
||||
"hash must contain only hex characters, got: {h}"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hash_room_name_different_inputs() {
|
||||
let h1 = wzp_crypto::hash_room_name("alpha");
|
||||
let h2 = wzp_crypto::hash_room_name("beta");
|
||||
let h3 = wzp_crypto::hash_room_name("alpha-2");
|
||||
assert_ne!(h1, h2, "different names must produce different hashes");
|
||||
assert_ne!(h1, h3);
|
||||
assert_ne!(h2, h3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hash_room_name_matches_fc_convention() {
|
||||
// Manual SHA-256("featherchat-group:" + name)[:16] using the sha2 crate directly
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
let name = "warzone-squad";
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(b"featherchat-group:");
|
||||
hasher.update(name.as_bytes());
|
||||
let digest = hasher.finalize();
|
||||
let expected = hex::encode(&digest[..16]);
|
||||
|
||||
let actual = wzp_crypto::hash_room_name(name);
|
||||
assert_eq!(
|
||||
actual, expected,
|
||||
"hash_room_name must equal SHA-256('featherchat-group:' + name)[:16]"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn room_acl_open_mode() {
|
||||
let mgr = wzp_relay::room::RoomManager::new();
|
||||
// Open mode: everyone is authorized regardless of fingerprint presence
|
||||
assert!(mgr.is_authorized("any-room", None));
|
||||
assert!(mgr.is_authorized("any-room", Some("random-fp")));
|
||||
assert!(mgr.is_authorized("another-room", Some("abc:def")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn room_acl_enforced() {
|
||||
let mgr = wzp_relay::room::RoomManager::with_acl();
|
||||
// ACL enabled but no fingerprint provided => denied
|
||||
assert!(
|
||||
!mgr.is_authorized("room1", None),
|
||||
"ACL mode must reject connections without a fingerprint"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn room_acl_allows_listed() {
|
||||
let mut mgr = wzp_relay::room::RoomManager::with_acl();
|
||||
mgr.allow("secure-room", "alice-fp");
|
||||
mgr.allow("secure-room", "bob-fp");
|
||||
|
||||
assert!(mgr.is_authorized("secure-room", Some("alice-fp")));
|
||||
assert!(mgr.is_authorized("secure-room", Some("bob-fp")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn room_acl_denies_unlisted() {
|
||||
let mut mgr = wzp_relay::room::RoomManager::with_acl();
|
||||
mgr.allow("secure-room", "alice-fp");
|
||||
|
||||
assert!(
|
||||
!mgr.is_authorized("secure-room", Some("eve-fp")),
|
||||
"unlisted fingerprints must be denied"
|
||||
);
|
||||
assert!(
|
||||
!mgr.is_authorized("secure-room", Some("mallory-fp")),
|
||||
"unlisted fingerprints must be denied"
|
||||
);
|
||||
// No fingerprint at all => also denied
|
||||
assert!(
|
||||
!mgr.is_authorized("secure-room", None),
|
||||
"no fingerprint must be denied in ACL mode"
|
||||
);
|
||||
}
|
||||
|
||||
// ─── Web Bridge Auth + Proto Standalone + S-9 ──────────────────────────────
|
||||
|
||||
/// WZP-S-6: featherChat may include `eth_address` in ValidateResponse.
|
||||
/// WZP's ValidateResponse must handle it gracefully (serde ignores unknown fields).
|
||||
#[test]
|
||||
fn auth_response_with_eth_address() {
|
||||
// FC response with eth_address present (non-null)
|
||||
let with_eth = serde_json::json!({
|
||||
"valid": true,
|
||||
"fingerprint": "a1b2:c3d4:e5f6:7890:abcd:ef01:2345:6789",
|
||||
"alias": "vitalik",
|
||||
"eth_address": "0x1234567890abcdef1234567890abcdef12345678"
|
||||
});
|
||||
let resp: wzp_relay::auth::ValidateResponse =
|
||||
serde_json::from_value(with_eth).unwrap();
|
||||
assert!(resp.valid);
|
||||
assert_eq!(
|
||||
resp.fingerprint.unwrap(),
|
||||
"a1b2:c3d4:e5f6:7890:abcd:ef01:2345:6789"
|
||||
);
|
||||
assert_eq!(resp.alias.unwrap(), "vitalik");
|
||||
|
||||
// FC response with eth_address = null
|
||||
let with_null_eth = serde_json::json!({
|
||||
"valid": true,
|
||||
"fingerprint": "dead:beef:cafe:babe:1234:5678:9abc:def0",
|
||||
"alias": "anon",
|
||||
"eth_address": null
|
||||
});
|
||||
let resp2: wzp_relay::auth::ValidateResponse =
|
||||
serde_json::from_value(with_null_eth).unwrap();
|
||||
assert!(resp2.valid);
|
||||
assert_eq!(
|
||||
resp2.fingerprint.unwrap(),
|
||||
"dead:beef:cafe:babe:1234:5678:9abc:def0"
|
||||
);
|
||||
|
||||
// FC response without eth_address at all
|
||||
let without_eth = serde_json::json!({
|
||||
"valid": false
|
||||
});
|
||||
let resp3: wzp_relay::auth::ValidateResponse =
|
||||
serde_json::from_value(without_eth).unwrap();
|
||||
assert!(!resp3.valid);
|
||||
}
|
||||
|
||||
/// WZP-S-7: SignalMessage::AuthToken { token } exists and round-trips via serde.
|
||||
#[test]
|
||||
fn wzp_proto_has_auth_token_variant() {
|
||||
let msg = wzp_proto::SignalMessage::AuthToken {
|
||||
token: "fc-bearer-token-xyz".to_string(),
|
||||
};
|
||||
|
||||
// Serialize to JSON
|
||||
let json = serde_json::to_string(&msg).unwrap();
|
||||
assert!(json.contains("AuthToken"));
|
||||
assert!(json.contains("fc-bearer-token-xyz"));
|
||||
|
||||
// Deserialize back
|
||||
let decoded: wzp_proto::SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
if let wzp_proto::SignalMessage::AuthToken { token } = decoded {
|
||||
assert_eq!(token, "fc-bearer-token-xyz");
|
||||
} else {
|
||||
panic!("expected AuthToken variant, got: {decoded:?}");
|
||||
}
|
||||
}
|
||||
|
||||
/// WZP-S-6: WZP CallSignalType has all variants matching featherChat's set.
#[test]
fn all_fc_call_signal_types_representable() {
    use wzp_client::featherchat::CallSignalType;

    // Every FC variant paired with its expected Debug name.
    let table: [(CallSignalType, &str); 7] = [
        (CallSignalType::Offer, "Offer"),
        (CallSignalType::Answer, "Answer"),
        (CallSignalType::IceCandidate, "IceCandidate"),
        (CallSignalType::Hangup, "Hangup"),
        (CallSignalType::Reject, "Reject"),
        (CallSignalType::Ringing, "Ringing"),
        (CallSignalType::Busy, "Busy"),
    ];

    assert_eq!(table.len(), 7, "featherChat defines exactly 7 call signal types");

    for (signal, want) in &table {
        // Debug rendering must match the FC-side name.
        assert_eq!(format!("{signal:?}"), *want);

        // Serde round-trip must preserve the variant.
        let encoded = serde_json::to_string(signal).unwrap();
        let back: CallSignalType = serde_json::from_str(&encoded).unwrap();
        assert_eq!(format!("{back:?}"), *want);
    }
}
|
||||
|
||||
/// WZP-S-9: hashed room name used as QUIC SNI must be valid — lowercase hex only.
#[test]
fn hash_room_name_used_as_sni_is_valid() {
    let long_name = "x".repeat(1000);

    // Deliberately hostile inputs: spaces, punctuation, non-ASCII, emoji,
    // brackets, and an oversized name.
    let test_rooms = [
        "general",
        "Voice Room #1",
        "café-lounge",
        "a]b[c{d}e",
        "\u{1f480}\u{1f525}",
        long_name.as_str(),
    ];

    for room in &test_rooms {
        let hashed = wzp_crypto::hash_room_name(room);

        assert!(!hashed.is_empty(), "hash of '{room}' must not be empty");

        // Only lowercase hex digits are acceptable in an SNI label.
        for ch in hashed.chars() {
            assert!(
                matches!(ch, '0'..='9' | 'a'..='f'),
                "hash of '{room}' contains invalid SNI char: '{ch}' (full: {hashed})"
            );
        }

        // SHA-256 truncated to 16 bytes -> 32 hex chars.
        assert_eq!(
            hashed.len(),
            32,
            "hash should be 32 hex chars (16 bytes), got {} for '{room}'",
            hashed.len()
        );
    }
}
|
||||
|
||||
/// WZP-S-7: wzp-proto Cargo.toml must be standalone — no `.workspace = true` inheritance.
#[test]
fn wzp_proto_cargo_toml_is_standalone() {
    // The test may run from the workspace root or from a sibling crate dir.
    let mut manifest: Option<String> = None;
    for path in ["crates/wzp-proto/Cargo.toml", "../wzp-proto/Cargo.toml"] {
        if let Ok(text) = std::fs::read_to_string(path) {
            manifest = Some(text);
            break;
        }
    }
    let contents =
        manifest.expect("could not read crates/wzp-proto/Cargo.toml from any expected path");

    // Workspace inheritance would break importing this crate standalone.
    assert!(
        !contents.contains(".workspace = true"),
        "wzp-proto Cargo.toml must not use workspace inheritance (.workspace = true), \
         found in:\n{contents}"
    );

    // Sanity: it is still a manifest for the expected package.
    assert!(
        contents.contains("name = \"wzp-proto\""),
        "expected package name 'wzp-proto' in Cargo.toml"
    );
}
|
||||
@@ -1,17 +1,22 @@
|
||||
[package]
|
||||
name = "wzp-proto"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
rust-version.workspace = true
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
license = "MIT OR Apache-2.0"
|
||||
rust-version = "1.85"
|
||||
description = "WarzonePhone protocol types, traits, and core logic"
|
||||
|
||||
# This crate is designed to be importable standalone — no workspace inheritance.
|
||||
# featherChat and other projects can depend on it directly via git:
|
||||
# wzp-proto = { git = "ssh://git@git.manko.yoga:222/manawenuz/wz-phone.git", path = "crates/wzp-proto" }
|
||||
|
||||
[dependencies]
|
||||
bytes = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
bytes = "1"
|
||||
thiserror = "2"
|
||||
async-trait = "0.1"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
tracing = "0.1"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
serde_json = "1"
|
||||
|
||||
454
crates/wzp-proto/src/bandwidth.rs
Normal file
454
crates/wzp-proto/src/bandwidth.rs
Normal file
@@ -0,0 +1,454 @@
|
||||
//! GCC-style bandwidth estimation and congestion control.
|
||||
//!
|
||||
//! Tracks available bandwidth using delay-based and loss-based signals,
|
||||
//! then adjusts the sending bitrate to avoid congestion. The estimator
|
||||
//! uses multiplicative decrease (15%) on congestion and additive increase
|
||||
//! (5%) during underuse, following the general shape of Google Congestion
|
||||
//! Control (GCC).
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::packet::QualityReport;
|
||||
use crate::QualityProfile;
|
||||
|
||||
/// Network congestion state derived from delay and loss signals.
///
/// Produced by the delay-based detector and consumed by the estimator's
/// increase/decrease logic.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CongestionState {
    /// The network has headroom; the sender may raise its bitrate.
    Underuse,
    /// Steady state: hold the current bitrate.
    Normal,
    /// Congestion detected; the sender should lower its bitrate.
    Overuse,
}
|
||||
|
||||
/// Detects congestion from increasing RTT using an exponential moving average.
|
||||
///
|
||||
/// Maintains a baseline RTT (minimum observed) and compares the smoothed RTT
|
||||
/// against it. If `rtt_ema > baseline * threshold_ratio`, congestion is detected.
|
||||
/// The baseline slowly drifts upward to handle route changes.
|
||||
struct DelayBasedDetector {
|
||||
/// Baseline RTT (minimum observed).
|
||||
baseline_rtt_ms: f64,
|
||||
/// EMA of recent RTT.
|
||||
rtt_ema: f64,
|
||||
/// EMA smoothing factor.
|
||||
alpha: f64,
|
||||
/// Threshold: if rtt_ema > baseline * threshold_ratio, congestion detected.
|
||||
threshold_ratio: f64,
|
||||
/// Current state.
|
||||
state: CongestionState,
|
||||
/// Whether we have received any RTT sample yet.
|
||||
initialized: bool,
|
||||
/// Drift factor: baseline slowly increases each update to track route changes.
|
||||
baseline_drift: f64,
|
||||
}
|
||||
|
||||
impl DelayBasedDetector {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
baseline_rtt_ms: f64::MAX,
|
||||
rtt_ema: 0.0,
|
||||
alpha: 0.3,
|
||||
threshold_ratio: 1.5,
|
||||
state: CongestionState::Normal,
|
||||
initialized: false,
|
||||
baseline_drift: 0.001,
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the detector with a new RTT sample.
|
||||
fn update(&mut self, rtt_ms: f64) {
|
||||
if !self.initialized {
|
||||
self.baseline_rtt_ms = rtt_ms;
|
||||
self.rtt_ema = rtt_ms;
|
||||
self.initialized = true;
|
||||
self.state = CongestionState::Normal;
|
||||
return;
|
||||
}
|
||||
|
||||
// Track minimum RTT as baseline.
|
||||
if rtt_ms < self.baseline_rtt_ms {
|
||||
self.baseline_rtt_ms = rtt_ms;
|
||||
} else {
|
||||
// Slowly drift baseline upward to handle route changes.
|
||||
self.baseline_rtt_ms += self.baseline_drift * (rtt_ms - self.baseline_rtt_ms);
|
||||
}
|
||||
|
||||
// Update EMA.
|
||||
self.rtt_ema = self.alpha * rtt_ms + (1.0 - self.alpha) * self.rtt_ema;
|
||||
|
||||
// Determine state.
|
||||
let overuse_threshold = self.baseline_rtt_ms * self.threshold_ratio;
|
||||
let underuse_threshold = self.baseline_rtt_ms * 1.1;
|
||||
|
||||
if self.rtt_ema > overuse_threshold {
|
||||
self.state = CongestionState::Overuse;
|
||||
} else if self.rtt_ema < underuse_threshold {
|
||||
self.state = CongestionState::Underuse;
|
||||
} else {
|
||||
self.state = CongestionState::Normal;
|
||||
}
|
||||
}
|
||||
|
||||
fn state(&self) -> CongestionState {
|
||||
self.state
|
||||
}
|
||||
}
|
||||
|
||||
/// Detects congestion from packet loss using a sliding window average.
///
/// Keeps the last `window_size` loss-percentage samples and reports
/// congestion when their mean exceeds `threshold_pct`.
struct LossBasedDetector {
    /// Recent loss percentages (sliding window, oldest at the front).
    loss_window: VecDeque<f64>,
    /// Maximum window size.
    window_size: usize,
    /// Loss threshold for congestion, in percent.
    threshold_pct: f64,
}

/// Sliding-window length. Shared by the field and the initial allocation so
/// the two cannot drift apart (previously two separate literal `10`s).
const LOSS_WINDOW_SIZE: usize = 10;
/// Default congestion threshold in percent.
const LOSS_THRESHOLD_PCT: f64 = 5.0;

impl LossBasedDetector {
    fn new() -> Self {
        Self {
            loss_window: VecDeque::with_capacity(LOSS_WINDOW_SIZE),
            window_size: LOSS_WINDOW_SIZE,
            threshold_pct: LOSS_THRESHOLD_PCT,
        }
    }

    /// Add a loss percentage sample, evicting the oldest once the window
    /// is full.
    fn update(&mut self, loss_pct: f64) {
        if self.loss_window.len() >= self.window_size {
            self.loss_window.pop_front();
        }
        self.loss_window.push_back(loss_pct);
    }

    /// Returns true if the average loss in the window exceeds the threshold.
    /// An empty window is never congested.
    fn is_congested(&self) -> bool {
        if self.loss_window.is_empty() {
            return false;
        }
        let avg = self.loss_window.iter().sum::<f64>() / self.loss_window.len() as f64;
        avg > self.threshold_pct
    }
}
|
||||
|
||||
// ─── BandwidthEstimator ─────────────────────────────────────────────────────
|
||||
|
||||
/// GCC-style bandwidth estimator that tracks available bandwidth using
|
||||
/// delay-based and loss-based congestion signals.
|
||||
///
|
||||
/// # Algorithm
|
||||
///
|
||||
/// - **Overuse** (delay or loss): multiplicative decrease by 15%.
|
||||
/// - **Underuse** (delay) with no loss congestion: additive increase by 5%.
|
||||
/// - **Normal**: hold steady.
|
||||
/// - Result is always clamped to `[min_bw_kbps, max_bw_kbps]`.
|
||||
pub struct BandwidthEstimator {
|
||||
/// Current estimated bandwidth in kbps.
|
||||
estimated_bw_kbps: f64,
|
||||
/// Minimum bandwidth floor (don't go below this).
|
||||
min_bw_kbps: f64,
|
||||
/// Maximum bandwidth ceiling.
|
||||
max_bw_kbps: f64,
|
||||
/// Delay-based detector state.
|
||||
delay_detector: DelayBasedDetector,
|
||||
/// Loss-based detector state.
|
||||
loss_detector: LossBasedDetector,
|
||||
/// Last update timestamp.
|
||||
last_update: Option<Instant>,
|
||||
}
|
||||
|
||||
/// Multiplicative decrease factor applied on congestion (15% reduction).
|
||||
const DECREASE_FACTOR: f64 = 0.85;
|
||||
/// Additive increase factor applied during underuse (5% of current estimate).
|
||||
const INCREASE_FACTOR: f64 = 0.05;
|
||||
|
||||
impl BandwidthEstimator {
|
||||
/// Create a new bandwidth estimator.
|
||||
///
|
||||
/// - `initial_bw_kbps`: starting bandwidth estimate.
|
||||
/// - `min`: minimum bandwidth floor in kbps.
|
||||
/// - `max`: maximum bandwidth ceiling in kbps.
|
||||
pub fn new(initial_bw_kbps: f64, min: f64, max: f64) -> Self {
|
||||
Self {
|
||||
estimated_bw_kbps: initial_bw_kbps,
|
||||
min_bw_kbps: min,
|
||||
max_bw_kbps: max,
|
||||
delay_detector: DelayBasedDetector::new(),
|
||||
loss_detector: LossBasedDetector::new(),
|
||||
last_update: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the estimator with new network observations.
|
||||
///
|
||||
/// Returns the new estimated bandwidth in kbps.
|
||||
///
|
||||
/// - If delay overuse OR loss congested: decrease by 15% (multiplicative decrease).
|
||||
/// - If delay underuse AND not loss congested: increase by 5% (additive increase).
|
||||
/// - If normal: hold steady.
|
||||
/// - Result is clamped to `[min, max]`.
|
||||
pub fn update(&mut self, rtt_ms: f64, loss_pct: f64, _jitter_ms: f64) -> f64 {
|
||||
self.delay_detector.update(rtt_ms);
|
||||
self.loss_detector.update(loss_pct);
|
||||
self.last_update = Some(Instant::now());
|
||||
|
||||
let delay_state = self.delay_detector.state();
|
||||
let loss_congested = self.loss_detector.is_congested();
|
||||
|
||||
if delay_state == CongestionState::Overuse || loss_congested {
|
||||
// Multiplicative decrease.
|
||||
self.estimated_bw_kbps *= DECREASE_FACTOR;
|
||||
} else if delay_state == CongestionState::Underuse && !loss_congested {
|
||||
// Additive increase.
|
||||
self.estimated_bw_kbps += self.estimated_bw_kbps * INCREASE_FACTOR;
|
||||
}
|
||||
// Normal: hold steady — no change.
|
||||
|
||||
// Clamp to [min, max].
|
||||
self.estimated_bw_kbps = self
|
||||
.estimated_bw_kbps
|
||||
.clamp(self.min_bw_kbps, self.max_bw_kbps);
|
||||
|
||||
self.estimated_bw_kbps
|
||||
}
|
||||
|
||||
/// Current estimated bandwidth in kbps.
|
||||
pub fn estimated_kbps(&self) -> f64 {
|
||||
self.estimated_bw_kbps
|
||||
}
|
||||
|
||||
/// Current congestion state (derived from delay detector).
|
||||
pub fn congestion_state(&self) -> CongestionState {
|
||||
self.delay_detector.state()
|
||||
}
|
||||
|
||||
/// Convenience method: update from a `QualityReport`.
|
||||
///
|
||||
/// Extracts RTT, loss, and jitter from the report and feeds them into
|
||||
/// the estimator.
|
||||
pub fn from_quality_report(&mut self, report: &QualityReport) -> f64 {
|
||||
let rtt_ms = report.rtt_ms() as f64;
|
||||
let loss_pct = report.loss_percent() as f64;
|
||||
let jitter_ms = report.jitter_ms as f64;
|
||||
self.update(rtt_ms, loss_pct, jitter_ms)
|
||||
}
|
||||
|
||||
/// Recommend a `QualityProfile` based on the current bandwidth estimate.
|
||||
///
|
||||
/// - bw >= 25 kbps -> GOOD (Opus 24k + 20% FEC = ~28.8 kbps total)
|
||||
/// - bw >= 8 kbps -> DEGRADED (Opus 6k + 50% FEC = ~9.0 kbps)
|
||||
/// - bw < 8 kbps -> CATASTROPHIC (Codec2 1.2k + 100% FEC = ~2.4 kbps)
|
||||
pub fn recommended_profile(&self) -> QualityProfile {
|
||||
if self.estimated_bw_kbps >= 25.0 {
|
||||
QualityProfile::GOOD
|
||||
} else if self.estimated_bw_kbps >= 8.0 {
|
||||
QualityProfile::DEGRADED
|
||||
} else {
|
||||
QualityProfile::CATASTROPHIC
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Push `n` identical observation rounds through the estimator.
    fn feed(bwe: &mut BandwidthEstimator, n: usize, rtt_ms: f64, loss_pct: f64, jitter_ms: f64) {
        for _ in 0..n {
            bwe.update(rtt_ms, loss_pct, jitter_ms);
        }
    }

    #[test]
    fn initial_bandwidth() {
        let bwe = BandwidthEstimator::new(50.0, 2.0, 100.0);
        assert!((bwe.estimated_kbps() - 50.0).abs() < f64::EPSILON);
    }

    #[test]
    fn stable_network_holds_bandwidth() {
        // Low, constant RTT and zero loss: the first sample seeds the
        // baseline; afterwards identical RTTs register as underuse, so the
        // estimate can only hold or climb.
        let mut bwe = BandwidthEstimator::new(50.0, 2.0, 100.0);
        let initial = bwe.estimated_kbps();
        feed(&mut bwe, 20, 30.0, 0.0, 5.0);
        assert!(
            bwe.estimated_kbps() >= initial,
            "bandwidth should not decrease on stable network: got {} vs initial {}",
            bwe.estimated_kbps(),
            initial
        );
    }

    #[test]
    fn high_rtt_decreases_bandwidth() {
        let mut bwe = BandwidthEstimator::new(50.0, 2.0, 100.0);
        feed(&mut bwe, 5, 20.0, 0.0, 2.0); // establish a low baseline
        let before = bwe.estimated_kbps();

        feed(&mut bwe, 10, 200.0, 0.0, 10.0); // RTT spike → overuse
        assert!(
            bwe.estimated_kbps() < before,
            "bandwidth should decrease on high RTT: got {} vs before {}",
            bwe.estimated_kbps(),
            before
        );
    }

    #[test]
    fn high_loss_decreases_bandwidth() {
        let mut bwe = BandwidthEstimator::new(50.0, 2.0, 100.0);
        let before = bwe.estimated_kbps();

        feed(&mut bwe, 15, 20.0, 10.0, 2.0); // 10% loss, above the 5% threshold
        assert!(
            bwe.estimated_kbps() < before,
            "bandwidth should decrease on high loss: got {} vs before {}",
            bwe.estimated_kbps(),
            before
        );
    }

    #[test]
    fn recovery_increases_bandwidth() {
        let mut bwe = BandwidthEstimator::new(50.0, 2.0, 100.0);

        // Establish a baseline, then congest with high RTT.
        feed(&mut bwe, 5, 20.0, 0.0, 2.0);
        feed(&mut bwe, 20, 200.0, 0.0, 10.0);
        let low_bw = bwe.estimated_kbps();
        assert!(low_bw < 50.0, "should have decreased");

        // Good conditions again: very low RTT reads as underuse → growth.
        feed(&mut bwe, 30, 10.0, 0.0, 1.0);
        assert!(
            bwe.estimated_kbps() > low_bw,
            "bandwidth should recover: got {} vs low {}",
            bwe.estimated_kbps(),
            low_bw
        );
    }

    #[test]
    fn bandwidth_clamped_to_min() {
        let mut bwe = BandwidthEstimator::new(10.0, 5.0, 100.0);
        feed(&mut bwe, 5, 20.0, 0.0, 2.0);
        feed(&mut bwe, 100, 500.0, 50.0, 100.0); // sustained severe congestion
        assert!(
            (bwe.estimated_kbps() - 5.0).abs() < f64::EPSILON,
            "bandwidth should be clamped to min: got {}",
            bwe.estimated_kbps()
        );
    }

    #[test]
    fn bandwidth_clamped_to_max() {
        let mut bwe = BandwidthEstimator::new(90.0, 2.0, 100.0);
        feed(&mut bwe, 200, 5.0, 0.0, 1.0); // sustained great conditions
        assert!(
            bwe.estimated_kbps() <= 100.0,
            "bandwidth should be clamped to max: got {}",
            bwe.estimated_kbps()
        );
    }

    #[test]
    fn recommended_profile_thresholds() {
        let cases = [
            (25.0, QualityProfile::GOOD),        // boundary: >= 25 ⇒ GOOD
            (24.9, QualityProfile::DEGRADED),    // just below 25
            (8.0, QualityProfile::DEGRADED),     // boundary: >= 8 ⇒ DEGRADED
            (7.9, QualityProfile::CATASTROPHIC), // below 8
            (80.0, QualityProfile::GOOD),        // comfortably high
        ];
        for (bw, want) in cases {
            let bwe = BandwidthEstimator::new(bw, 2.0, 100.0);
            assert_eq!(bwe.recommended_profile(), want);
        }
    }

    #[test]
    fn from_quality_report_integration() {
        let mut bwe = BandwidthEstimator::new(50.0, 2.0, 100.0);

        // Moderate loss and RTT, expressed in the report's wire encoding.
        let report = QualityReport {
            loss_pct: (10.0_f32 / 100.0 * 255.0) as u8, // ~10% loss
            rtt_4ms: 25,                                // 100ms RTT
            jitter_ms: 10,
            bitrate_cap_kbps: 200,
        };

        let new_bw = bwe.from_quality_report(&report);
        // A sane value comes back and the internal state reflects it.
        assert!(new_bw > 0.0);
        assert!(new_bw <= 100.0);
        assert!((bwe.estimated_kbps() - new_bw).abs() < f64::EPSILON);
    }

    // ── Additional detector unit tests ──────────────────────────────────

    #[test]
    fn delay_detector_starts_normal() {
        assert_eq!(DelayBasedDetector::new().state(), CongestionState::Normal);
    }

    #[test]
    fn loss_detector_below_threshold() {
        let mut det = LossBasedDetector::new();
        (0..10).for_each(|_| det.update(2.0)); // 2% loss, well below 5% threshold
        assert!(!det.is_congested());
    }

    #[test]
    fn loss_detector_above_threshold() {
        let mut det = LossBasedDetector::new();
        (0..10).for_each(|_| det.update(8.0)); // 8% loss, above 5% threshold
        assert!(det.is_congested());
    }
}
|
||||
@@ -16,6 +16,8 @@ pub enum CodecId {
|
||||
Codec2_3200 = 3,
|
||||
/// Codec2 at 1200bps (catastrophic conditions)
|
||||
Codec2_1200 = 4,
|
||||
/// Comfort noise descriptor (silence suppression)
|
||||
ComfortNoise = 5,
|
||||
}
|
||||
|
||||
impl CodecId {
|
||||
@@ -27,6 +29,7 @@ impl CodecId {
|
||||
Self::Opus6k => 6_000,
|
||||
Self::Codec2_3200 => 3_200,
|
||||
Self::Codec2_1200 => 1_200,
|
||||
Self::ComfortNoise => 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,6 +41,7 @@ impl CodecId {
|
||||
Self::Opus6k => 40,
|
||||
Self::Codec2_3200 => 20,
|
||||
Self::Codec2_1200 => 40,
|
||||
Self::ComfortNoise => 20,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,6 +50,7 @@ impl CodecId {
|
||||
match self {
|
||||
Self::Opus24k | Self::Opus16k | Self::Opus6k => 48_000,
|
||||
Self::Codec2_3200 | Self::Codec2_1200 => 8_000,
|
||||
Self::ComfortNoise => 48_000,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,6 +62,7 @@ impl CodecId {
|
||||
2 => Some(Self::Opus6k),
|
||||
3 => Some(Self::Codec2_3200),
|
||||
4 => Some(Self::Codec2_1200),
|
||||
5 => Some(Self::ComfortNoise),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,97 @@ use std::collections::BTreeMap;
|
||||
|
||||
use crate::packet::MediaPacket;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Adaptive playout delay (NetEq-inspired)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Adaptive playout delay estimator based on observed inter-arrival jitter.
///
/// Inspired by WebRTC NetEq and IAX2 adaptive jitter buffering: an
/// exponential moving average (EMA) of inter-packet arrival jitter is
/// maintained and translated into a target buffer depth in packets.
pub struct AdaptivePlayoutDelay {
    /// Current target delay in packets (equivalent to target_depth).
    target_delay: usize,
    /// Lower bound on the target delay.
    min_delay: usize,
    /// Upper bound on the target delay.
    max_delay: usize,
    /// EMA of inter-packet arrival jitter, in milliseconds.
    jitter_ema: f64,
    /// EMA smoothing factor (0.0-1.0; smaller = smoother).
    alpha: f64,
    /// Arrival time of the previous packet (wall clock, ms).
    last_arrival_ms: Option<u64>,
    /// Expected arrival time of the previous packet (ms).
    last_expected_ms: Option<u64>,
}

/// Frame duration in milliseconds (20ms Opus/Codec2 frames).
const FRAME_DURATION_MS: f64 = 20.0;
/// Safety margin added to jitter-derived target (in packets).
const SAFETY_MARGIN_PACKETS: f64 = 2.0;
/// Default EMA smoothing factor.
const DEFAULT_ALPHA: f64 = 0.05;

impl AdaptivePlayoutDelay {
    /// Build an estimator whose target delay stays within
    /// `[min_delay, max_delay]` packets; it starts at the minimum.
    pub fn new(min_delay: usize, max_delay: usize) -> Self {
        Self {
            target_delay: min_delay,
            min_delay,
            max_delay,
            jitter_ema: 0.0,
            alpha: DEFAULT_ALPHA,
            last_arrival_ms: None,
            last_expected_ms: None,
        }
    }

    /// Fold one packet arrival into the estimate; returns the new target
    /// delay in packets.
    ///
    /// - `arrival_ms`: actual wall-clock arrival time
    /// - `expected_ms`: nominal arrival time (sequence * frame duration)
    ///
    /// The first call only records timestamps — jitter needs two samples.
    pub fn update(&mut self, arrival_ms: u64, expected_ms: u64) -> usize {
        if let Some((prev_arrival, prev_expected)) =
            self.last_arrival_ms.zip(self.last_expected_ms)
        {
            // Jitter = deviation of the actual inter-arrival gap from the
            // nominal one.
            let actual_delta = arrival_ms as f64 - prev_arrival as f64;
            let expected_delta = expected_ms as f64 - prev_expected as f64;
            let jitter = (actual_delta - expected_delta).abs();

            self.jitter_ema = self.alpha * jitter + (1.0 - self.alpha) * self.jitter_ema;

            // One packet of depth per frame-duration of jitter, plus margin.
            let raw_target = (self.jitter_ema / FRAME_DURATION_MS).ceil() + SAFETY_MARGIN_PACKETS;
            self.target_delay = (raw_target as usize).clamp(self.min_delay, self.max_delay);
        }

        self.last_arrival_ms = Some(arrival_ms);
        self.last_expected_ms = Some(expected_ms);
        self.target_delay
    }

    /// Current target delay in packets.
    pub fn target_delay(&self) -> usize {
        self.target_delay
    }

    /// Current jitter EMA in milliseconds.
    pub fn jitter_estimate_ms(&self) -> f64 {
        self.jitter_ema
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Jitter buffer
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Adaptive jitter buffer that reorders packets by sequence number.
|
||||
///
|
||||
/// Designed for the lossy relay link with up to 5 seconds of buffering depth.
|
||||
@@ -21,6 +112,8 @@ pub struct JitterBuffer {
|
||||
initialized: bool,
|
||||
/// Statistics.
|
||||
stats: JitterStats,
|
||||
/// Optional adaptive playout delay estimator.
|
||||
adaptive: Option<AdaptivePlayoutDelay>,
|
||||
}
|
||||
|
||||
/// Jitter buffer statistics.
|
||||
@@ -32,6 +125,14 @@ pub struct JitterStats {
|
||||
pub packets_late: u64,
|
||||
pub packets_duplicate: u64,
|
||||
pub current_depth: usize,
|
||||
/// Total frames decoded by the consumer (tracked externally via `record_decode`).
|
||||
pub total_decoded: u64,
|
||||
/// Number of times the consumer tried to decode but the buffer was empty/not-ready.
|
||||
pub underruns: u64,
|
||||
/// Number of packets dropped because the buffer exceeded max depth.
|
||||
pub overruns: u64,
|
||||
/// High water mark — maximum buffer depth observed.
|
||||
pub max_depth_seen: usize,
|
||||
}
|
||||
|
||||
/// Result of attempting to get the next packet for playout.
|
||||
@@ -60,6 +161,27 @@ impl JitterBuffer {
|
||||
min_depth,
|
||||
initialized: false,
|
||||
stats: JitterStats::default(),
|
||||
adaptive: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a jitter buffer with adaptive playout delay.
|
||||
///
|
||||
/// The target depth will be automatically adjusted based on observed
|
||||
/// inter-arrival jitter (NetEq-inspired algorithm).
|
||||
///
|
||||
/// - `min_delay`: minimum target delay in packets
|
||||
/// - `max_delay`: maximum target delay in packets (also used as max_depth)
|
||||
pub fn new_adaptive(min_delay: usize, max_delay: usize) -> Self {
|
||||
Self {
|
||||
buffer: BTreeMap::new(),
|
||||
next_playout_seq: 0,
|
||||
max_depth: max_delay,
|
||||
target_depth: min_delay,
|
||||
min_depth: min_delay,
|
||||
initialized: false,
|
||||
stats: JitterStats::default(),
|
||||
adaptive: Some(AdaptivePlayoutDelay::new(min_delay, max_delay)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,12 +221,35 @@ impl JitterBuffer {
|
||||
self.next_playout_seq = seq;
|
||||
}
|
||||
|
||||
// Update adaptive playout delay if enabled.
|
||||
// Use the packet's timestamp as expected_ms and compute a simple wall-clock
|
||||
// proxy from the header timestamp (arrival_ms is approximated as timestamp
|
||||
// + observed jitter, but since we don't have real wall-clock here we use
|
||||
// the receive order with the header timestamp as the expected baseline).
|
||||
if let Some(ref mut adaptive) = self.adaptive {
|
||||
// expected_ms derived from sequence-implied timing: seq * frame_duration
|
||||
let expected_ms = packet.header.timestamp as u64;
|
||||
// For arrival_ms, use the actual receive timestamp. In the absence of
|
||||
// a wall-clock parameter, we use std::time for a monotonic approximation.
|
||||
// However, to keep the API simple, we compute arrival from the packet
|
||||
// stats: the Nth received packet "arrives" at N * frame_duration as a
|
||||
// baseline, and real network jitter shows in the deviation.
|
||||
// NOTE: In production, the caller should pass real wall-clock time.
|
||||
// For now, we use the header timestamp as-is (callers with adaptive
|
||||
// mode should feed arrival time via push_with_arrival).
|
||||
let arrival_ms = expected_ms; // no-op for basic push; use push_with_arrival
|
||||
adaptive.update(arrival_ms, expected_ms);
|
||||
self.target_depth = adaptive.target_delay();
|
||||
self.min_depth = self.min_depth.min(self.target_depth);
|
||||
}
|
||||
|
||||
self.buffer.insert(seq, packet);
|
||||
|
||||
// Evict oldest if over max depth
|
||||
while self.buffer.len() > self.max_depth {
|
||||
if let Some((&oldest_seq, _)) = self.buffer.first_key_value() {
|
||||
self.buffer.remove(&oldest_seq);
|
||||
self.stats.overruns += 1;
|
||||
// Advance playout seq past evicted packet
|
||||
if seq_before(self.next_playout_seq, oldest_seq.wrapping_add(1)) {
|
||||
self.next_playout_seq = oldest_seq.wrapping_add(1);
|
||||
@@ -114,6 +259,9 @@ impl JitterBuffer {
|
||||
}
|
||||
|
||||
self.stats.current_depth = self.buffer.len();
|
||||
if self.stats.current_depth > self.stats.max_depth_seen {
|
||||
self.stats.max_depth_seen = self.stats.current_depth;
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the next packet for playout.
|
||||
@@ -163,6 +311,86 @@ impl JitterBuffer {
|
||||
self.stats = JitterStats::default();
|
||||
}
|
||||
|
||||
/// Record that the consumer attempted to decode but the buffer was empty/not-ready.
|
||||
pub fn record_underrun(&mut self) {
|
||||
self.stats.underruns += 1;
|
||||
}
|
||||
|
||||
/// Record a successful frame decode by the consumer.
|
||||
pub fn record_decode(&mut self) {
|
||||
self.stats.total_decoded += 1;
|
||||
}
|
||||
|
||||
/// Reset statistics counters (preserves buffer contents and playout state).
|
||||
pub fn reset_stats(&mut self) {
|
||||
self.stats = JitterStats {
|
||||
current_depth: self.buffer.len(),
|
||||
..JitterStats::default()
|
||||
};
|
||||
}
|
||||
|
||||
/// Push a received packet with an explicit wall-clock arrival time.
|
||||
///
|
||||
/// This is the preferred entry point when adaptive playout delay is enabled,
|
||||
/// since the estimator needs real arrival timestamps.
|
||||
pub fn push_with_arrival(&mut self, packet: MediaPacket, arrival_ms: u64) {
|
||||
let expected_ms = packet.header.timestamp as u64;
|
||||
let seq = packet.header.seq;
|
||||
self.stats.packets_received += 1;
|
||||
|
||||
if !self.initialized {
|
||||
self.next_playout_seq = seq;
|
||||
self.initialized = true;
|
||||
}
|
||||
|
||||
// Check for duplicates
|
||||
if self.buffer.contains_key(&seq) {
|
||||
self.stats.packets_duplicate += 1;
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if packet is too old (already played out)
|
||||
if self.stats.packets_played > 0 && seq_before(seq, self.next_playout_seq) {
|
||||
self.stats.packets_late += 1;
|
||||
return;
|
||||
}
|
||||
|
||||
// If we haven't started playout yet, adjust next_playout_seq to earliest known
|
||||
if self.stats.packets_played == 0 && seq_before(seq, self.next_playout_seq) {
|
||||
self.next_playout_seq = seq;
|
||||
}
|
||||
|
||||
// Update adaptive playout delay if enabled.
|
||||
if let Some(ref mut adaptive) = self.adaptive {
|
||||
adaptive.update(arrival_ms, expected_ms);
|
||||
self.target_depth = adaptive.target_delay();
|
||||
}
|
||||
|
||||
self.buffer.insert(seq, packet);
|
||||
|
||||
// Evict oldest if over max depth
|
||||
while self.buffer.len() > self.max_depth {
|
||||
if let Some((&oldest_seq, _)) = self.buffer.first_key_value() {
|
||||
self.buffer.remove(&oldest_seq);
|
||||
self.stats.overruns += 1;
|
||||
if seq_before(self.next_playout_seq, oldest_seq.wrapping_add(1)) {
|
||||
self.next_playout_seq = oldest_seq.wrapping_add(1);
|
||||
self.stats.packets_lost += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.stats.current_depth = self.buffer.len();
|
||||
if self.stats.current_depth > self.stats.max_depth_seen {
|
||||
self.stats.max_depth_seen = self.stats.current_depth;
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a reference to the adaptive playout delay estimator, if enabled.
|
||||
pub fn adaptive_delay(&self) -> Option<&AdaptivePlayoutDelay> {
|
||||
self.adaptive.as_ref()
|
||||
}
|
||||
|
||||
/// Adjust target depth based on observed jitter.
|
||||
pub fn set_target_depth(&mut self, depth: usize) {
|
||||
self.target_depth = depth.min(self.max_depth);
|
||||
@@ -304,4 +532,192 @@ mod tests {
|
||||
other => panic!("expected packet 0, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// AdaptivePlayoutDelay tests
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn adaptive_delay_stable() {
|
||||
// Feed packets with consistent 20ms spacing — target should stay at minimum.
|
||||
let mut apd = AdaptivePlayoutDelay::new(3, 50);
|
||||
|
||||
for i in 0u64..200 {
|
||||
let arrival_ms = i * 20;
|
||||
let expected_ms = i * 20;
|
||||
apd.update(arrival_ms, expected_ms);
|
||||
}
|
||||
|
||||
// With zero jitter, target should be min_delay (ceil(0/20) + 2 = 2,
|
||||
// clamped to min_delay=3).
|
||||
assert_eq!(apd.target_delay(), 3);
|
||||
assert!(
|
||||
apd.jitter_estimate_ms() < 1.0,
|
||||
"jitter estimate should be near zero, got {}",
|
||||
apd.jitter_estimate_ms()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_delay_increases_on_jitter() {
|
||||
// Feed packets with variable spacing (±10ms jitter).
|
||||
let mut apd = AdaptivePlayoutDelay::new(3, 50);
|
||||
|
||||
// Alternate: arrive 10ms early / 10ms late
|
||||
for i in 0u64..200 {
|
||||
let expected_ms = i * 20;
|
||||
let jitter_offset: i64 = if i % 2 == 0 { 10 } else { -10 };
|
||||
let arrival_ms = (expected_ms as i64 + jitter_offset).max(0) as u64;
|
||||
apd.update(arrival_ms, expected_ms);
|
||||
}
|
||||
|
||||
// Inter-arrival jitter should be ~20ms (swing of 10 to -10 = delta 20).
|
||||
// target = ceil(~20/20) + 2 = 3, but EMA converges near 20 so target >= 3.
|
||||
assert!(
|
||||
apd.target_delay() >= 3,
|
||||
"target should increase with jitter, got {}",
|
||||
apd.target_delay()
|
||||
);
|
||||
assert!(
|
||||
apd.jitter_estimate_ms() > 5.0,
|
||||
"jitter estimate should be significant, got {}",
|
||||
apd.jitter_estimate_ms()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_delay_decreases_on_recovery() {
|
||||
let mut apd = AdaptivePlayoutDelay::new(3, 50);
|
||||
|
||||
// Phase 1: high jitter (±30ms)
|
||||
for i in 0u64..200 {
|
||||
let expected_ms = i * 20;
|
||||
let offset: i64 = if i % 2 == 0 { 30 } else { -30 };
|
||||
let arrival_ms = (expected_ms as i64 + offset).max(0) as u64;
|
||||
apd.update(arrival_ms, expected_ms);
|
||||
}
|
||||
let high_target = apd.target_delay();
|
||||
let high_jitter = apd.jitter_estimate_ms();
|
||||
|
||||
// Phase 2: stable (no jitter) — target should decrease via EMA decay
|
||||
for i in 200u64..600 {
|
||||
let t = i * 20;
|
||||
apd.update(t, t);
|
||||
}
|
||||
let low_target = apd.target_delay();
|
||||
let low_jitter = apd.jitter_estimate_ms();
|
||||
|
||||
assert!(
|
||||
low_target <= high_target,
|
||||
"target should decrease after recovery: {} -> {}",
|
||||
high_target,
|
||||
low_target
|
||||
);
|
||||
assert!(
|
||||
low_jitter < high_jitter,
|
||||
"jitter estimate should decrease: {} -> {}",
|
||||
high_jitter,
|
||||
low_jitter
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_delay_clamped() {
|
||||
let mut apd = AdaptivePlayoutDelay::new(3, 10);
|
||||
|
||||
// Extreme jitter: packets arrive with huge variance
|
||||
for i in 0u64..500 {
|
||||
let expected_ms = i * 20;
|
||||
let offset: i64 = if i % 2 == 0 { 500 } else { -500 };
|
||||
let arrival_ms = (expected_ms as i64 + offset).max(0) as u64;
|
||||
apd.update(arrival_ms, expected_ms);
|
||||
}
|
||||
|
||||
assert!(
|
||||
apd.target_delay() <= 10,
|
||||
"target should not exceed max_delay=10, got {}",
|
||||
apd.target_delay()
|
||||
);
|
||||
assert!(
|
||||
apd.target_delay() >= 3,
|
||||
"target should not go below min_delay=3, got {}",
|
||||
apd.target_delay()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_jitter_estimate() {
|
||||
let mut apd = AdaptivePlayoutDelay::new(3, 50);
|
||||
|
||||
// Initial jitter estimate should be zero
|
||||
assert_eq!(apd.jitter_estimate_ms(), 0.0);
|
||||
|
||||
// After one packet, still zero (no delta yet)
|
||||
apd.update(0, 0);
|
||||
assert_eq!(apd.jitter_estimate_ms(), 0.0);
|
||||
|
||||
// Second packet with 5ms jitter
|
||||
apd.update(25, 20); // arrived 5ms late
|
||||
assert!(
|
||||
apd.jitter_estimate_ms() > 0.0,
|
||||
"jitter estimate should be positive after jittery packet"
|
||||
);
|
||||
assert!(
|
||||
apd.jitter_estimate_ms() <= 5.0,
|
||||
"first jitter sample of 5ms with alpha=0.05 should not exceed 5ms, got {}",
|
||||
apd.jitter_estimate_ms()
|
||||
);
|
||||
|
||||
// Feed many packets with ~15ms jitter — EMA should converge
|
||||
for i in 2u64..500 {
|
||||
let expected_ms = i * 20;
|
||||
let arrival_ms = expected_ms + 15; // consistently 15ms late
|
||||
apd.update(arrival_ms, expected_ms);
|
||||
}
|
||||
// Steady-state: inter-arrival jitter = |35 - 20| = 0 actually,
|
||||
// because if every packet is 15ms late, delta_actual = 35-35 = 20,
|
||||
// same as expected. So jitter should converge toward 0.
|
||||
// Let's use variable jitter instead for a better test.
|
||||
let mut apd2 = AdaptivePlayoutDelay::new(3, 50);
|
||||
for i in 0u64..500 {
|
||||
let expected_ms = i * 20;
|
||||
// Alternate 0ms and 15ms late
|
||||
let extra = if i % 2 == 0 { 0 } else { 15 };
|
||||
let arrival_ms = expected_ms + extra;
|
||||
apd2.update(arrival_ms, expected_ms);
|
||||
}
|
||||
let est = apd2.jitter_estimate_ms();
|
||||
assert!(
|
||||
est > 5.0 && est < 20.0,
|
||||
"jitter estimate should converge near 15ms with alternating 0/15ms offsets, got {}",
|
||||
est
|
||||
);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// JitterBuffer with adaptive mode tests
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn jitter_buffer_adaptive_constructor() {
|
||||
let jb = JitterBuffer::new_adaptive(5, 250);
|
||||
assert!(jb.adaptive_delay().is_some());
|
||||
assert_eq!(jb.adaptive_delay().unwrap().target_delay(), 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn jitter_buffer_adaptive_push_with_arrival() {
|
||||
let mut jb = JitterBuffer::new_adaptive(3, 50);
|
||||
|
||||
// Push packets with consistent timing
|
||||
for i in 0u16..20 {
|
||||
let pkt = make_packet(i);
|
||||
let arrival_ms = i as u64 * 20;
|
||||
jb.push_with_arrival(pkt, arrival_ms);
|
||||
}
|
||||
|
||||
// With zero jitter, target should stay at min
|
||||
let ad = jb.adaptive_delay().unwrap();
|
||||
assert_eq!(ad.target_delay(), 3);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
//! - Identity = 32-byte seed → HKDF → Ed25519 (signing) + X25519 (encryption)
|
||||
//! - Fingerprint = SHA-256(Ed25519 pub)[:16]
|
||||
|
||||
pub mod bandwidth;
|
||||
pub mod codec_id;
|
||||
pub mod error;
|
||||
pub mod jitter;
|
||||
@@ -23,7 +24,11 @@ pub mod traits;
|
||||
// Re-export key types at crate root for convenience.
|
||||
pub use codec_id::{CodecId, QualityProfile};
|
||||
pub use error::*;
|
||||
pub use packet::{HangupReason, MediaHeader, MediaPacket, QualityReport, SignalMessage};
|
||||
pub use packet::{
|
||||
HangupReason, MediaHeader, MediaPacket, MiniFrameContext, MiniHeader, QualityReport,
|
||||
SignalMessage, TrunkEntry, TrunkFrame, FRAME_TYPE_FULL, FRAME_TYPE_MINI,
|
||||
};
|
||||
pub use bandwidth::{BandwidthEstimator, CongestionState};
|
||||
pub use quality::{AdaptiveQualityController, Tier};
|
||||
pub use session::{Session, SessionEvent, SessionState};
|
||||
pub use traits::*;
|
||||
|
||||
@@ -46,6 +46,23 @@ impl MediaHeader {
|
||||
/// Header size in bytes on the wire.
|
||||
pub const WIRE_SIZE: usize = 12;
|
||||
|
||||
/// Create a default header for raw PCM relay (used by WebSocket bridge).
|
||||
pub fn default_pcm() -> Self {
|
||||
Self {
|
||||
version: 0,
|
||||
is_repair: false,
|
||||
codec_id: CodecId::Opus24k,
|
||||
has_quality_report: false,
|
||||
fec_ratio_encoded: 0,
|
||||
seq: 0,
|
||||
timestamp: 0,
|
||||
fec_block: 0,
|
||||
fec_symbol: 0,
|
||||
reserved: 0,
|
||||
csrc_count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode the FEC ratio float (0.0-2.0+) to a 7-bit value (0-127).
|
||||
pub fn encode_fec_ratio(ratio: f32) -> u8 {
|
||||
// Map 0.0-2.0 to 0-127, clamping at 127
|
||||
@@ -191,6 +208,9 @@ pub struct MediaPacket {
|
||||
pub quality_report: Option<QualityReport>,
|
||||
}
|
||||
|
||||
/// Maximum number of mini-frames between full headers (1 second at 50 fps).
|
||||
pub const MINI_FRAME_FULL_INTERVAL: u32 = 50;
|
||||
|
||||
impl MediaPacket {
|
||||
/// Serialize the entire packet to bytes.
|
||||
pub fn to_bytes(&self) -> Bytes {
|
||||
@@ -239,6 +259,276 @@ impl MediaPacket {
|
||||
quality_report,
|
||||
})
|
||||
}
|
||||
|
||||
/// Serialize with mini-frame compression.
|
||||
///
|
||||
/// Uses the `MiniFrameContext` to decide whether to emit a compact 4-byte
|
||||
/// mini-header or a full 12-byte header. A full header is forced on the
|
||||
/// first frame and every `MINI_FRAME_FULL_INTERVAL` frames thereafter.
|
||||
pub fn encode_compact(
|
||||
&self,
|
||||
ctx: &mut MiniFrameContext,
|
||||
frames_since_full: &mut u32,
|
||||
) -> Bytes {
|
||||
if *frames_since_full > 0 && *frames_since_full < MINI_FRAME_FULL_INTERVAL {
|
||||
// --- mini frame ---
|
||||
let ts_delta = self
|
||||
.header
|
||||
.timestamp
|
||||
.wrapping_sub(ctx.last_header.unwrap().timestamp)
|
||||
as u16;
|
||||
let mini = MiniHeader {
|
||||
timestamp_delta_ms: ts_delta,
|
||||
payload_len: self.payload.len() as u16,
|
||||
};
|
||||
let total = 1 + MiniHeader::WIRE_SIZE + self.payload.len();
|
||||
let mut buf = BytesMut::with_capacity(total);
|
||||
buf.put_u8(FRAME_TYPE_MINI);
|
||||
mini.write_to(&mut buf);
|
||||
buf.put(self.payload.clone());
|
||||
// Advance the context so the next mini-frame delta is relative
|
||||
// to this frame, mirroring what expand() does on the decoder side.
|
||||
ctx.update(&self.header);
|
||||
*frames_since_full += 1;
|
||||
buf.freeze()
|
||||
} else {
|
||||
// --- full frame ---
|
||||
let qr_size = if self.quality_report.is_some() {
|
||||
QualityReport::WIRE_SIZE
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let total = 1 + MediaHeader::WIRE_SIZE + self.payload.len() + qr_size;
|
||||
let mut buf = BytesMut::with_capacity(total);
|
||||
buf.put_u8(FRAME_TYPE_FULL);
|
||||
self.header.write_to(&mut buf);
|
||||
buf.put(self.payload.clone());
|
||||
if let Some(ref qr) = self.quality_report {
|
||||
qr.write_to(&mut buf);
|
||||
}
|
||||
ctx.update(&self.header);
|
||||
*frames_since_full = 1; // next frame will be the 1st after full
|
||||
buf.freeze()
|
||||
}
|
||||
}
|
||||
|
||||
/// Decode from compact wire format (auto-detects full vs mini).
|
||||
///
|
||||
/// Returns `None` on malformed input or if a mini-frame arrives before any
|
||||
/// full header baseline has been established.
|
||||
pub fn decode_compact(buf: &[u8], ctx: &mut MiniFrameContext) -> Option<Self> {
|
||||
if buf.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let frame_type = buf[0];
|
||||
let rest = &buf[1..];
|
||||
|
||||
match frame_type {
|
||||
FRAME_TYPE_FULL => {
|
||||
let pkt = Self::from_bytes(Bytes::copy_from_slice(rest))?;
|
||||
ctx.update(&pkt.header);
|
||||
Some(pkt)
|
||||
}
|
||||
FRAME_TYPE_MINI => {
|
||||
if rest.len() < MiniHeader::WIRE_SIZE {
|
||||
return None;
|
||||
}
|
||||
let mut cursor = rest;
|
||||
let mini = MiniHeader::read_from(&mut cursor)?;
|
||||
let payload_start = 1 + MiniHeader::WIRE_SIZE;
|
||||
let payload_end = payload_start + mini.payload_len as usize;
|
||||
if buf.len() < payload_end {
|
||||
return None;
|
||||
}
|
||||
let payload = Bytes::copy_from_slice(&buf[payload_start..payload_end]);
|
||||
let header = ctx.expand(&mini)?;
|
||||
Some(Self {
|
||||
header,
|
||||
payload,
|
||||
quality_report: None,
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Trunking — multiplex multiple session packets into one QUIC datagram
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single entry inside a [`TrunkFrame`].
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct TrunkEntry {
|
||||
/// 2-byte session identifier (up to 65 536 sessions).
|
||||
pub session_id: [u8; 2],
|
||||
/// Encoded MediaPacket payload (already compressed).
|
||||
pub payload: Bytes,
|
||||
}
|
||||
|
||||
impl TrunkEntry {
|
||||
/// Per-entry wire overhead: 2 (session_id) + 2 (len).
|
||||
pub const OVERHEAD: usize = 4;
|
||||
}
|
||||
|
||||
/// A trunked frame carrying multiple session packets in one datagram.
|
||||
///
|
||||
/// Wire format:
|
||||
/// ```text
|
||||
/// [count:u16] [entry1] [entry2] ...
|
||||
/// ```
|
||||
/// Each entry:
|
||||
/// ```text
|
||||
/// [session_id:2] [len:u16] [payload:len]
|
||||
/// ```
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct TrunkFrame {
|
||||
pub packets: Vec<TrunkEntry>,
|
||||
}
|
||||
|
||||
impl TrunkFrame {
|
||||
/// Create an empty trunk frame.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
packets: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Append a session packet to the frame.
|
||||
pub fn push(&mut self, session_id: [u8; 2], payload: Bytes) {
|
||||
self.packets.push(TrunkEntry {
|
||||
session_id,
|
||||
payload,
|
||||
});
|
||||
}
|
||||
|
||||
/// Number of entries in the frame.
|
||||
pub fn len(&self) -> usize {
|
||||
self.packets.len()
|
||||
}
|
||||
|
||||
/// Whether the frame is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.packets.is_empty()
|
||||
}
|
||||
|
||||
/// Total wire size of the encoded frame.
|
||||
pub fn wire_size(&self) -> usize {
|
||||
// 2 bytes for count + each entry
|
||||
2 + self
|
||||
.packets
|
||||
.iter()
|
||||
.map(|e| TrunkEntry::OVERHEAD + e.payload.len())
|
||||
.sum::<usize>()
|
||||
}
|
||||
|
||||
/// Encode to wire bytes.
|
||||
pub fn encode(&self) -> Bytes {
|
||||
let mut buf = BytesMut::with_capacity(self.wire_size());
|
||||
buf.put_u16(self.packets.len() as u16);
|
||||
for entry in &self.packets {
|
||||
buf.put_slice(&entry.session_id);
|
||||
buf.put_u16(entry.payload.len() as u16);
|
||||
buf.put(entry.payload.clone());
|
||||
}
|
||||
buf.freeze()
|
||||
}
|
||||
|
||||
/// Decode from wire bytes. Returns `None` on malformed input.
|
||||
pub fn decode(buf: &[u8]) -> Option<Self> {
|
||||
if buf.len() < 2 {
|
||||
return None;
|
||||
}
|
||||
let mut cursor = &buf[..];
|
||||
let count = cursor.get_u16() as usize;
|
||||
let mut packets = Vec::with_capacity(count);
|
||||
for _ in 0..count {
|
||||
if cursor.remaining() < TrunkEntry::OVERHEAD {
|
||||
return None;
|
||||
}
|
||||
let mut session_id = [0u8; 2];
|
||||
session_id[0] = cursor.get_u8();
|
||||
session_id[1] = cursor.get_u8();
|
||||
let len = cursor.get_u16() as usize;
|
||||
if cursor.remaining() < len {
|
||||
return None;
|
||||
}
|
||||
let payload = Bytes::copy_from_slice(&cursor[..len]);
|
||||
cursor.advance(len);
|
||||
packets.push(TrunkEntry {
|
||||
session_id,
|
||||
payload,
|
||||
});
|
||||
}
|
||||
Some(Self { packets })
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Mini-frames — compact header for steady-state media packets
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Frame type tag: full MediaHeader follows.
|
||||
pub const FRAME_TYPE_FULL: u8 = 0x00;
|
||||
/// Frame type tag: MiniHeader follows (requires prior baseline).
|
||||
pub const FRAME_TYPE_MINI: u8 = 0x01;
|
||||
|
||||
/// Compact 4-byte header used after a full MediaHeader baseline has been
|
||||
/// established. Only the timestamp delta and payload length are transmitted;
|
||||
/// all other fields are inherited from the last full header.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub struct MiniHeader {
|
||||
/// Milliseconds elapsed since the last header's timestamp.
|
||||
pub timestamp_delta_ms: u16,
|
||||
/// Length of the payload that follows this header.
|
||||
pub payload_len: u16,
|
||||
}
|
||||
|
||||
impl MiniHeader {
|
||||
/// Header size in bytes on the wire.
|
||||
pub const WIRE_SIZE: usize = 4;
|
||||
|
||||
/// Serialize to a 4-byte buffer.
|
||||
pub fn write_to(&self, buf: &mut impl BufMut) {
|
||||
buf.put_u16(self.timestamp_delta_ms);
|
||||
buf.put_u16(self.payload_len);
|
||||
}
|
||||
|
||||
/// Deserialize from a buffer. Returns `None` if insufficient data.
|
||||
pub fn read_from(buf: &mut impl Buf) -> Option<Self> {
|
||||
if buf.remaining() < Self::WIRE_SIZE {
|
||||
return None;
|
||||
}
|
||||
Some(Self {
|
||||
timestamp_delta_ms: buf.get_u16(),
|
||||
payload_len: buf.get_u16(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Stateful context that expands [`MiniHeader`]s back into full
|
||||
/// [`MediaHeader`]s by tracking the last baseline header.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct MiniFrameContext {
|
||||
last_header: Option<MediaHeader>,
|
||||
}
|
||||
|
||||
impl MiniFrameContext {
|
||||
/// Record a full header as the new baseline for subsequent mini-frames.
|
||||
pub fn update(&mut self, header: &MediaHeader) {
|
||||
self.last_header = Some(*header);
|
||||
}
|
||||
|
||||
/// Expand a mini-header into a full [`MediaHeader`] using the stored
|
||||
/// baseline. Returns `None` if no baseline has been set yet.
|
||||
pub fn expand(&mut self, mini: &MiniHeader) -> Option<MediaHeader> {
|
||||
let base = self.last_header.as_ref()?;
|
||||
let mut expanded = *base;
|
||||
expanded.seq = base.seq.wrapping_add(1);
|
||||
expanded.timestamp = base.timestamp.wrapping_add(mini.timestamp_delta_ms as u32);
|
||||
self.last_header = Some(expanded);
|
||||
Some(expanded)
|
||||
}
|
||||
}
|
||||
|
||||
/// Signaling messages sent over the reliable QUIC stream.
|
||||
@@ -297,6 +587,64 @@ pub enum SignalMessage {
|
||||
|
||||
/// End the call.
|
||||
Hangup { reason: HangupReason },
|
||||
|
||||
/// featherChat bearer token for relay authentication.
|
||||
/// Sent as the first signal message when --auth-url is configured.
|
||||
AuthToken { token: String },
|
||||
|
||||
/// Put the call on hold (stop sending media, keep session alive).
|
||||
Hold,
|
||||
/// Resume a held call.
|
||||
Unhold,
|
||||
/// Mute request from the remote side (server-initiated mute, like IAX2 QUELCH).
|
||||
Mute,
|
||||
/// Unmute request from the remote side (like IAX2 UNQUELCH).
|
||||
Unmute,
|
||||
/// Transfer the call to another peer.
|
||||
Transfer {
|
||||
target_fingerprint: String,
|
||||
/// Optional relay address for the transfer target.
|
||||
relay_addr: Option<String>,
|
||||
},
|
||||
/// Acknowledge a transfer request.
|
||||
TransferAck,
|
||||
|
||||
/// Presence update from a peer relay (gossip protocol).
|
||||
/// Sent periodically over probe connections to share which fingerprints
|
||||
/// are connected to the sending relay.
|
||||
PresenceUpdate {
|
||||
/// Fingerprints currently connected to the sending relay.
|
||||
fingerprints: Vec<String>,
|
||||
/// Address of the sending relay (e.g., "192.168.1.10:4433").
|
||||
relay_addr: String,
|
||||
},
|
||||
|
||||
/// Ask a peer relay to look up a fingerprint in its registry.
|
||||
RouteQuery {
|
||||
fingerprint: String,
|
||||
ttl: u8,
|
||||
},
|
||||
/// Response to a route query.
|
||||
RouteResponse {
|
||||
fingerprint: String,
|
||||
found: bool,
|
||||
relay_chain: Vec<String>,
|
||||
},
|
||||
|
||||
/// Request to set up a forwarding session for a specific fingerprint.
|
||||
/// Sent over a relay link (`_relay` SNI) to ask the peer relay to
|
||||
/// create a room and forward media for the given session.
|
||||
SessionForward {
|
||||
session_id: String,
|
||||
target_fingerprint: String,
|
||||
source_relay: String,
|
||||
},
|
||||
/// Confirm that the forwarding session has been set up on the peer relay.
|
||||
/// The `room_name` tells the source relay which room to address media to.
|
||||
SessionForwardAck {
|
||||
session_id: String,
|
||||
room_name: String,
|
||||
},
|
||||
}
|
||||
|
||||
/// Reasons for ending a call.
|
||||
@@ -410,6 +758,112 @@ mod tests {
|
||||
assert_eq!(packet.quality_report, decoded.quality_report);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hold_unhold_serialize() {
|
||||
let hold = SignalMessage::Hold;
|
||||
let json = serde_json::to_string(&hold).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
assert!(matches!(decoded, SignalMessage::Hold));
|
||||
|
||||
let unhold = SignalMessage::Unhold;
|
||||
let json = serde_json::to_string(&unhold).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
assert!(matches!(decoded, SignalMessage::Unhold));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mute_unmute_serialize() {
|
||||
let mute = SignalMessage::Mute;
|
||||
let json = serde_json::to_string(&mute).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
assert!(matches!(decoded, SignalMessage::Mute));
|
||||
|
||||
let unmute = SignalMessage::Unmute;
|
||||
let json = serde_json::to_string(&unmute).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
assert!(matches!(decoded, SignalMessage::Unmute));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn transfer_serialize() {
|
||||
let transfer = SignalMessage::Transfer {
|
||||
target_fingerprint: "abc123".to_string(),
|
||||
relay_addr: Some("relay.example.com:4433".to_string()),
|
||||
};
|
||||
let json = serde_json::to_string(&transfer).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
match decoded {
|
||||
SignalMessage::Transfer {
|
||||
target_fingerprint,
|
||||
relay_addr,
|
||||
} => {
|
||||
assert_eq!(target_fingerprint, "abc123");
|
||||
assert_eq!(relay_addr.unwrap(), "relay.example.com:4433");
|
||||
}
|
||||
_ => panic!("expected Transfer variant"),
|
||||
}
|
||||
|
||||
// Also test with relay_addr = None
|
||||
let transfer_no_relay = SignalMessage::Transfer {
|
||||
target_fingerprint: "def456".to_string(),
|
||||
relay_addr: None,
|
||||
};
|
||||
let json = serde_json::to_string(&transfer_no_relay).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
match decoded {
|
||||
SignalMessage::Transfer {
|
||||
target_fingerprint,
|
||||
relay_addr,
|
||||
} => {
|
||||
assert_eq!(target_fingerprint, "def456");
|
||||
assert!(relay_addr.is_none());
|
||||
}
|
||||
_ => panic!("expected Transfer variant"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn transfer_ack_serialize() {
|
||||
let ack = SignalMessage::TransferAck;
|
||||
let json = serde_json::to_string(&ack).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
assert!(matches!(decoded, SignalMessage::TransferAck));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn presence_update_signal_roundtrip() {
|
||||
let msg = SignalMessage::PresenceUpdate {
|
||||
fingerprints: vec!["aabb".to_string(), "ccdd".to_string()],
|
||||
relay_addr: "10.0.0.1:4433".to_string(),
|
||||
};
|
||||
let json = serde_json::to_string(&msg).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
match decoded {
|
||||
SignalMessage::PresenceUpdate { fingerprints, relay_addr } => {
|
||||
assert_eq!(fingerprints.len(), 2);
|
||||
assert!(fingerprints.contains(&"aabb".to_string()));
|
||||
assert!(fingerprints.contains(&"ccdd".to_string()));
|
||||
assert_eq!(relay_addr, "10.0.0.1:4433");
|
||||
}
|
||||
_ => panic!("expected PresenceUpdate variant"),
|
||||
}
|
||||
|
||||
// Empty fingerprints list
|
||||
let msg_empty = SignalMessage::PresenceUpdate {
|
||||
fingerprints: vec![],
|
||||
relay_addr: "10.0.0.2:4433".to_string(),
|
||||
};
|
||||
let json = serde_json::to_string(&msg_empty).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
match decoded {
|
||||
SignalMessage::PresenceUpdate { fingerprints, relay_addr } => {
|
||||
assert!(fingerprints.is_empty());
|
||||
assert_eq!(relay_addr, "10.0.0.2:4433");
|
||||
}
|
||||
_ => panic!("expected PresenceUpdate variant"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fec_ratio_encode_decode() {
|
||||
let ratio = 0.5;
|
||||
@@ -421,4 +875,247 @@ mod tests {
|
||||
let encoded_max = MediaHeader::encode_fec_ratio(ratio_max);
|
||||
assert_eq!(encoded_max, 127);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// TrunkFrame tests
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn trunk_frame_encode_decode() {
|
||||
let mut frame = TrunkFrame::new();
|
||||
frame.push([0, 1], Bytes::from_static(b"hello"));
|
||||
frame.push([0, 2], Bytes::from_static(b"world!"));
|
||||
frame.push([1, 0], Bytes::from_static(b"x"));
|
||||
assert_eq!(frame.len(), 3);
|
||||
|
||||
let encoded = frame.encode();
|
||||
let decoded = TrunkFrame::decode(&encoded).expect("decode failed");
|
||||
assert_eq!(decoded.len(), 3);
|
||||
assert_eq!(decoded.packets[0].session_id, [0, 1]);
|
||||
assert_eq!(decoded.packets[0].payload, Bytes::from_static(b"hello"));
|
||||
assert_eq!(decoded.packets[1].session_id, [0, 2]);
|
||||
assert_eq!(decoded.packets[1].payload, Bytes::from_static(b"world!"));
|
||||
assert_eq!(decoded.packets[2].session_id, [1, 0]);
|
||||
assert_eq!(decoded.packets[2].payload, Bytes::from_static(b"x"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trunk_frame_empty() {
|
||||
let frame = TrunkFrame::new();
|
||||
assert!(frame.is_empty());
|
||||
assert_eq!(frame.len(), 0);
|
||||
|
||||
let encoded = frame.encode();
|
||||
// Just the 2-byte count header with value 0.
|
||||
assert_eq!(encoded.len(), 2);
|
||||
assert_eq!(&encoded[..], &[0, 0]);
|
||||
|
||||
let decoded = TrunkFrame::decode(&encoded).unwrap();
|
||||
assert!(decoded.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trunk_entry_wire_size() {
|
||||
// Each entry overhead must be exactly 4 bytes (2 session_id + 2 len).
|
||||
assert_eq!(TrunkEntry::OVERHEAD, 4);
|
||||
|
||||
// Verify empirically: one entry with a 10-byte payload should produce
|
||||
// 2 (count) + 4 (overhead) + 10 (payload) = 16 bytes total.
|
||||
let mut frame = TrunkFrame::new();
|
||||
frame.push([0xAB, 0xCD], Bytes::from(vec![0u8; 10]));
|
||||
let encoded = frame.encode();
|
||||
assert_eq!(encoded.len(), 2 + 4 + 10);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// MiniHeader / MiniFrameContext tests
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn mini_header_encode_decode() {
|
||||
let mini = MiniHeader {
|
||||
timestamp_delta_ms: 20,
|
||||
payload_len: 160,
|
||||
};
|
||||
let mut buf = BytesMut::new();
|
||||
mini.write_to(&mut buf);
|
||||
|
||||
let mut cursor = &buf[..];
|
||||
let decoded = MiniHeader::read_from(&mut cursor).unwrap();
|
||||
assert_eq!(mini, decoded);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mini_header_wire_size() {
|
||||
let mini = MiniHeader {
|
||||
timestamp_delta_ms: 0xFFFF,
|
||||
payload_len: 0xFFFF,
|
||||
};
|
||||
let mut buf = BytesMut::new();
|
||||
mini.write_to(&mut buf);
|
||||
assert_eq!(buf.len(), 4);
|
||||
assert_eq!(MiniHeader::WIRE_SIZE, 4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mini_frame_context_expand() {
|
||||
let baseline = MediaHeader {
|
||||
version: 0,
|
||||
is_repair: false,
|
||||
codec_id: CodecId::Opus24k,
|
||||
has_quality_report: false,
|
||||
fec_ratio_encoded: 10,
|
||||
seq: 100,
|
||||
timestamp: 1000,
|
||||
fec_block: 5,
|
||||
fec_symbol: 0,
|
||||
reserved: 0,
|
||||
csrc_count: 0,
|
||||
};
|
||||
|
||||
let mut ctx = MiniFrameContext::default();
|
||||
ctx.update(&baseline);
|
||||
|
||||
// First expansion
|
||||
let mini1 = MiniHeader {
|
||||
timestamp_delta_ms: 20,
|
||||
payload_len: 80,
|
||||
};
|
||||
let h1 = ctx.expand(&mini1).unwrap();
|
||||
assert_eq!(h1.seq, 101);
|
||||
assert_eq!(h1.timestamp, 1020);
|
||||
assert_eq!(h1.codec_id, CodecId::Opus24k);
|
||||
assert_eq!(h1.fec_block, 5);
|
||||
|
||||
// Second expansion — builds on expanded h1
|
||||
let mini2 = MiniHeader {
|
||||
timestamp_delta_ms: 20,
|
||||
payload_len: 80,
|
||||
};
|
||||
let h2 = ctx.expand(&mini2).unwrap();
|
||||
assert_eq!(h2.seq, 102);
|
||||
assert_eq!(h2.timestamp, 1040);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mini_frame_context_no_baseline() {
|
||||
let mut ctx = MiniFrameContext::default();
|
||||
let mini = MiniHeader {
|
||||
timestamp_delta_ms: 20,
|
||||
payload_len: 80,
|
||||
};
|
||||
assert!(ctx.expand(&mini).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn full_vs_mini_size_comparison() {
|
||||
// Full frame on wire: 1 byte type tag + 12 byte MediaHeader = 13
|
||||
let full_size = 1 + MediaHeader::WIRE_SIZE;
|
||||
assert_eq!(full_size, 13);
|
||||
|
||||
// Mini frame on wire: 1 byte type tag + 4 byte MiniHeader = 5
|
||||
let mini_size = 1 + MiniHeader::WIRE_SIZE;
|
||||
assert_eq!(mini_size, 5);
|
||||
|
||||
// Verify the constants match expectations
|
||||
assert_eq!(FRAME_TYPE_FULL, 0x00);
|
||||
assert_eq!(FRAME_TYPE_MINI, 0x01);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// encode_compact / decode_compact tests
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
fn make_media_packet(seq: u16, ts: u32, payload: &[u8]) -> MediaPacket {
|
||||
MediaPacket {
|
||||
header: MediaHeader {
|
||||
version: 0,
|
||||
is_repair: false,
|
||||
codec_id: CodecId::Opus24k,
|
||||
has_quality_report: false,
|
||||
fec_ratio_encoded: 10,
|
||||
seq,
|
||||
timestamp: ts,
|
||||
fec_block: 0,
|
||||
fec_symbol: 0,
|
||||
reserved: 0,
|
||||
csrc_count: 0,
|
||||
},
|
||||
payload: Bytes::from(payload.to_vec()),
|
||||
quality_report: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mini_frame_encode_decode_sequence() {
|
||||
let mut enc_ctx = MiniFrameContext::default();
|
||||
let mut dec_ctx = MiniFrameContext::default();
|
||||
let mut frames_since_full: u32 = 0;
|
||||
|
||||
let packets: Vec<MediaPacket> = (0..5)
|
||||
.map(|i| make_media_packet(i, i as u32 * 20, b"audio"))
|
||||
.collect();
|
||||
|
||||
for (i, pkt) in packets.iter().enumerate() {
|
||||
let wire = pkt.encode_compact(&mut enc_ctx, &mut frames_since_full);
|
||||
|
||||
if i == 0 {
|
||||
// First frame must be full
|
||||
assert_eq!(wire[0], FRAME_TYPE_FULL, "frame 0 should be FULL");
|
||||
} else {
|
||||
// Subsequent frames should be mini
|
||||
assert_eq!(wire[0], FRAME_TYPE_MINI, "frame {i} should be MINI");
|
||||
// Mini wire: 1 (tag) + 4 (mini header) + payload
|
||||
assert_eq!(wire.len(), 1 + MiniHeader::WIRE_SIZE + pkt.payload.len());
|
||||
}
|
||||
|
||||
let decoded = MediaPacket::decode_compact(&wire, &mut dec_ctx)
|
||||
.unwrap_or_else(|| panic!("decode failed at frame {i}"));
|
||||
assert_eq!(decoded.header.seq, pkt.header.seq);
|
||||
assert_eq!(decoded.header.timestamp, pkt.header.timestamp);
|
||||
assert_eq!(decoded.payload, pkt.payload);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mini_frame_periodic_full() {
|
||||
let mut ctx = MiniFrameContext::default();
|
||||
let mut frames_since_full: u32 = 0;
|
||||
|
||||
// Encode MINI_FRAME_FULL_INTERVAL + 1 frames. Frame 0 and frame 50
|
||||
// should be FULL, everything in between should be MINI.
|
||||
for i in 0..=MINI_FRAME_FULL_INTERVAL {
|
||||
let pkt = make_media_packet(i as u16, i * 20, b"data");
|
||||
let wire = pkt.encode_compact(&mut ctx, &mut frames_since_full);
|
||||
|
||||
if i == 0 || i == MINI_FRAME_FULL_INTERVAL {
|
||||
assert_eq!(
|
||||
wire[0], FRAME_TYPE_FULL,
|
||||
"frame {i} should be FULL"
|
||||
);
|
||||
} else {
|
||||
assert_eq!(
|
||||
wire[0], FRAME_TYPE_MINI,
|
||||
"frame {i} should be MINI"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mini_frame_disabled() {
|
||||
// Simulate disabled mini-frames by always keeping frames_since_full at 0
|
||||
// (which is what the encoder does when the feature is off).
|
||||
let mut ctx = MiniFrameContext::default();
|
||||
|
||||
for i in 0..10u16 {
|
||||
let pkt = make_media_packet(i, i as u32 * 20, b"payload");
|
||||
// When mini-frames are disabled, the encoder always passes
|
||||
// frames_since_full = 0 equivalent by never using encode_compact.
|
||||
// We test the raw path: frames_since_full forced to 0 every time.
|
||||
let mut frames_since_full: u32 = 0;
|
||||
let wire = pkt.encode_compact(&mut ctx, &mut frames_since_full);
|
||||
assert_eq!(wire[0], FRAME_TYPE_FULL, "frame {i} should be FULL when disabled");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,9 +20,20 @@ bytes = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
toml = "0.8"
|
||||
anyhow = "1"
|
||||
reqwest = { version = "0.12", features = ["json"] }
|
||||
serde_json = "1"
|
||||
rustls = { version = "0.23", default-features = false, features = ["ring", "std"] }
|
||||
quinn = { workspace = true }
|
||||
prometheus = "0.13"
|
||||
axum = { version = "0.7", default-features = false, features = ["tokio", "http1", "ws"] }
|
||||
tower-http = { version = "0.6", features = ["fs"] }
|
||||
futures-util = "0.3"
|
||||
|
||||
[[bin]]
|
||||
name = "wzp-relay"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
|
||||
wzp-transport = { workspace = true }
|
||||
wzp-client = { workspace = true }
|
||||
|
||||
106
crates/wzp-relay/src/auth.rs
Normal file
106
crates/wzp-relay/src/auth.rs
Normal file
@@ -0,0 +1,106 @@
|
||||
//! featherChat token authentication.
|
||||
//!
|
||||
//! When `--auth-url` is configured, the relay validates bearer tokens
|
||||
//! against featherChat's `POST /v1/auth/validate` endpoint before
|
||||
//! allowing clients to join rooms.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::{info, warn};
|
||||
|
||||
/// Request body for featherChat token validation.
|
||||
#[derive(Serialize)]
|
||||
struct ValidateRequest {
|
||||
token: String,
|
||||
}
|
||||
|
||||
/// Response from featherChat token validation.
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct ValidateResponse {
|
||||
pub valid: bool,
|
||||
pub fingerprint: Option<String>,
|
||||
pub alias: Option<String>,
|
||||
}
|
||||
|
||||
/// Validated client identity.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AuthenticatedClient {
|
||||
pub fingerprint: String,
|
||||
pub alias: Option<String>,
|
||||
}
|
||||
|
||||
/// Validate a bearer token against featherChat's auth endpoint.
|
||||
///
|
||||
/// Calls `POST {auth_url}` with `{ "token": "..." }`.
|
||||
/// Returns the client identity if valid, or an error string.
|
||||
pub async fn validate_token(
|
||||
auth_url: &str,
|
||||
token: &str,
|
||||
) -> Result<AuthenticatedClient, String> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(5))
|
||||
.build()
|
||||
.map_err(|e| format!("http client error: {e}"))?;
|
||||
|
||||
let resp = client
|
||||
.post(auth_url)
|
||||
.json(&ValidateRequest {
|
||||
token: token.to_string(),
|
||||
})
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| format!("auth request failed: {e}"))?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("auth endpoint returned {}", resp.status()));
|
||||
}
|
||||
|
||||
let body: ValidateResponse = resp
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| format!("invalid auth response: {e}"))?;
|
||||
|
||||
if body.valid {
|
||||
let fingerprint = body
|
||||
.fingerprint
|
||||
.ok_or_else(|| "valid response missing fingerprint".to_string())?;
|
||||
info!(%fingerprint, alias = ?body.alias, "token validated");
|
||||
Ok(AuthenticatedClient {
|
||||
fingerprint,
|
||||
alias: body.alias,
|
||||
})
|
||||
} else {
|
||||
warn!("token validation failed");
|
||||
Err("invalid token".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn validate_request_serializes() {
|
||||
let req = ValidateRequest {
|
||||
token: "abc123".to_string(),
|
||||
};
|
||||
let json = serde_json::to_string(&req).unwrap();
|
||||
assert!(json.contains("abc123"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_response_deserializes() {
|
||||
let json = r#"{"valid": true, "fingerprint": "abcd1234", "alias": "manwe"}"#;
|
||||
let resp: ValidateResponse = serde_json::from_str(json).unwrap();
|
||||
assert!(resp.valid);
|
||||
assert_eq!(resp.fingerprint.unwrap(), "abcd1234");
|
||||
assert_eq!(resp.alias.unwrap(), "manwe");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_response_deserializes() {
|
||||
let json = r#"{"valid": false}"#;
|
||||
let resp: ValidateResponse = serde_json::from_str(json).unwrap();
|
||||
assert!(!resp.valid);
|
||||
assert!(resp.fingerprint.is_none());
|
||||
}
|
||||
}
|
||||
@@ -19,6 +19,31 @@ pub struct RelayConfig {
|
||||
pub jitter_max_depth: usize,
|
||||
/// Logging level (trace, debug, info, warn, error).
|
||||
pub log_level: String,
|
||||
/// featherChat auth validation URL (e.g., "https://chat.example.com/v1/auth/validate").
|
||||
/// If set, clients must present a valid token before joining rooms.
|
||||
pub auth_url: Option<String>,
|
||||
/// Port for the Prometheus metrics HTTP endpoint (e.g., 9090).
|
||||
/// If None, the metrics endpoint is disabled.
|
||||
pub metrics_port: Option<u16>,
|
||||
/// Peer relay addresses to probe for health monitoring.
|
||||
/// Each target gets a persistent QUIC connection sending 1 Ping/s.
|
||||
#[serde(default)]
|
||||
pub probe_targets: Vec<SocketAddr>,
|
||||
/// Enable mesh mode: each relay probes all configured targets concurrently.
|
||||
/// Discovery is manual via multiple --probe flags; this flag signals intent.
|
||||
#[serde(default)]
|
||||
pub probe_mesh: bool,
|
||||
/// Enable trunk batching for outgoing media in room mode.
|
||||
/// When true, packets destined for the same receiver are accumulated into
|
||||
/// [`TrunkFrame`]s and flushed every 5 ms (or when the batcher is full),
|
||||
/// reducing per-packet QUIC datagram overhead.
|
||||
#[serde(default)]
|
||||
pub trunking_enabled: bool,
|
||||
/// Port for the WebSocket listener (browser clients connect here).
|
||||
/// If None, WebSocket support is disabled.
|
||||
pub ws_port: Option<u16>,
|
||||
/// Directory to serve static files from (HTML/JS/WASM for web clients).
|
||||
pub static_dir: Option<String>,
|
||||
}
|
||||
|
||||
impl Default for RelayConfig {
|
||||
@@ -30,6 +55,13 @@ impl Default for RelayConfig {
|
||||
jitter_target_depth: 50,
|
||||
jitter_max_depth: 250,
|
||||
log_level: "info".to_string(),
|
||||
auth_url: None,
|
||||
metrics_port: None,
|
||||
probe_targets: Vec::new(),
|
||||
probe_mesh: false,
|
||||
trunking_enabled: false,
|
||||
ws_port: None,
|
||||
static_dir: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,12 +7,22 @@
|
||||
//! It operates on FEC-protected packets, managing loss recovery and adaptive
|
||||
//! quality transitions.
|
||||
|
||||
pub mod auth;
|
||||
pub mod config;
|
||||
pub mod handshake;
|
||||
pub mod metrics;
|
||||
pub mod pipeline;
|
||||
pub mod presence;
|
||||
pub mod probe;
|
||||
pub mod relay_link;
|
||||
pub mod room;
|
||||
pub mod route;
|
||||
pub mod session_mgr;
|
||||
pub mod trunk;
|
||||
pub mod ws;
|
||||
|
||||
pub use config::RelayConfig;
|
||||
pub use handshake::accept_handshake;
|
||||
pub use pipeline::{PipelineConfig, PipelineStats, RelayPipeline};
|
||||
pub use session_mgr::{RelaySession, SessionId, SessionManager};
|
||||
pub use session_mgr::{RelaySession, SessionId, SessionInfo, SessionManager, SessionState};
|
||||
pub use trunk::TrunkBatcher;
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
//! WarzonePhone relay daemon entry point.
|
||||
//!
|
||||
//! Accepts client QUIC connections and optionally forwards media to a remote
|
||||
//! relay. Each client connection spawns two tasks for bidirectional forwarding
|
||||
//! through the relay pipeline (FEC decode -> jitter -> FEC encode).
|
||||
//! Supports two modes:
|
||||
//! - **Room mode** (default): clients join named rooms, packets forwarded to all others (SFU)
|
||||
//! - **Forward mode** (--remote): all traffic forwarded to a remote relay
|
||||
//!
|
||||
//! Room names are passed via the QUIC SNI (server_name) field.
|
||||
//! The web bridge connects with room name as SNI.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
@@ -10,16 +13,16 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{error, info, warn};
|
||||
use tracing::{error, info};
|
||||
|
||||
use wzp_proto::MediaTransport;
|
||||
use wzp_relay::config::RelayConfig;
|
||||
use wzp_relay::metrics::RelayMetrics;
|
||||
use wzp_relay::pipeline::{PipelineConfig, RelayPipeline};
|
||||
use wzp_relay::presence::PresenceRegistry;
|
||||
use wzp_relay::room::{self, RoomManager};
|
||||
use wzp_relay::session_mgr::SessionManager;
|
||||
|
||||
/// Parse CLI arguments using std::env::args().
|
||||
///
|
||||
/// Usage: wzp-relay [--listen <addr>] [--remote <addr>]
|
||||
fn parse_args() -> RelayConfig {
|
||||
let mut config = RelayConfig::default();
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
@@ -28,39 +31,86 @@ fn parse_args() -> RelayConfig {
|
||||
match args[i].as_str() {
|
||||
"--listen" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
config.listen_addr = args[i]
|
||||
.parse::<SocketAddr>()
|
||||
.expect("invalid --listen address");
|
||||
} else {
|
||||
eprintln!("--listen requires an address argument");
|
||||
std::process::exit(1);
|
||||
}
|
||||
config.listen_addr = args.get(i).expect("--listen requires an address")
|
||||
.parse().expect("invalid --listen address");
|
||||
}
|
||||
"--remote" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
config.remote_relay = Some(
|
||||
args[i]
|
||||
.parse::<SocketAddr>()
|
||||
.expect("invalid --remote address"),
|
||||
);
|
||||
} else {
|
||||
eprintln!("--remote requires an address argument");
|
||||
std::process::exit(1);
|
||||
}
|
||||
config.remote_relay = Some(
|
||||
args.get(i).expect("--remote requires an address")
|
||||
.parse().expect("invalid --remote address"),
|
||||
);
|
||||
}
|
||||
"--auth-url" => {
|
||||
i += 1;
|
||||
config.auth_url = Some(
|
||||
args.get(i).expect("--auth-url requires a URL").to_string(),
|
||||
);
|
||||
}
|
||||
"--metrics-port" => {
|
||||
i += 1;
|
||||
config.metrics_port = Some(
|
||||
args.get(i).expect("--metrics-port requires a port number")
|
||||
.parse().expect("invalid --metrics-port number"),
|
||||
);
|
||||
}
|
||||
"--probe" => {
|
||||
i += 1;
|
||||
let addr: SocketAddr = args.get(i)
|
||||
.expect("--probe requires an address")
|
||||
.parse()
|
||||
.expect("invalid --probe address");
|
||||
config.probe_targets.push(addr);
|
||||
}
|
||||
"--probe-mesh" => {
|
||||
config.probe_mesh = true;
|
||||
}
|
||||
"--trunking" => {
|
||||
config.trunking_enabled = true;
|
||||
}
|
||||
"--ws-port" => {
|
||||
i += 1;
|
||||
config.ws_port = Some(
|
||||
args.get(i).expect("--ws-port requires a port number")
|
||||
.parse().expect("invalid --ws-port number"),
|
||||
);
|
||||
}
|
||||
"--static-dir" => {
|
||||
i += 1;
|
||||
config.static_dir = Some(
|
||||
args.get(i).expect("--static-dir requires a directory path").to_string(),
|
||||
);
|
||||
}
|
||||
"--mesh-status" => {
|
||||
// Print mesh table from a fresh registry and exit.
|
||||
// In practice this is useful after the relay has been running;
|
||||
// here we just demonstrate the formatter with an empty registry.
|
||||
let m = RelayMetrics::new();
|
||||
print!("{}", wzp_relay::probe::mesh_summary(m.registry()));
|
||||
std::process::exit(0);
|
||||
}
|
||||
"--help" | "-h" => {
|
||||
eprintln!("Usage: wzp-relay [--listen <addr>] [--remote <addr>]");
|
||||
eprintln!("Usage: wzp-relay [--listen <addr>] [--remote <addr>] [--auth-url <url>] [--metrics-port <port>] [--probe <addr>]... [--probe-mesh] [--mesh-status]");
|
||||
eprintln!();
|
||||
eprintln!("Options:");
|
||||
eprintln!(" --listen <addr> Listen address (default: 0.0.0.0:4433)");
|
||||
eprintln!(" --remote <addr> Remote relay address for forwarding");
|
||||
eprintln!(" --listen <addr> Listen address (default: 0.0.0.0:4433)");
|
||||
eprintln!(" --remote <addr> Remote relay for forwarding (disables room mode)");
|
||||
eprintln!(" --auth-url <url> featherChat auth endpoint (e.g., https://chat.example.com/v1/auth/validate)");
|
||||
eprintln!(" When set, clients must send a bearer token as first signal message.");
|
||||
eprintln!(" --metrics-port <port> Prometheus metrics HTTP port (e.g., 9090). Disabled if not set.");
|
||||
eprintln!(" --probe <addr> Peer relay to probe for health monitoring (repeatable).");
|
||||
eprintln!(" --probe-mesh Enable mesh mode (mark config flag, probes all --probe targets).");
|
||||
eprintln!(" --mesh-status Print mesh health table and exit (diagnostic).");
|
||||
eprintln!(" --trunking Enable trunk batching for outgoing media in room mode.");
|
||||
eprintln!(" --ws-port <port> WebSocket listener port for browser clients (e.g., 8080).");
|
||||
eprintln!(" --static-dir <dir> Directory to serve static files from (HTML/JS/WASM).");
|
||||
eprintln!();
|
||||
eprintln!("Room mode (default):");
|
||||
eprintln!(" Clients join rooms by name. Packets forwarded to all others (SFU).");
|
||||
std::process::exit(0);
|
||||
}
|
||||
other => {
|
||||
eprintln!("unknown argument: {other}");
|
||||
eprintln!("Usage: wzp-relay [--listen <addr>] [--remote <addr>]");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
@@ -69,249 +119,437 @@ fn parse_args() -> RelayConfig {
|
||||
config
|
||||
}
|
||||
|
||||
/// Shared packet counters for periodic logging.
|
||||
struct RelayStats {
|
||||
upstream_packets: AtomicU64,
|
||||
downstream_packets: AtomicU64,
|
||||
}
|
||||
|
||||
/// Run the upstream forwarding task: client -> pipeline -> remote.
|
||||
async fn run_upstream(
|
||||
client_transport: Arc<wzp_transport::QuinnTransport>,
|
||||
remote_transport: Arc<wzp_transport::QuinnTransport>,
|
||||
client: Arc<wzp_transport::QuinnTransport>,
|
||||
remote: Arc<wzp_transport::QuinnTransport>,
|
||||
pipeline: Arc<Mutex<RelayPipeline>>,
|
||||
stats: Arc<RelayStats>,
|
||||
) {
|
||||
loop {
|
||||
let packet = match client_transport.recv_media().await {
|
||||
Ok(Some(pkt)) => pkt,
|
||||
Ok(None) => {
|
||||
info!("client connection closed (upstream)");
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("upstream recv error: {e}");
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
// Process through pipeline
|
||||
let outbound = {
|
||||
let mut pipe = pipeline.lock().await;
|
||||
let decoded = pipe.ingest(packet);
|
||||
let mut out = Vec::new();
|
||||
for pkt in decoded {
|
||||
out.extend(pipe.prepare_outbound(pkt));
|
||||
}
|
||||
out
|
||||
};
|
||||
|
||||
// Forward to remote
|
||||
for pkt in &outbound {
|
||||
if let Err(e) = remote_transport.send_media(pkt).await {
|
||||
error!("upstream send error: {e}");
|
||||
return;
|
||||
match client.recv_media().await {
|
||||
Ok(Some(pkt)) => {
|
||||
let outbound = {
|
||||
let mut pipe = pipeline.lock().await;
|
||||
let decoded = pipe.ingest(pkt);
|
||||
let mut out = Vec::new();
|
||||
for p in decoded { out.extend(pipe.prepare_outbound(p)); }
|
||||
out
|
||||
};
|
||||
for p in &outbound {
|
||||
if let Err(e) = remote.send_media(p).await {
|
||||
error!("upstream send: {e}");
|
||||
return;
|
||||
}
|
||||
}
|
||||
stats.upstream_packets.fetch_add(outbound.len() as u64, Ordering::Relaxed);
|
||||
}
|
||||
Ok(None) => { info!("client disconnected (upstream)"); break; }
|
||||
Err(e) => { error!("upstream recv: {e}"); break; }
|
||||
}
|
||||
stats
|
||||
.upstream_packets
|
||||
.fetch_add(outbound.len() as u64, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
/// Run the downstream forwarding task: remote -> pipeline -> client.
|
||||
async fn run_downstream(
|
||||
client_transport: Arc<wzp_transport::QuinnTransport>,
|
||||
remote_transport: Arc<wzp_transport::QuinnTransport>,
|
||||
client: Arc<wzp_transport::QuinnTransport>,
|
||||
remote: Arc<wzp_transport::QuinnTransport>,
|
||||
pipeline: Arc<Mutex<RelayPipeline>>,
|
||||
stats: Arc<RelayStats>,
|
||||
) {
|
||||
loop {
|
||||
let packet = match remote_transport.recv_media().await {
|
||||
Ok(Some(pkt)) => pkt,
|
||||
Ok(None) => {
|
||||
info!("remote connection closed (downstream)");
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("downstream recv error: {e}");
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
// Process through pipeline
|
||||
let outbound = {
|
||||
let mut pipe = pipeline.lock().await;
|
||||
let decoded = pipe.ingest(packet);
|
||||
let mut out = Vec::new();
|
||||
for pkt in decoded {
|
||||
out.extend(pipe.prepare_outbound(pkt));
|
||||
}
|
||||
out
|
||||
};
|
||||
|
||||
// Forward to client
|
||||
for pkt in &outbound {
|
||||
if let Err(e) = client_transport.send_media(pkt).await {
|
||||
error!("downstream send error: {e}");
|
||||
return;
|
||||
match remote.recv_media().await {
|
||||
Ok(Some(pkt)) => {
|
||||
let outbound = {
|
||||
let mut pipe = pipeline.lock().await;
|
||||
let decoded = pipe.ingest(pkt);
|
||||
let mut out = Vec::new();
|
||||
for p in decoded { out.extend(pipe.prepare_outbound(p)); }
|
||||
out
|
||||
};
|
||||
for p in &outbound {
|
||||
if let Err(e) = client.send_media(p).await {
|
||||
error!("downstream send: {e}");
|
||||
return;
|
||||
}
|
||||
}
|
||||
stats.downstream_packets.fetch_add(outbound.len() as u64, Ordering::Relaxed);
|
||||
}
|
||||
Ok(None) => { info!("remote disconnected (downstream)"); break; }
|
||||
Err(e) => { error!("downstream recv: {e}"); break; }
|
||||
}
|
||||
stats
|
||||
.downstream_packets
|
||||
.fetch_add(outbound.len() as u64, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let config = parse_args();
|
||||
|
||||
tracing_subscriber::fmt().init();
|
||||
rustls::crypto::ring::default_provider()
|
||||
.install_default()
|
||||
.expect("failed to install rustls crypto provider");
|
||||
|
||||
info!(addr = %config.listen_addr, "WarzonePhone relay starting");
|
||||
if let Some(remote) = config.remote_relay {
|
||||
info!(%remote, "will connect to remote relay");
|
||||
// Presence registry
|
||||
let presence = Arc::new(Mutex::new(PresenceRegistry::new()));
|
||||
|
||||
// Route resolver
|
||||
let route_resolver = Arc::new(wzp_relay::route::RouteResolver::new(config.listen_addr));
|
||||
|
||||
// Prometheus metrics
|
||||
let metrics = Arc::new(RelayMetrics::new());
|
||||
if let Some(port) = config.metrics_port {
|
||||
let m = metrics.clone();
|
||||
let p = Some(presence.clone());
|
||||
let rr = Some(route_resolver.clone());
|
||||
tokio::spawn(wzp_relay::metrics::serve_metrics(port, m, p, rr));
|
||||
}
|
||||
|
||||
let (server_config, _cert_der) = wzp_transport::server_config();
|
||||
// Generate ephemeral relay identity for crypto handshake
|
||||
let relay_seed = wzp_crypto::Seed::generate();
|
||||
let relay_fp = relay_seed.derive_identity().public_identity().fingerprint;
|
||||
info!(addr = %config.listen_addr, fingerprint = %relay_fp, "WarzonePhone relay starting");
|
||||
|
||||
let (server_config, _cert) = wzp_transport::server_config();
|
||||
let endpoint = wzp_transport::create_endpoint(config.listen_addr, Some(server_config))?;
|
||||
|
||||
let sessions = Arc::new(Mutex::new(SessionManager::new(config.max_sessions)));
|
||||
|
||||
// If a remote relay is configured, connect to it on startup
|
||||
// Forward mode
|
||||
let remote_transport: Option<Arc<wzp_transport::QuinnTransport>> =
|
||||
if let Some(remote_addr) = config.remote_relay {
|
||||
info!(%remote_addr, "connecting to remote relay");
|
||||
info!(%remote_addr, "forward mode → remote relay");
|
||||
let client_cfg = wzp_transport::client_config();
|
||||
let remote_conn =
|
||||
wzp_transport::connect(&endpoint, remote_addr, "localhost", client_cfg).await?;
|
||||
info!(%remote_addr, "connected to remote relay");
|
||||
Some(Arc::new(wzp_transport::QuinnTransport::new(remote_conn)))
|
||||
let conn = wzp_transport::connect(&endpoint, remote_addr, "localhost", client_cfg).await?;
|
||||
Some(Arc::new(wzp_transport::QuinnTransport::new(conn)))
|
||||
} else {
|
||||
info!("room mode — clients join named rooms (SFU)");
|
||||
None
|
||||
};
|
||||
|
||||
// Room manager (room mode only)
|
||||
let room_mgr = Arc::new(Mutex::new(RoomManager::new()));
|
||||
|
||||
// Session manager — enforces max concurrent sessions
|
||||
let session_mgr = Arc::new(Mutex::new(SessionManager::new(config.max_sessions)));
|
||||
|
||||
// Spawn inter-relay health probes via ProbeMesh coordinator
|
||||
if !config.probe_targets.is_empty() {
|
||||
let mesh = wzp_relay::probe::ProbeMesh::new(
|
||||
config.probe_targets.clone(),
|
||||
metrics.registry(),
|
||||
Some(presence.clone()),
|
||||
);
|
||||
info!(
|
||||
targets = mesh.target_count(),
|
||||
mesh = config.probe_mesh,
|
||||
"spawning probe mesh"
|
||||
);
|
||||
tokio::spawn(async move { mesh.run_all().await });
|
||||
}
|
||||
|
||||
// WebSocket server for browser clients
|
||||
if let Some(ws_port) = config.ws_port {
|
||||
let ws_state = wzp_relay::ws::WsState {
|
||||
room_mgr: room_mgr.clone(),
|
||||
session_mgr: session_mgr.clone(),
|
||||
auth_url: config.auth_url.clone(),
|
||||
metrics: metrics.clone(),
|
||||
presence: presence.clone(),
|
||||
};
|
||||
let static_dir = config.static_dir.clone();
|
||||
tokio::spawn(wzp_relay::ws::run_ws_server(ws_port, ws_state, static_dir));
|
||||
info!(ws_port, "WebSocket listener enabled for browser clients");
|
||||
}
|
||||
|
||||
if let Some(ref url) = config.auth_url {
|
||||
info!(url, "auth enabled — clients must present featherChat token");
|
||||
} else {
|
||||
info!("auth disabled — any client can connect (use --auth-url to enable)");
|
||||
}
|
||||
|
||||
info!("Listening for connections...");
|
||||
|
||||
loop {
|
||||
let connection = match wzp_transport::accept(&endpoint).await {
|
||||
Ok(conn) => conn,
|
||||
Err(e) => {
|
||||
error!("accept error: {e}");
|
||||
continue;
|
||||
}
|
||||
Err(e) => { error!("accept: {e}"); continue; }
|
||||
};
|
||||
|
||||
let sessions = sessions.clone();
|
||||
let remote_transport = remote_transport.clone();
|
||||
let room_mgr = room_mgr.clone();
|
||||
let session_mgr = session_mgr.clone();
|
||||
let auth_url = config.auth_url.clone();
|
||||
let relay_seed_bytes = relay_seed.0;
|
||||
let metrics = metrics.clone();
|
||||
let trunking_enabled = config.trunking_enabled;
|
||||
let presence = presence.clone();
|
||||
let route_resolver = route_resolver.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let remote_addr = connection.remote_address();
|
||||
info!(%remote_addr, "new client connection");
|
||||
let addr = connection.remote_address();
|
||||
|
||||
let client_transport = Arc::new(wzp_transport::QuinnTransport::new(connection));
|
||||
let room_name = connection
|
||||
.handshake_data()
|
||||
.and_then(|hd| {
|
||||
hd.downcast::<quinn::crypto::rustls::HandshakeData>().ok()
|
||||
})
|
||||
.and_then(|hd| hd.server_name.clone())
|
||||
.unwrap_or_else(|| "default".to_string());
|
||||
|
||||
match remote_transport {
|
||||
Some(remote_tx) => {
|
||||
// Create pipelines for both directions
|
||||
let upstream_pipeline =
|
||||
Arc::new(Mutex::new(RelayPipeline::new(PipelineConfig::default())));
|
||||
let downstream_pipeline =
|
||||
Arc::new(Mutex::new(RelayPipeline::new(PipelineConfig::default())));
|
||||
let transport = Arc::new(wzp_transport::QuinnTransport::new(connection));
|
||||
|
||||
// Register session
|
||||
{
|
||||
let mut mgr = sessions.lock().await;
|
||||
let session_id = {
|
||||
let mut id = [0u8; 16];
|
||||
let addr_bytes = remote_addr.to_string();
|
||||
let bytes = addr_bytes.as_bytes();
|
||||
let len = bytes.len().min(16);
|
||||
id[..len].copy_from_slice(&bytes[..len]);
|
||||
id
|
||||
};
|
||||
mgr.create_session(session_id, PipelineConfig::default());
|
||||
}
|
||||
|
||||
let stats = Arc::new(RelayStats {
|
||||
upstream_packets: AtomicU64::new(0),
|
||||
downstream_packets: AtomicU64::new(0),
|
||||
});
|
||||
|
||||
// Spawn periodic stats logger
|
||||
let stats_log = stats.clone();
|
||||
let log_remote = remote_addr;
|
||||
let stats_handle = tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(5));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let up = stats_log.upstream_packets.load(Ordering::Relaxed);
|
||||
let down = stats_log.downstream_packets.load(Ordering::Relaxed);
|
||||
info!(
|
||||
client = %log_remote,
|
||||
upstream = up,
|
||||
downstream = down,
|
||||
"relay stats"
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
// Spawn upstream and downstream tasks
|
||||
let up_handle = tokio::spawn(run_upstream(
|
||||
client_transport.clone(),
|
||||
remote_tx.clone(),
|
||||
upstream_pipeline,
|
||||
stats.clone(),
|
||||
));
|
||||
|
||||
let down_handle = tokio::spawn(run_downstream(
|
||||
client_transport.clone(),
|
||||
remote_tx,
|
||||
downstream_pipeline,
|
||||
stats,
|
||||
));
|
||||
|
||||
// Wait for either direction to finish, then clean up
|
||||
tokio::select! {
|
||||
_ = up_handle => {
|
||||
info!(%remote_addr, "upstream task ended");
|
||||
}
|
||||
_ = down_handle => {
|
||||
info!(%remote_addr, "downstream task ended");
|
||||
}
|
||||
}
|
||||
|
||||
// Abort the stats logger and close transport
|
||||
stats_handle.abort();
|
||||
if let Err(e) = client_transport.close().await {
|
||||
warn!(%remote_addr, "error closing client transport: {e}");
|
||||
}
|
||||
info!(%remote_addr, "session ended");
|
||||
}
|
||||
None => {
|
||||
// No remote relay configured — just receive and log (sink mode)
|
||||
warn!("no remote relay configured, running in sink mode");
|
||||
loop {
|
||||
match client_transport.recv_media().await {
|
||||
Ok(Some(packet)) => {
|
||||
tracing::trace!(
|
||||
seq = packet.header.seq,
|
||||
block = packet.header.fec_block,
|
||||
"received media packet (sink)"
|
||||
);
|
||||
}
|
||||
Ok(None) => {
|
||||
info!(%remote_addr, "connection closed");
|
||||
// Probe connections use SNI "_probe" to identify themselves.
|
||||
// They skip auth + handshake and just do Ping->Pong + presence gossip.
|
||||
if room_name == "_probe" {
|
||||
info!(%addr, "probe connection detected, entering Ping/Pong + presence responder");
|
||||
loop {
|
||||
match transport.recv_signal().await {
|
||||
Ok(Some(wzp_proto::SignalMessage::Ping { timestamp_ms })) => {
|
||||
if let Err(e) = transport.send_signal(
|
||||
&wzp_proto::SignalMessage::Pong { timestamp_ms },
|
||||
).await {
|
||||
error!(%addr, "probe pong send error: {e}");
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(Some(wzp_proto::SignalMessage::PresenceUpdate { fingerprints, relay_addr })) => {
|
||||
// A peer relay is telling us which fingerprints it has
|
||||
let peer_addr: std::net::SocketAddr = relay_addr.parse().unwrap_or(addr);
|
||||
let fps: std::collections::HashSet<String> = fingerprints.into_iter().collect();
|
||||
{
|
||||
let mut reg = presence.lock().await;
|
||||
reg.update_peer(peer_addr, fps);
|
||||
}
|
||||
// Reply with our own local fingerprints
|
||||
let local_fps: Vec<String> = {
|
||||
let reg = presence.lock().await;
|
||||
reg.local_fingerprints().into_iter().collect()
|
||||
};
|
||||
let reply = wzp_proto::SignalMessage::PresenceUpdate {
|
||||
fingerprints: local_fps,
|
||||
relay_addr: addr.to_string(),
|
||||
};
|
||||
if let Err(e) = transport.send_signal(&reply).await {
|
||||
error!(%addr, "presence reply send error: {e}");
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(Some(wzp_proto::SignalMessage::RouteQuery { fingerprint, ttl })) => {
|
||||
// Look up the fingerprint in our local registry
|
||||
let reg = presence.lock().await;
|
||||
let route = route_resolver.resolve(®, &fingerprint);
|
||||
drop(reg);
|
||||
|
||||
let (found, relay_chain) = match route {
|
||||
wzp_relay::route::Route::Local => {
|
||||
(true, vec![route_resolver.local_addr().to_string()])
|
||||
}
|
||||
wzp_relay::route::Route::DirectPeer(peer_addr) => {
|
||||
(true, vec![route_resolver.local_addr().to_string(), peer_addr.to_string()])
|
||||
}
|
||||
_ => {
|
||||
// Not found locally; if ttl > 0 we could forward
|
||||
// to other peers (future multi-hop). For now, reply not found.
|
||||
if ttl > 0 {
|
||||
// TODO: forward RouteQuery to other peers with ttl-1
|
||||
}
|
||||
(false, vec![])
|
||||
}
|
||||
};
|
||||
|
||||
let reply = wzp_proto::SignalMessage::RouteResponse {
|
||||
fingerprint,
|
||||
found,
|
||||
relay_chain,
|
||||
};
|
||||
if let Err(e) = transport.send_signal(&reply).await {
|
||||
error!(%addr, "route response send error: {e}");
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(Some(_)) => {
|
||||
// Ignore other signals on probe connections
|
||||
}
|
||||
Ok(None) => {
|
||||
info!(%addr, "probe connection closed");
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
error!(%addr, "probe recv error: {e}");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
transport.close().await.ok();
|
||||
return;
|
||||
}
|
||||
|
||||
// Auth check: if --auth-url is set, expect first signal message to be a token
|
||||
// Auth: if --auth-url is set, expect AuthToken as first signal
|
||||
let authenticated_fp: Option<String> = if let Some(ref url) = auth_url {
|
||||
info!(%addr, "waiting for auth token...");
|
||||
match transport.recv_signal().await {
|
||||
Ok(Some(wzp_proto::SignalMessage::AuthToken { token })) => {
|
||||
match wzp_relay::auth::validate_token(url, &token).await {
|
||||
Ok(client) => {
|
||||
metrics.auth_attempts.with_label_values(&["ok"]).inc();
|
||||
info!(
|
||||
%addr,
|
||||
fingerprint = %client.fingerprint,
|
||||
alias = ?client.alias,
|
||||
"authenticated"
|
||||
);
|
||||
Some(client.fingerprint)
|
||||
}
|
||||
Err(e) => {
|
||||
error!(%remote_addr, "recv error: {e}");
|
||||
break;
|
||||
metrics.auth_attempts.with_label_values(&["fail"]).inc();
|
||||
error!(%addr, "auth failed: {e}");
|
||||
transport.close().await.ok();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Some(_)) => {
|
||||
error!(%addr, "expected AuthToken as first signal, got something else");
|
||||
transport.close().await.ok();
|
||||
return;
|
||||
}
|
||||
Ok(None) => {
|
||||
error!(%addr, "connection closed before auth");
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
error!(%addr, "signal recv error during auth: {e}");
|
||||
transport.close().await.ok();
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Crypto handshake: verify client identity + negotiate quality profile
|
||||
let handshake_start = std::time::Instant::now();
|
||||
let (_crypto_session, _chosen_profile) = match wzp_relay::handshake::accept_handshake(
|
||||
&*transport,
|
||||
&relay_seed_bytes,
|
||||
).await {
|
||||
Ok(result) => {
|
||||
let elapsed = handshake_start.elapsed().as_secs_f64();
|
||||
metrics.handshake_duration.observe(elapsed);
|
||||
info!(%addr, elapsed_ms = %(elapsed * 1000.0), "crypto handshake complete");
|
||||
result
|
||||
}
|
||||
Err(e) => {
|
||||
error!(%addr, "handshake failed: {e}");
|
||||
transport.close().await.ok();
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// Register in presence registry
|
||||
if let Some(ref fp) = authenticated_fp {
|
||||
let mut reg = presence.lock().await;
|
||||
reg.register_local(fp, None, Some(room_name.clone()));
|
||||
}
|
||||
|
||||
info!(%addr, room = %room_name, "client joining");
|
||||
|
||||
if let Some(remote) = remote_transport {
|
||||
// Forward mode — same as before
|
||||
let stats = Arc::new(RelayStats {
|
||||
upstream_packets: AtomicU64::new(0),
|
||||
downstream_packets: AtomicU64::new(0),
|
||||
});
|
||||
let up_pipe = Arc::new(Mutex::new(RelayPipeline::new(PipelineConfig::default())));
|
||||
let dn_pipe = Arc::new(Mutex::new(RelayPipeline::new(PipelineConfig::default())));
|
||||
|
||||
let stats_log = stats.clone();
|
||||
let stats_handle = tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(5));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
info!(
|
||||
up = stats_log.upstream_packets.load(Ordering::Relaxed),
|
||||
down = stats_log.downstream_packets.load(Ordering::Relaxed),
|
||||
"forward stats"
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
let up = tokio::spawn(run_upstream(transport.clone(), remote.clone(), up_pipe, stats.clone()));
|
||||
let dn = tokio::spawn(run_downstream(transport.clone(), remote.clone(), dn_pipe, stats));
|
||||
|
||||
tokio::select! { _ = up => {} _ = dn => {} }
|
||||
stats_handle.abort();
|
||||
transport.close().await.ok();
|
||||
} else {
|
||||
// Room mode — enforce max sessions, then join room
|
||||
let session_id = {
|
||||
let mut smgr = session_mgr.lock().await;
|
||||
match smgr.create_session(&room_name, authenticated_fp.clone()) {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
error!(%addr, room = %room_name, "session rejected: {e}");
|
||||
transport.close().await.ok();
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
metrics.active_sessions.inc();
|
||||
|
||||
let participant_id = {
|
||||
let mut mgr = room_mgr.lock().await;
|
||||
match mgr.join(&room_name, addr, room::ParticipantSender::Quic(transport.clone()), authenticated_fp.as_deref()) {
|
||||
Ok(id) => {
|
||||
metrics.active_rooms.set(mgr.list().len() as i64);
|
||||
id
|
||||
}
|
||||
Err(e) => {
|
||||
error!(%addr, room = %room_name, "room join denied: {e}");
|
||||
// Clean up the session we just created
|
||||
metrics.active_sessions.dec();
|
||||
let mut smgr = session_mgr.lock().await;
|
||||
smgr.remove_session(session_id);
|
||||
transport.close().await.ok();
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let session_id_str: String = session_id
|
||||
.iter()
|
||||
.map(|b| format!("{b:02x}"))
|
||||
.collect();
|
||||
room::run_participant(
|
||||
room_mgr.clone(),
|
||||
room_name,
|
||||
participant_id,
|
||||
transport.clone(),
|
||||
metrics.clone(),
|
||||
&session_id_str,
|
||||
trunking_enabled,
|
||||
).await;
|
||||
|
||||
// Participant disconnected — clean up presence + per-session metrics
|
||||
if let Some(ref fp) = authenticated_fp {
|
||||
let mut reg = presence.lock().await;
|
||||
reg.unregister_local(fp);
|
||||
}
|
||||
metrics.remove_session_metrics(&session_id_str);
|
||||
metrics.active_sessions.dec();
|
||||
{
|
||||
let mgr = room_mgr.lock().await;
|
||||
metrics.active_rooms.set(mgr.list().len() as i64);
|
||||
}
|
||||
{
|
||||
let mut smgr = session_mgr.lock().await;
|
||||
smgr.remove_session(session_id);
|
||||
}
|
||||
|
||||
transport.close().await.ok();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
412
crates/wzp-relay/src/metrics.rs
Normal file
412
crates/wzp-relay/src/metrics.rs
Normal file
@@ -0,0 +1,412 @@
|
||||
//! Prometheus metrics for the WZP relay daemon.
|
||||
|
||||
use prometheus::{
|
||||
Encoder, GaugeVec, Histogram, HistogramOpts, IntCounter, IntCounterVec, IntGauge, IntGaugeVec,
|
||||
Opts, Registry, TextEncoder,
|
||||
};
|
||||
use wzp_proto::packet::QualityReport;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// All relay-level Prometheus metrics.
///
/// Constructed once by [`RelayMetrics::new`] and shared via `clone()`
/// (`#[derive(Clone)]`); all series are registered on the embedded
/// `registry`, which is also used for probe metrics (see `registry()`).
#[derive(Clone)]
pub struct RelayMetrics {
    /// Gauge: sessions currently admitted.
    pub active_sessions: IntGauge,
    /// Gauge: rooms currently active.
    pub active_rooms: IntGauge,
    /// Counter: total packets forwarded.
    pub packets_forwarded: IntCounter,
    /// Counter: total bytes forwarded.
    pub bytes_forwarded: IntCounter,
    /// Counter, labelled by `result` ("ok" / "fail"): auth validation attempts.
    pub auth_attempts: IntCounterVec,
    /// Histogram: crypto handshake duration in seconds.
    pub handshake_duration: Histogram,
    // Per-session metrics — every vec below is labelled by `session_id`
    // and cleaned up on disconnect via `remove_session_metrics`.
    /// Gauge per session: jitter-buffer depth.
    pub session_buffer_depth: IntGaugeVec,
    /// Gauge per session: packet loss percentage.
    pub session_loss_pct: GaugeVec,
    /// Gauge per session: round-trip time in milliseconds.
    pub session_rtt_ms: GaugeVec,
    /// Counter per session: jitter-buffer underruns.
    pub session_underruns: IntCounterVec,
    /// Counter per session: jitter-buffer overruns.
    pub session_overruns: IntCounterVec,
    // Private: the registry every metric above is registered on.
    registry: Registry,
}
|
||||
|
||||
impl RelayMetrics {
    /// Create and register all relay metrics with a new registry.
    ///
    /// Panics (via `expect`) if metric construction or registration fails.
    pub fn new() -> Self {
        let registry = Registry::new();

        let active_sessions = IntGauge::with_opts(
            Opts::new("wzp_relay_active_sessions", "Current active sessions"),
        )
        .expect("metric");
        let active_rooms = IntGauge::with_opts(
            Opts::new("wzp_relay_active_rooms", "Current active rooms"),
        )
        .expect("metric");
        let packets_forwarded = IntCounter::with_opts(
            Opts::new("wzp_relay_packets_forwarded_total", "Total packets forwarded"),
        )
        .expect("metric");
        let bytes_forwarded = IntCounter::with_opts(
            Opts::new("wzp_relay_bytes_forwarded_total", "Total bytes forwarded"),
        )
        .expect("metric");
        let auth_attempts = IntCounterVec::new(
            Opts::new("wzp_relay_auth_attempts_total", "Auth validation attempts"),
            &["result"],
        )
        .expect("metric");
        // Bucket boundaries chosen around expected handshake times (1 ms – 2.5 s).
        let handshake_duration = Histogram::with_opts(
            HistogramOpts::new(
                "wzp_relay_handshake_duration_seconds",
                "Crypto handshake time",
            )
            .buckets(vec![0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5]),
        )
        .expect("metric");

        // Per-session series, all labelled by `session_id`.
        let session_buffer_depth = IntGaugeVec::new(
            Opts::new(
                "wzp_relay_session_jitter_buffer_depth",
                "Buffer depth per session",
            ),
            &["session_id"],
        )
        .expect("metric");
        let session_loss_pct = GaugeVec::new(
            Opts::new(
                "wzp_relay_session_loss_pct",
                "Packet loss percentage per session",
            ),
            &["session_id"],
        )
        .expect("metric");
        let session_rtt_ms = GaugeVec::new(
            Opts::new(
                "wzp_relay_session_rtt_ms",
                "Round-trip time per session",
            ),
            &["session_id"],
        )
        .expect("metric");
        let session_underruns = IntCounterVec::new(
            Opts::new(
                "wzp_relay_session_underruns_total",
                "Jitter buffer underruns per session",
            ),
            &["session_id"],
        )
        .expect("metric");
        let session_overruns = IntCounterVec::new(
            Opts::new(
                "wzp_relay_session_overruns_total",
                "Jitter buffer overruns per session",
            ),
            &["session_id"],
        )
        .expect("metric");

        // Register every collector; clones share the underlying series with
        // the copies stored in `Self` below.
        registry.register(Box::new(active_sessions.clone())).expect("register");
        registry.register(Box::new(active_rooms.clone())).expect("register");
        registry.register(Box::new(packets_forwarded.clone())).expect("register");
        registry.register(Box::new(bytes_forwarded.clone())).expect("register");
        registry.register(Box::new(auth_attempts.clone())).expect("register");
        registry.register(Box::new(handshake_duration.clone())).expect("register");
        registry.register(Box::new(session_buffer_depth.clone())).expect("register");
        registry.register(Box::new(session_loss_pct.clone())).expect("register");
        registry.register(Box::new(session_rtt_ms.clone())).expect("register");
        registry.register(Box::new(session_underruns.clone())).expect("register");
        registry.register(Box::new(session_overruns.clone())).expect("register");

        Self {
            active_sessions,
            active_rooms,
            packets_forwarded,
            bytes_forwarded,
            auth_attempts,
            handshake_duration,
            session_buffer_depth,
            session_loss_pct,
            session_rtt_ms,
            session_underruns,
            session_overruns,
            registry,
        }
    }

    /// Update per-session quality metrics from a QualityReport.
    ///
    /// `loss_percent()` / `rtt_ms()` decode the report's wire encoding
    /// (e.g. `rtt_4ms` is in 4 ms units — see the unit tests below).
    pub fn update_session_quality(&self, session_id: &str, report: &QualityReport) {
        self.session_loss_pct
            .with_label_values(&[session_id])
            .set(report.loss_percent() as f64);
        self.session_rtt_ms
            .with_label_values(&[session_id])
            .set(report.rtt_ms() as f64);
    }

    /// Update per-session buffer metrics.
    ///
    /// `underruns` / `overruns` are cumulative totals as reported by the
    /// jitter buffer, while the Prometheus series are counters.
    pub fn update_session_buffer(
        &self,
        session_id: &str,
        depth: usize,
        underruns: u64,
        overruns: u64,
    ) {
        self.session_buffer_depth
            .with_label_values(&[session_id])
            .set(depth as i64);
        // IntCounterVec doesn't have a `set` — we inc by the delta.
        // Since these are cumulative from the jitter buffer, we use inc_by
        // with the current totals. To avoid double-counting, callers should
        // track previous values externally. For simplicity the relay reports
        // the absolute value each tick; counters only go up so we take the
        // max(0, new - current) approach.
        let cur_underruns = self
            .session_underruns
            .with_label_values(&[session_id])
            .get();
        if underruns > cur_underruns as u64 {
            self.session_underruns
                .with_label_values(&[session_id])
                .inc_by(underruns - cur_underruns as u64);
        }
        let cur_overruns = self
            .session_overruns
            .with_label_values(&[session_id])
            .get();
        if overruns > cur_overruns as u64 {
            self.session_overruns
                .with_label_values(&[session_id])
                .inc_by(overruns - cur_overruns as u64);
        }
    }

    /// Remove all per-session label values for a disconnected session.
    ///
    /// Errors (label never created) are intentionally ignored: removal is
    /// best-effort cleanup on disconnect.
    pub fn remove_session_metrics(&self, session_id: &str) {
        let _ = self.session_buffer_depth.remove_label_values(&[session_id]);
        let _ = self.session_loss_pct.remove_label_values(&[session_id]);
        let _ = self.session_rtt_ms.remove_label_values(&[session_id]);
        let _ = self.session_underruns.remove_label_values(&[session_id]);
        let _ = self.session_overruns.remove_label_values(&[session_id]);
    }

    /// Get a reference to the underlying Prometheus registry.
    /// Probe metrics are registered on this same registry so they appear in /metrics output.
    pub fn registry(&self) -> &Registry {
        &self.registry
    }

    /// Gather all metrics and encode them as Prometheus text format.
    ///
    /// Panics only if text encoding fails or produces invalid UTF-8,
    /// which the text encoder is not expected to do.
    pub fn metrics_handler(&self) -> String {
        let encoder = TextEncoder::new();
        let metric_families = self.registry.gather();
        let mut buffer = Vec::new();
        encoder.encode(&metric_families, &mut buffer).expect("encode");
        String::from_utf8(buffer).expect("utf8")
    }
}
|
||||
|
||||
/// Start an HTTP server serving GET /metrics, GET /mesh, presence, and route endpoints on the given port.
///
/// Binds `0.0.0.0:port` and runs until the server errors (then panics via
/// `expect`). `presence` and `route_resolver` are optional; endpoints that
/// need them return empty/placeholder JSON when they are `None`.
pub async fn serve_metrics(
    port: u16,
    metrics: Arc<RelayMetrics>,
    presence: Option<Arc<tokio::sync::Mutex<crate::presence::PresenceRegistry>>>,
    route_resolver: Option<Arc<crate::route::RouteResolver>>,
) {
    use axum::{extract::Path, routing::get, Router};

    // Each route handler is a `move` closure, so every handler needs its
    // own clone of the shared state captured up front.
    let metrics_clone = metrics.clone();
    let presence_all = presence.clone();
    let presence_lookup = presence.clone();
    let presence_peers = presence.clone();
    let presence_route = presence;

    let app = Router::new()
        // Prometheus text exposition.
        .route(
            "/metrics",
            get(move || {
                let m = metrics.clone();
                async move { m.metrics_handler() }
            }),
        )
        // Mesh summary derived from probe metrics on the shared registry.
        .route(
            "/mesh",
            get(move || {
                let m = metrics_clone.clone();
                async move { crate::probe::mesh_summary(m.registry()) }
            }),
        )
        // Full dump of all known fingerprints and their locations.
        .route(
            "/presence",
            get(move || {
                let reg = presence_all.clone();
                async move {
                    match reg {
                        Some(r) => {
                            let r = r.lock().await;
                            let entries: Vec<serde_json::Value> = r.all_known().into_iter().map(|(fp, loc)| {
                                serde_json::json!({ "fingerprint": fp, "location": loc })
                            }).collect();
                            serde_json::to_string_pretty(&entries).unwrap_or_else(|_| "[]".to_string())
                        }
                        None => "[]".to_string(),
                    }
                }
            }),
        )
        // Lookup of a single fingerprint; `location` is null when unknown.
        .route(
            "/presence/:fingerprint",
            get(move |Path(fingerprint): Path<String>| {
                let reg = presence_lookup.clone();
                async move {
                    match reg {
                        Some(r) => {
                            let r = r.lock().await;
                            match r.lookup(&fingerprint) {
                                Some(loc) => serde_json::to_string_pretty(
                                    &serde_json::json!({ "fingerprint": fingerprint, "location": loc })
                                ).unwrap_or_else(|_| "{}".to_string()),
                                None => serde_json::json!({ "fingerprint": fingerprint, "location": null }).to_string(),
                            }
                        }
                        None => serde_json::json!({ "fingerprint": fingerprint, "location": null }).to_string(),
                    }
                }
            }),
        )
        // Peer relay list with their reported fingerprints and RTT.
        .route(
            "/peers",
            get(move || {
                let reg = presence_peers.clone();
                async move {
                    match reg {
                        Some(r) => {
                            let r = r.lock().await;
                            let peers: Vec<serde_json::Value> = r.peers().iter().map(|(addr, peer)| {
                                serde_json::json!({
                                    "addr": addr.to_string(),
                                    "fingerprints": peer.fingerprints.iter().collect::<Vec<_>>(),
                                    "rtt_ms": peer.rtt_ms,
                                })
                            }).collect();
                            serde_json::to_string_pretty(&peers).unwrap_or_else(|_| "[]".to_string())
                        }
                        None => "[]".to_string(),
                    }
                }
            }),
        )
        // Route resolution for a fingerprint; requires both the presence
        // registry and the route resolver, else returns "not_found".
        .route(
            "/route/:fingerprint",
            get(move |Path(fingerprint): Path<String>| {
                let reg = presence_route.clone();
                let resolver = route_resolver.clone();
                async move {
                    match (reg, resolver) {
                        (Some(r), Some(res)) => {
                            let r = r.lock().await;
                            let route = res.resolve(&r, &fingerprint);
                            let json = res.route_json(&fingerprint, &route);
                            serde_json::to_string_pretty(&json)
                                .unwrap_or_else(|_| "{}".to_string())
                        }
                        _ => {
                            serde_json::json!({
                                "fingerprint": fingerprint,
                                "route": "not_found",
                                "relay_chain": [],
                            })
                            .to_string()
                        }
                    }
                }
            }),
        );

    let addr = std::net::SocketAddr::from(([0, 0, 0, 0], port));
    let listener = tokio::net::TcpListener::bind(addr)
        .await
        .expect("failed to bind metrics port");
    tracing::info!(%addr, "metrics endpoint serving");
    axum::serve(listener, app)
        .await
        .expect("metrics server error");
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // All registered metric names appear in the encoded text output.
    #[test]
    fn metrics_register() {
        let m = RelayMetrics::new();
        // Touch the CounterVec labels so they appear in output
        m.auth_attempts.with_label_values(&["ok"]);
        m.auth_attempts.with_label_values(&["fail"]);
        let output = m.metrics_handler();
        // Should contain all registered metric names (as HELP or TYPE lines)
        assert!(output.contains("wzp_relay_active_sessions"));
        assert!(output.contains("wzp_relay_active_rooms"));
        assert!(output.contains("wzp_relay_packets_forwarded_total"));
        assert!(output.contains("wzp_relay_bytes_forwarded_total"));
        assert!(output.contains("wzp_relay_auth_attempts_total"));
        assert!(output.contains("wzp_relay_handshake_duration_seconds"));
    }

    // QualityReport wire fields are decoded before being exported.
    #[test]
    fn session_quality_update() {
        let m = RelayMetrics::new();
        let report = QualityReport {
            loss_pct: 128, // ~50%
            rtt_4ms: 25, // 100ms
            jitter_ms: 10,
            bitrate_cap_kbps: 200,
        };
        m.update_session_quality("sess-abc", &report);

        let output = m.metrics_handler();
        assert!(output.contains("wzp_relay_session_loss_pct{session_id=\"sess-abc\"}"));
        assert!(output.contains("wzp_relay_session_rtt_ms{session_id=\"sess-abc\"}"));
        // Verify rtt value (25 * 4 = 100)
        assert!(output.contains("wzp_relay_session_rtt_ms{session_id=\"sess-abc\"} 100"));
    }

    // remove_session_metrics drops every per-session label value.
    #[test]
    fn session_metrics_cleanup() {
        let m = RelayMetrics::new();
        let report = QualityReport {
            loss_pct: 50,
            rtt_4ms: 10,
            jitter_ms: 5,
            bitrate_cap_kbps: 100,
        };
        m.update_session_quality("sess-cleanup", &report);
        m.update_session_buffer("sess-cleanup", 42, 3, 1);

        // Verify they appear
        let output = m.metrics_handler();
        assert!(output.contains("sess-cleanup"));

        // Remove and verify they are gone
        m.remove_session_metrics("sess-cleanup");
        let output = m.metrics_handler();
        assert!(!output.contains("sess-cleanup"));
    }

    // Gauges/counters/histogram all reflect writes in the text output.
    #[test]
    fn metrics_increment() {
        let m = RelayMetrics::new();

        m.active_sessions.set(5);
        m.active_rooms.set(2);
        m.packets_forwarded.inc_by(100);
        m.bytes_forwarded.inc_by(48000);
        m.auth_attempts.with_label_values(&["ok"]).inc();
        m.auth_attempts.with_label_values(&["fail"]).inc_by(3);
        m.handshake_duration.observe(0.042);

        let output = m.metrics_handler();
        assert!(output.contains("wzp_relay_active_sessions 5"));
        assert!(output.contains("wzp_relay_active_rooms 2"));
        assert!(output.contains("wzp_relay_packets_forwarded_total 100"));
        assert!(output.contains("wzp_relay_bytes_forwarded_total 48000"));
        assert!(output.contains("wzp_relay_auth_attempts_total{result=\"ok\"} 1"));
        assert!(output.contains("wzp_relay_auth_attempts_total{result=\"fail\"} 3"));
        assert!(output.contains("wzp_relay_handshake_duration_seconds_count 1"));
    }
}
|
||||
333
crates/wzp-relay/src/presence.rs
Normal file
333
crates/wzp-relay/src/presence.rs
Normal file
@@ -0,0 +1,333 @@
|
||||
//! Presence registry — tracks which fingerprints are connected to this relay
|
||||
//! and to peer relays (via gossip over probe connections).
|
||||
//!
|
||||
//! This enables route resolution: given a fingerprint, determine whether the
|
||||
//! user is local, on a known peer relay, or unknown.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::net::SocketAddr;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Data structures
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Where a fingerprint is connected.
///
/// Serialized into the HTTP presence endpoints' JSON responses.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub enum PresenceLocation {
    /// Connected directly to this relay.
    Local,
    /// Connected to a peer relay at the given address.
    Remote(SocketAddr),
}
|
||||
|
||||
/// Presence entry for a fingerprint connected directly to this relay.
#[derive(Clone, Debug)]
pub struct LocalPresence {
    /// The client's identity fingerprint (registry key is the same string).
    pub fingerprint: String,
    /// Optional human-readable alias supplied at registration.
    pub alias: Option<String>,
    /// When this fingerprint registered on this relay.
    pub connected_at: Instant,
    /// Room the client joined, if any.
    pub room: Option<String>,
}
|
||||
|
||||
/// Presence entry for a fingerprint reported by a peer relay.
#[derive(Clone, Debug)]
pub struct RemotePresence {
    /// The reported identity fingerprint.
    pub fingerprint: String,
    /// Address of the peer relay that reported it.
    pub relay_addr: SocketAddr,
    /// Last gossip update that mentioned this fingerprint; used by
    /// `expire_stale` to drop entries not refreshed within the timeout.
    pub last_seen: Instant,
}
|
||||
|
||||
/// Known peer relay and its reported fingerprints.
#[derive(Clone, Debug)]
pub struct PeerRelay {
    /// Peer relay address (also the map key in the registry).
    pub addr: SocketAddr,
    /// Full set of fingerprints the peer reported in its last update;
    /// replaced wholesale on each gossip round.
    pub fingerprints: HashSet<String>,
    /// When the last update from this peer arrived.
    pub last_update: Instant,
    /// Most recent measured RTT to the peer in ms, if probed.
    pub rtt_ms: Option<f64>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Registry
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Central presence registry tracking local and remote fingerprints.
///
/// Lookup precedence is local-first: a fingerprint connected here wins
/// over any remote report of the same fingerprint.
pub struct PresenceRegistry {
    /// Fingerprints connected directly to THIS relay.
    local: HashMap<String, LocalPresence>,
    /// Fingerprints reported by peer relays (via gossip).
    remote: HashMap<String, RemotePresence>,
    /// Known peer relays and their status.
    peers: HashMap<SocketAddr, PeerRelay>,
}
|
||||
|
||||
impl PresenceRegistry {
|
||||
/// Create an empty registry.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
local: HashMap::new(),
|
||||
remote: HashMap::new(),
|
||||
peers: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Register a fingerprint as locally connected (called after auth + handshake).
|
||||
pub fn register_local(&mut self, fingerprint: &str, alias: Option<String>, room: Option<String>) {
|
||||
self.local.insert(fingerprint.to_string(), LocalPresence {
|
||||
fingerprint: fingerprint.to_string(),
|
||||
alias,
|
||||
connected_at: Instant::now(),
|
||||
room,
|
||||
});
|
||||
}
|
||||
|
||||
/// Unregister a locally connected fingerprint (called on disconnect).
|
||||
pub fn unregister_local(&mut self, fingerprint: &str) {
|
||||
self.local.remove(fingerprint);
|
||||
}
|
||||
|
||||
/// Update the fingerprints reported by a peer relay.
|
||||
/// Replaces the previous set for that peer.
|
||||
pub fn update_peer(&mut self, addr: SocketAddr, fingerprints: HashSet<String>) {
|
||||
let now = Instant::now();
|
||||
|
||||
// Remove old remote entries that belonged to this peer
|
||||
self.remote.retain(|_, rp| rp.relay_addr != addr);
|
||||
|
||||
// Insert new remote entries
|
||||
for fp in &fingerprints {
|
||||
self.remote.insert(fp.clone(), RemotePresence {
|
||||
fingerprint: fp.clone(),
|
||||
relay_addr: addr,
|
||||
last_seen: now,
|
||||
});
|
||||
}
|
||||
|
||||
// Update the peer record
|
||||
let peer = self.peers.entry(addr).or_insert_with(|| PeerRelay {
|
||||
addr,
|
||||
fingerprints: HashSet::new(),
|
||||
last_update: now,
|
||||
rtt_ms: None,
|
||||
});
|
||||
peer.fingerprints = fingerprints;
|
||||
peer.last_update = now;
|
||||
}
|
||||
|
||||
/// Look up where a fingerprint is connected.
|
||||
/// Local presence takes priority over remote.
|
||||
pub fn lookup(&self, fingerprint: &str) -> Option<PresenceLocation> {
|
||||
if self.local.contains_key(fingerprint) {
|
||||
return Some(PresenceLocation::Local);
|
||||
}
|
||||
if let Some(rp) = self.remote.get(fingerprint) {
|
||||
return Some(PresenceLocation::Remote(rp.relay_addr));
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Return all fingerprints connected directly to this relay.
|
||||
pub fn local_fingerprints(&self) -> HashSet<String> {
|
||||
self.local.keys().cloned().collect()
|
||||
}
|
||||
|
||||
/// Return a full dump of every known fingerprint and its location.
|
||||
pub fn all_known(&self) -> Vec<(String, PresenceLocation)> {
|
||||
let mut out = Vec::new();
|
||||
for fp in self.local.keys() {
|
||||
out.push((fp.clone(), PresenceLocation::Local));
|
||||
}
|
||||
for (fp, rp) in &self.remote {
|
||||
// Skip if also local (local wins)
|
||||
if !self.local.contains_key(fp) {
|
||||
out.push((fp.clone(), PresenceLocation::Remote(rp.relay_addr)));
|
||||
}
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Remove remote entries older than `timeout`.
|
||||
pub fn expire_stale(&mut self, timeout: Duration) {
|
||||
let cutoff = Instant::now() - timeout;
|
||||
|
||||
// Expire remote presence entries
|
||||
self.remote.retain(|_, rp| rp.last_seen > cutoff);
|
||||
|
||||
// Expire peer relay records and their fingerprint sets
|
||||
let stale_peers: Vec<SocketAddr> = self.peers
|
||||
.iter()
|
||||
.filter(|(_, p)| p.last_update <= cutoff)
|
||||
.map(|(addr, _)| *addr)
|
||||
.collect();
|
||||
|
||||
for addr in stale_peers {
|
||||
self.peers.remove(&addr);
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a reference to the peer relay map (for HTTP API).
|
||||
pub fn peers(&self) -> &HashMap<SocketAddr, PeerRelay> {
|
||||
&self.peers
|
||||
}
|
||||
|
||||
/// Return a reference to the local presence map (for HTTP API).
|
||||
pub fn local_entries(&self) -> &HashMap<String, LocalPresence> {
|
||||
&self.local
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::SocketAddr;

    // Helper: parse a socket address literal, panicking on bad input.
    fn addr(s: &str) -> SocketAddr {
        s.parse().unwrap()
    }

    #[test]
    fn register_and_lookup_local() {
        let mut reg = PresenceRegistry::new();
        reg.register_local("aabbccdd", Some("alice".into()), Some("room1".into()));

        assert_eq!(reg.lookup("aabbccdd"), Some(PresenceLocation::Local));
        // Unknown fingerprint returns None
        assert_eq!(reg.lookup("00000000"), None);
    }

    #[test]
    fn unregister_removes() {
        let mut reg = PresenceRegistry::new();
        reg.register_local("aabbccdd", None, None);
        assert_eq!(reg.lookup("aabbccdd"), Some(PresenceLocation::Local));

        reg.unregister_local("aabbccdd");
        assert_eq!(reg.lookup("aabbccdd"), None);
    }

    #[test]
    fn update_peer_and_lookup() {
        let mut reg = PresenceRegistry::new();
        let peer = addr("10.0.0.2:4433");
        let mut fps = HashSet::new();
        fps.insert("deadbeef".to_string());
        fps.insert("cafebabe".to_string());

        reg.update_peer(peer, fps);

        assert_eq!(reg.lookup("deadbeef"), Some(PresenceLocation::Remote(peer)));
        assert_eq!(reg.lookup("cafebabe"), Some(PresenceLocation::Remote(peer)));
        assert_eq!(reg.lookup("unknown"), None);
    }

    // Backdates entries by poking the private fields directly, then checks
    // expire_stale removes both the remote entry and the peer record.
    #[test]
    fn expire_stale_removes_old() {
        let mut reg = PresenceRegistry::new();
        let peer = addr("10.0.0.3:4433");

        let mut fps = HashSet::new();
        fps.insert("olduser".to_string());
        reg.update_peer(peer, fps);

        // Verify it's there
        assert_eq!(reg.lookup("olduser"), Some(PresenceLocation::Remote(peer)));

        // Manually backdate the last_seen and last_update
        if let Some(rp) = reg.remote.get_mut("olduser") {
            rp.last_seen = Instant::now() - Duration::from_secs(120);
        }
        if let Some(p) = reg.peers.get_mut(&peer) {
            p.last_update = Instant::now() - Duration::from_secs(120);
        }

        // Expire with 60s timeout — should remove the 120s-old entries
        reg.expire_stale(Duration::from_secs(60));

        assert_eq!(reg.lookup("olduser"), None);
        assert!(reg.peers.get(&peer).is_none());
    }

    #[test]
    fn local_fingerprints_list() {
        let mut reg = PresenceRegistry::new();
        reg.register_local("fp1", None, None);
        reg.register_local("fp2", Some("bob".into()), Some("room-a".into()));
        reg.register_local("fp3", None, None);

        let fps = reg.local_fingerprints();
        assert_eq!(fps.len(), 3);
        assert!(fps.contains("fp1"));
        assert!(fps.contains("fp2"));
        assert!(fps.contains("fp3"));
    }

    #[test]
    fn all_known_includes_local_and_remote() {
        let mut reg = PresenceRegistry::new();
        reg.register_local("local1", None, None);

        let peer = addr("10.0.0.5:4433");
        let mut fps = HashSet::new();
        fps.insert("remote1".to_string());
        reg.update_peer(peer, fps);

        let all = reg.all_known();
        assert_eq!(all.len(), 2);

        let local_entries: Vec<_> = all.iter()
            .filter(|(_, loc)| *loc == PresenceLocation::Local)
            .collect();
        assert_eq!(local_entries.len(), 1);
        assert_eq!(local_entries[0].0, "local1");

        let remote_entries: Vec<_> = all.iter()
            .filter(|(_, loc)| matches!(loc, PresenceLocation::Remote(_)))
            .collect();
        assert_eq!(remote_entries.len(), 1);
        assert_eq!(remote_entries[0].0, "remote1");
    }

    #[test]
    fn local_overrides_remote_in_lookup() {
        let mut reg = PresenceRegistry::new();
        let peer = addr("10.0.0.6:4433");

        // Register as remote first
        let mut fps = HashSet::new();
        fps.insert("dupfp".to_string());
        reg.update_peer(peer, fps);
        assert_eq!(reg.lookup("dupfp"), Some(PresenceLocation::Remote(peer)));

        // Now register locally — local should win
        reg.register_local("dupfp", None, None);
        assert_eq!(reg.lookup("dupfp"), Some(PresenceLocation::Local));
    }

    #[test]
    fn update_peer_replaces_old_fingerprints() {
        let mut reg = PresenceRegistry::new();
        let peer = addr("10.0.0.7:4433");

        let mut fps1 = HashSet::new();
        fps1.insert("user_a".to_string());
        fps1.insert("user_b".to_string());
        reg.update_peer(peer, fps1);

        assert_eq!(reg.lookup("user_a"), Some(PresenceLocation::Remote(peer)));
        assert_eq!(reg.lookup("user_b"), Some(PresenceLocation::Remote(peer)));

        // Update with only user_b — user_a should be gone
        let mut fps2 = HashSet::new();
        fps2.insert("user_b".to_string());
        reg.update_peer(peer, fps2);

        assert_eq!(reg.lookup("user_a"), None);
        assert_eq!(reg.lookup("user_b"), Some(PresenceLocation::Remote(peer)));
    }
}
|
||||
632
crates/wzp-relay/src/probe.rs
Normal file
632
crates/wzp-relay/src/probe.rs
Normal file
@@ -0,0 +1,632 @@
|
||||
//! Inter-relay health probe.
|
||||
//!
|
||||
//! A `ProbeRunner` maintains a persistent QUIC connection to a peer relay,
|
||||
//! sends 1 Ping/s, and measures RTT, loss, and jitter. Results are exported
|
||||
//! as Prometheus gauges with a `target` label.
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use prometheus::{Gauge, IntGauge, Opts, Registry};
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
use wzp_proto::{MediaTransport, SignalMessage};
|
||||
|
||||
/// Configuration for a single probe target.
#[derive(Clone, Debug)]
pub struct ProbeConfig {
    /// Peer relay address to probe.
    pub target: SocketAddr,
    /// Time between pings.
    pub interval: Duration,
}

impl ProbeConfig {
    /// Build a config for `target` with the default one-second ping interval.
    pub fn new(target: SocketAddr) -> Self {
        let interval = Duration::from_secs(1);
        ProbeConfig { target, interval }
    }
}
|
||||
|
||||
/// Prometheus metrics for one probe target.
///
/// Each field carries a `target` const label identifying the peer
/// (see `ProbeMetrics::register`).
pub struct ProbeMetrics {
    /// Measured round-trip time to the peer, in ms.
    pub rtt_ms: Gauge,
    /// Packet loss toward the peer, in percent.
    pub loss_pct: Gauge,
    /// Jitter toward the peer, in ms.
    pub jitter_ms: Gauge,
    /// Reachability flag: 1 = reachable, 0 = not.
    pub up: IntGauge,
}
|
||||
|
||||
impl ProbeMetrics {
    /// Register probe metrics with the given `target` label value.
    ///
    /// The `target` is attached as a const label, so one set of series is
    /// created per probed peer on the shared `registry`. Panics (via
    /// `expect`) if construction or registration fails — e.g. registering
    /// the same target twice on one registry.
    pub fn register(target: &str, registry: &Registry) -> Self {
        let rtt_ms = Gauge::with_opts(
            Opts::new("wzp_probe_rtt_ms", "RTT to peer relay in ms")
                .const_label("target", target),
        )
        .expect("probe metric");

        let loss_pct = Gauge::with_opts(
            Opts::new("wzp_probe_loss_pct", "Packet loss to peer relay in %")
                .const_label("target", target),
        )
        .expect("probe metric");

        let jitter_ms = Gauge::with_opts(
            Opts::new("wzp_probe_jitter_ms", "Jitter to peer relay in ms")
                .const_label("target", target),
        )
        .expect("probe metric");

        let up = IntGauge::with_opts(
            Opts::new("wzp_probe_up", "1 if peer relay is reachable, 0 if not")
                .const_label("target", target),
        )
        .expect("probe metric");

        // Register clones; the copies kept in Self share the same series.
        registry.register(Box::new(rtt_ms.clone())).expect("register");
        registry.register(Box::new(loss_pct.clone())).expect("register");
        registry.register(Box::new(jitter_ms.clone())).expect("register");
        registry.register(Box::new(up.clone())).expect("register");

        Self {
            rtt_ms,
            loss_pct,
            jitter_ms,
            up,
        }
    }
}
|
||||
|
||||
/// Sliding window for tracking probe results over the last N pings.
pub struct SlidingWindow {
    /// Capacity (number of pings to track).
    capacity: usize,
    /// Timestamps of sent pings (ms since epoch), in send order.
    sent: VecDeque<u64>,
    /// RTT values for received pongs (ms). None = no pong received yet.
    /// Kept index-aligned with `sent`.
    rtts: VecDeque<Option<f64>>,
}

impl SlidingWindow {
    /// Create a window tracking the most recent `capacity` pings.
    pub fn new(capacity: usize) -> Self {
        Self {
            capacity,
            sent: VecDeque::with_capacity(capacity),
            rtts: VecDeque::with_capacity(capacity),
        }
    }

    /// Record a sent ping, evicting the oldest entry once the window is full.
    pub fn record_sent(&mut self, timestamp_ms: u64) {
        if self.sent.len() >= self.capacity {
            // Evict the oldest ping together with its RTT slot so the two
            // queues stay index-aligned.
            self.sent.pop_front();
            self.rtts.pop_front();
        }
        self.sent.push_back(timestamp_ms);
        self.rtts.push_back(None);
    }

    /// Record a received pong. Returns the computed RTT in ms, or None if
    /// the timestamp doesn't match any pending (still unanswered) ping.
    ///
    /// Fix over the previous version: only pings without a recorded RTT are
    /// matched, so a duplicated/replayed pong can no longer overwrite an
    /// existing measurement and be reported a second time.
    pub fn record_pong(&mut self, timestamp_ms: u64, now_ms: u64) -> Option<f64> {
        // Find the oldest unanswered ping with this timestamp.
        for (i, &sent_ts) in self.sent.iter().enumerate() {
            if sent_ts == timestamp_ms && self.rtts[i].is_none() {
                let rtt = (now_ms as f64) - (sent_ts as f64);
                self.rtts[i] = Some(rtt);
                return Some(rtt);
            }
        }
        None
    }

    /// Compute loss percentage (0.0-100.0) from the current window.
    /// A ping is considered lost if it has no matching pong; an empty
    /// window reports 0% loss.
    pub fn loss_pct(&self) -> f64 {
        if self.sent.is_empty() {
            return 0.0;
        }
        let total = self.rtts.len() as f64;
        let lost = self.rtts.iter().filter(|r| r.is_none()).count() as f64;
        (lost / total) * 100.0
    }

    /// Compute jitter as the population standard deviation of RTT values (ms).
    /// Only pings that received a pong are considered; fewer than two
    /// samples yield 0.0.
    pub fn jitter_ms(&self) -> f64 {
        let rtts: Vec<f64> = self.rtts.iter().filter_map(|r| *r).collect();
        if rtts.len() < 2 {
            return 0.0;
        }
        let mean = rtts.iter().sum::<f64>() / rtts.len() as f64;
        let variance = rtts.iter().map(|r| (r - mean).powi(2)).sum::<f64>() / rtts.len() as f64;
        variance.sqrt()
    }

    /// Return the most recent RTT value, if any.
    pub fn latest_rtt(&self) -> Option<f64> {
        self.rtts.iter().rev().find_map(|r| *r)
    }
}
|
||||
|
||||
/// Runs a health probe against a single peer relay.
pub struct ProbeRunner {
    /// Target address and ping interval for this probe.
    config: ProbeConfig,
    /// Per-target Prometheus gauges updated by the probe loop.
    metrics: ProbeMetrics,
    /// Optional shared presence registry, updated when PresenceUpdate
    /// signals arrive during a probe session.
    presence: Option<Arc<tokio::sync::Mutex<crate::presence::PresenceRegistry>>>,
}
|
||||
|
||||
impl ProbeRunner {
    /// Create a new probe runner, registering metrics with the given registry.
    ///
    /// Panics (via `ProbeMetrics::register`) if metrics for this target are
    /// already registered with `registry`.
    pub fn new(
        config: ProbeConfig,
        registry: &Registry,
        presence: Option<Arc<tokio::sync::Mutex<crate::presence::PresenceRegistry>>>,
    ) -> Self {
        let target_str = config.target.to_string();
        let metrics = ProbeMetrics::register(&target_str, registry);
        Self { config, metrics, presence }
    }

    /// Run the probe forever. This function never returns under normal operation.
    /// It connects to the target relay, sends Ping every `interval`, and processes
    /// Pong replies to compute RTT, loss, and jitter.
    ///
    /// On any session end (clean or error) the `up` and `rtt_ms` gauges are
    /// zeroed and a reconnect is attempted after a fixed 5-second backoff.
    pub async fn run(&self) -> ! {
        loop {
            info!(target = %self.config.target, "probe connecting...");
            match self.run_session().await {
                Ok(()) => {
                    // Session ended cleanly (shouldn't happen in practice)
                    warn!(target = %self.config.target, "probe session ended, reconnecting in 5s");
                }
                Err(e) => {
                    error!(target = %self.config.target, "probe session error: {e}, reconnecting in 5s");
                }
            }
            // Reflect the disconnect in the metrics before backing off.
            // loss_pct/jitter_ms keep their last values from the session.
            self.metrics.up.set(0);
            self.metrics.rtt_ms.set(0.0);
            tokio::time::sleep(Duration::from_secs(5)).await;
        }
    }

    /// Run one probe session (one QUIC connection). Returns when the connection drops.
    ///
    /// Structure: a spawned recv task consumes Pong/PresenceUpdate signals and
    /// updates the shared `SlidingWindow` + gauges, while this function drives
    /// the ping send loop and gossips presence every 10th ping.
    async fn run_session(&self) -> anyhow::Result<()> {
        // Create a client-only endpoint on an ephemeral port
        let bind_addr: SocketAddr = "0.0.0.0:0".parse().unwrap();
        let endpoint = wzp_transport::create_endpoint(bind_addr, None)?;
        let client_cfg = wzp_transport::client_config();
        // "_probe" SNI marks this connection as a health probe to the peer.
        let conn = wzp_transport::connect(
            &endpoint,
            self.config.target,
            "_probe",
            client_cfg,
        )
        .await?;

        let transport = Arc::new(wzp_transport::QuinnTransport::new(conn));
        self.metrics.up.set(1);
        info!(target = %self.config.target, "probe connected");

        // Window over the last 60 pings (~60s at the default 1s interval),
        // shared between the send loop and the recv task.
        let window = Arc::new(Mutex::new(SlidingWindow::new(60)));

        // Spawn recv task for pong messages
        let recv_transport = transport.clone();
        let recv_window = window.clone();
        let rtt_gauge = self.metrics.rtt_ms.clone();
        let loss_gauge = self.metrics.loss_pct.clone();
        let jitter_gauge = self.metrics.jitter_ms.clone();
        let up_gauge = self.metrics.up.clone();

        let recv_presence = self.presence.clone();
        let recv_target = self.config.target;
        let recv_handle = tokio::spawn(async move {
            loop {
                match recv_transport.recv_signal().await {
                    Ok(Some(SignalMessage::Pong { timestamp_ms })) => {
                        // RTT is measured against wall-clock ms since epoch;
                        // the pong echoes the ping's send timestamp.
                        let now_ms = SystemTime::now()
                            .duration_since(UNIX_EPOCH)
                            .unwrap()
                            .as_millis() as u64;
                        let mut w = recv_window.lock().await;
                        if let Some(rtt) = w.record_pong(timestamp_ms, now_ms) {
                            rtt_gauge.set(rtt);
                        }
                        // Loss/jitter are recomputed on every pong, matched or not.
                        loss_gauge.set(w.loss_pct());
                        jitter_gauge.set(w.jitter_ms());
                    }
                    Ok(Some(SignalMessage::PresenceUpdate { fingerprints, relay_addr })) => {
                        if let Some(ref reg) = recv_presence {
                            // Parse the relay_addr; fall back to the connection target
                            let addr = relay_addr.parse().unwrap_or(recv_target);
                            let fps: std::collections::HashSet<String> = fingerprints.into_iter().collect();
                            let mut r = reg.lock().await;
                            r.update_peer(addr, fps);
                        }
                    }
                    Ok(Some(_)) => {
                        // Ignore other signals
                    }
                    Ok(None) => {
                        info!("probe recv: connection closed");
                        up_gauge.set(0);
                        break;
                    }
                    Err(e) => {
                        error!("probe recv error: {e}");
                        up_gauge.set(0);
                        break;
                    }
                }
            }
        });

        // Send ping loop (+ presence gossip every 10 pings)
        let mut interval = tokio::time::interval(self.config.interval);
        let mut ping_count: u64 = 0;
        loop {
            interval.tick().await;

            if recv_handle.is_finished() {
                // Recv task died — connection is lost
                return Ok(());
            }

            let timestamp_ms = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_millis() as u64;

            // Record the send before transmitting so a very fast pong
            // always finds its entry in the window.
            {
                let mut w = window.lock().await;
                w.record_sent(timestamp_ms);
            }

            if let Err(e) = transport
                .send_signal(&SignalMessage::Ping { timestamp_ms })
                .await
            {
                error!(target = %self.config.target, "probe ping send error: {e}");
                recv_handle.abort();
                return Err(e.into());
            }

            // Send presence update every 10 pings (~10 seconds)
            ping_count += 1;
            if ping_count % 10 == 0 {
                if let Some(ref reg) = self.presence {
                    // Snapshot fingerprints under the lock, release before sending.
                    let fps: Vec<String> = {
                        let r = reg.lock().await;
                        r.local_fingerprints().into_iter().collect()
                    };
                    // NOTE(review): relay_addr is set to the *probe target's*
                    // address, not this relay's own listen address — confirm
                    // this is intentional; a receiver would expect the
                    // advertising relay's address here.
                    let msg = SignalMessage::PresenceUpdate {
                        fingerprints: fps,
                        relay_addr: self.config.target.to_string(),
                    };
                    if let Err(e) = transport.send_signal(&msg).await {
                        warn!(target = %self.config.target, "presence update send error: {e}");
                    }
                }
            }
        }
    }
}
|
||||
|
||||
/// Coordinates multiple `ProbeRunner` instances for mesh mode.
///
/// Each relay probes all configured peers concurrently. The `ProbeMesh` owns the
/// runners and spawns them as independent tokio tasks.
pub struct ProbeMesh {
    /// One runner per configured peer relay; consumed by `run_all`.
    runners: Vec<ProbeRunner>,
}
|
||||
|
||||
impl ProbeMesh {
|
||||
/// Create a new mesh coordinator, registering metrics for every target.
|
||||
pub fn new(
|
||||
targets: Vec<SocketAddr>,
|
||||
registry: &Registry,
|
||||
presence: Option<Arc<tokio::sync::Mutex<crate::presence::PresenceRegistry>>>,
|
||||
) -> Self {
|
||||
let runners = targets
|
||||
.into_iter()
|
||||
.map(|addr| {
|
||||
let config = ProbeConfig::new(addr);
|
||||
ProbeRunner::new(config, registry, presence.clone())
|
||||
})
|
||||
.collect();
|
||||
Self { runners }
|
||||
}
|
||||
|
||||
/// Spawn all runners as concurrent tokio tasks. This consumes the mesh.
|
||||
pub async fn run_all(self) {
|
||||
let mut handles = Vec::with_capacity(self.runners.len());
|
||||
for runner in self.runners {
|
||||
let target = runner.config.target;
|
||||
info!(target = %target, "spawning mesh probe");
|
||||
handles.push(tokio::spawn(async move { runner.run().await }));
|
||||
}
|
||||
// Probes run forever; if we ever need to wait:
|
||||
for h in handles {
|
||||
let _ = h.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Number of probe targets in this mesh.
|
||||
pub fn target_count(&self) -> usize {
|
||||
self.runners.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a human-readable mesh health table from probe metrics in the registry.
|
||||
///
|
||||
/// Scans the registry for `wzp_probe_*` gauges and formats them into a table.
|
||||
pub fn mesh_summary(registry: &Registry) -> String {
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
let families = registry.gather();
|
||||
|
||||
// Collect per-target values: target -> (rtt, loss, jitter, up)
|
||||
let mut targets: BTreeMap<String, (f64, f64, f64, bool)> = BTreeMap::new();
|
||||
|
||||
for family in &families {
|
||||
let name = family.get_name();
|
||||
for metric in family.get_metric() {
|
||||
// Find the "target" label
|
||||
let target_label = metric
|
||||
.get_label()
|
||||
.iter()
|
||||
.find(|l| l.get_name() == "target");
|
||||
let target = match target_label {
|
||||
Some(l) => l.get_value().to_string(),
|
||||
None => continue,
|
||||
};
|
||||
|
||||
let entry = targets.entry(target).or_insert((0.0, 0.0, 0.0, false));
|
||||
|
||||
match name {
|
||||
"wzp_probe_rtt_ms" => entry.0 = metric.get_gauge().get_value(),
|
||||
"wzp_probe_loss_pct" => entry.1 = metric.get_gauge().get_value(),
|
||||
"wzp_probe_jitter_ms" => entry.2 = metric.get_gauge().get_value(),
|
||||
"wzp_probe_up" => entry.3 = metric.get_gauge().get_value() as i64 == 1,
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut out = String::new();
|
||||
out.push_str("Relay Mesh Health\n");
|
||||
out.push_str("\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\u{2500}\n");
|
||||
out.push_str(&format!(
|
||||
"{:<20} {:>6} {:>6} {:>7} {}\n",
|
||||
"Target", "RTT", "Loss", "Jitter", "Status"
|
||||
));
|
||||
|
||||
for (target, (rtt, loss, jitter, up)) in &targets {
|
||||
let status = if *up { "UP" } else { "DOWN" };
|
||||
out.push_str(&format!(
|
||||
"{:<20} {:>5.0}ms {:>5.1}% {:>5.0}ms {}\n",
|
||||
target, rtt, loss, jitter, status
|
||||
));
|
||||
}
|
||||
|
||||
if targets.is_empty() {
|
||||
out.push_str(" (no probe targets configured)\n");
|
||||
}
|
||||
|
||||
out
|
||||
}
|
||||
|
||||
/// Handle an incoming Ping signal by replying with a Pong carrying the same timestamp.
|
||||
/// Returns true if the message was a Ping and was handled, false otherwise.
|
||||
pub async fn handle_ping(
|
||||
transport: &wzp_transport::QuinnTransport,
|
||||
msg: &SignalMessage,
|
||||
) -> bool {
|
||||
if let SignalMessage::Ping { timestamp_ms } = msg {
|
||||
if let Err(e) = transport
|
||||
.send_signal(&SignalMessage::Pong {
|
||||
timestamp_ms: *timestamp_ms,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to send Pong reply: {e}");
|
||||
}
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use prometheus::Encoder;

    /// All four probe gauges must register under the expected names and
    /// carry the `target` const label.
    #[test]
    fn probe_metrics_register() {
        let registry = Registry::new();
        let _metrics = ProbeMetrics::register("127.0.0.1:4433", &registry);
        // (ProbeRunner::new signature changed but this test only checks ProbeMetrics)

        let encoder = prometheus::TextEncoder::new();
        let families = registry.gather();
        let mut buf = Vec::new();
        encoder.encode(&families, &mut buf).unwrap();
        let output = String::from_utf8(buf).unwrap();

        assert!(output.contains("wzp_probe_rtt_ms"), "missing wzp_probe_rtt_ms");
        assert!(output.contains("wzp_probe_loss_pct"), "missing wzp_probe_loss_pct");
        assert!(output.contains("wzp_probe_jitter_ms"), "missing wzp_probe_jitter_ms");
        assert!(output.contains("wzp_probe_up"), "missing wzp_probe_up");
        assert!(
            output.contains("target=\"127.0.0.1:4433\""),
            "missing target label"
        );
    }

    /// RTT = pong arrival time minus the echoed send timestamp.
    #[test]
    fn rtt_calculation() {
        let mut window = SlidingWindow::new(60);

        // Send a ping at t=1000
        window.record_sent(1000);
        // Receive pong at t=1050 => RTT = 50ms
        let rtt = window.record_pong(1000, 1050);
        assert_eq!(rtt, Some(50.0));

        // Send at t=2000, receive at t=2030 => RTT = 30ms
        window.record_sent(2000);
        let rtt = window.record_pong(2000, 2030);
        assert_eq!(rtt, Some(30.0));

        assert_eq!(window.latest_rtt(), Some(30.0));

        // Unknown timestamp returns None
        let rtt = window.record_pong(9999, 10000);
        assert!(rtt.is_none());
    }

    /// Loss is the fraction of window entries with no matching pong.
    #[test]
    fn loss_calculation() {
        let mut window = SlidingWindow::new(10);

        // Send 10 pings
        for i in 0..10 {
            window.record_sent(i * 1000);
        }

        // Receive pongs for 7 out of 10 (miss indices 2, 5, 8)
        for i in 0..10u64 {
            if i == 2 || i == 5 || i == 8 {
                continue; // lost
            }
            window.record_pong(i * 1000, i * 1000 + 40);
        }

        // 3 out of 10 lost = 30%
        let loss = window.loss_pct();
        assert!((loss - 30.0).abs() < 0.01, "expected ~30%, got {loss}");
    }

    /// Jitter is the population standard deviation of recorded RTTs.
    #[test]
    fn jitter_calculation() {
        let mut window = SlidingWindow::new(10);

        // Send 4 pings with known RTTs: 10, 20, 30, 40
        // Mean = 25, variance = ((15^2 + 5^2 + 5^2 + 15^2) / 4) = (225+25+25+225)/4 = 125
        // std dev = sqrt(125) ≈ 11.18
        let rtts = [10.0, 20.0, 30.0, 40.0];
        for (i, rtt) in rtts.iter().enumerate() {
            let sent = (i as u64) * 1000;
            window.record_sent(sent);
            window.record_pong(sent, sent + *rtt as u64);
        }

        let jitter = window.jitter_ms();
        assert!(
            (jitter - 11.18).abs() < 0.1,
            "expected jitter ~11.18ms, got {jitter}"
        );
    }

    /// Once full, each new send evicts the oldest entry (FIFO).
    #[test]
    fn sliding_window_eviction() {
        let mut window = SlidingWindow::new(5);

        // Fill window
        for i in 0..5 {
            window.record_sent(i * 1000);
        }
        assert_eq!(window.sent.len(), 5);

        // Add one more — oldest should be evicted
        window.record_sent(5000);
        assert_eq!(window.sent.len(), 5);
        assert_eq!(*window.sent.front().unwrap(), 1000);

        // All 5 are unanswered
        assert!((window.loss_pct() - 100.0).abs() < 0.01);
    }

    /// An empty window must report zero loss/jitter and no RTT.
    #[test]
    fn empty_window_edge_cases() {
        let window = SlidingWindow::new(60);
        assert_eq!(window.loss_pct(), 0.0);
        assert_eq!(window.jitter_ms(), 0.0);
        assert!(window.latest_rtt().is_none());
    }

    /// ProbeMesh creates one runner per target and registers its metrics.
    #[test]
    fn mesh_creates_runners() {
        let registry = Registry::new();
        let targets: Vec<SocketAddr> = vec![
            "127.0.0.1:4433".parse().unwrap(),
            "127.0.0.2:4433".parse().unwrap(),
            "127.0.0.3:4433".parse().unwrap(),
        ];
        let mesh = ProbeMesh::new(targets, &registry, None);
        assert_eq!(mesh.target_count(), 3);

        // Verify metrics were registered for each target
        let encoder = prometheus::TextEncoder::new();
        let families = registry.gather();
        let mut buf = Vec::new();
        encoder.encode(&families, &mut buf).unwrap();
        let output = String::from_utf8(buf).unwrap();

        assert!(output.contains("target=\"127.0.0.1:4433\""));
        assert!(output.contains("target=\"127.0.0.2:4433\""));
        assert!(output.contains("target=\"127.0.0.3:4433\""));
    }

    /// Summary of an empty registry still renders the header and a
    /// "no targets" placeholder row.
    #[test]
    fn mesh_summary_empty() {
        let registry = Registry::new();
        let summary = mesh_summary(&registry);

        // Should contain the header
        assert!(summary.contains("Relay Mesh Health"));
        assert!(summary.contains("Target"));
        assert!(summary.contains("RTT"));
        assert!(summary.contains("Loss"));
        assert!(summary.contains("Jitter"));
        assert!(summary.contains("Status"));
        // Should indicate no targets
        assert!(summary.contains("no probe targets configured"));
    }

    /// Summary renders one row per target with the correct UP/DOWN status.
    #[test]
    fn mesh_summary_with_targets() {
        let registry = Registry::new();
        // Register probe metrics for two targets and set values
        let m1 = ProbeMetrics::register("relay-b:4433", &registry);
        m1.rtt_ms.set(12.0);
        m1.loss_pct.set(0.0);
        m1.jitter_ms.set(2.0);
        m1.up.set(1);

        let m2 = ProbeMetrics::register("relay-c:4433", &registry);
        m2.rtt_ms.set(45.0);
        m2.loss_pct.set(0.1);
        m2.jitter_ms.set(5.0);
        m2.up.set(0);

        let summary = mesh_summary(&registry);

        assert!(summary.contains("relay-b:4433"));
        assert!(summary.contains("relay-c:4433"));
        assert!(summary.contains("UP"));
        assert!(summary.contains("DOWN"));
        // Should NOT contain "no probe targets"
        assert!(!summary.contains("no probe targets configured"));
    }

    /// A mesh with no targets is valid and reports zero runners.
    #[test]
    fn mesh_zero_targets() {
        let registry = Registry::new();
        let mesh = ProbeMesh::new(vec![], &registry, None);
        assert_eq!(mesh.target_count(), 0);
    }
}
|
||||
483
crates/wzp-relay/src/relay_link.rs
Normal file
483
crates/wzp-relay/src/relay_link.rs
Normal file
@@ -0,0 +1,483 @@
|
||||
//! Per-session relay forwarding — connect to a peer relay and forward only
|
||||
//! specific sessions' media packets there.
|
||||
//!
|
||||
//! This is the building block for relay chaining (multi-hop calls). Instead
|
||||
//! of forwarding ALL traffic to a single hardcoded relay (forward mode) or
|
||||
//! to everyone in a room (SFU mode), a `RelayLink` represents a QUIC
|
||||
//! connection to one peer relay used for forwarding a specific set of
|
||||
//! sessions.
|
||||
//!
|
||||
//! `RelayLinkManager` tracks all active relay links and their session
|
||||
//! assignments, providing get-or-connect semantics and idle cleanup.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use wzp_proto::MediaPacket;
|
||||
use wzp_proto::MediaTransport;
|
||||
|
||||
/// A connection to a peer relay for forwarding specific sessions.
///
/// Each `RelayLink` holds a QUIC transport to one peer relay and tracks
/// which session IDs are being forwarded through it. When all sessions
/// are removed the link is considered idle and can be cleaned up.
pub struct RelayLink {
    /// Address of the peer relay this link forwards to.
    target_addr: SocketAddr,
    /// The underlying QUIC transport. `None` only in unit-test stubs where
    /// no real connection is established.
    transport: Option<Arc<wzp_transport::QuinnTransport>>,
    /// Session IDs currently being forwarded through this link.
    active_sessions: HashSet<String>,
}
|
||||
|
||||
impl RelayLink {
|
||||
/// Connect to a peer relay at `target`.
|
||||
///
|
||||
/// Uses the `"_relay"` SNI to signal that this is a relay-to-relay
|
||||
/// connection (similar to `"_probe"` for health checks). The peer
|
||||
/// should skip normal client auth/handshake for relay-SNI connections.
|
||||
pub async fn connect(target: SocketAddr) -> Result<Self, anyhow::Error> {
|
||||
// Create a client-only endpoint on an OS-assigned port.
|
||||
let endpoint = wzp_transport::create_endpoint(
|
||||
"0.0.0.0:0".parse().unwrap(),
|
||||
None,
|
||||
)?;
|
||||
|
||||
let client_cfg = wzp_transport::client_config();
|
||||
let conn = wzp_transport::connect(&endpoint, target, "_relay", client_cfg).await?;
|
||||
let transport = Arc::new(wzp_transport::QuinnTransport::new(conn));
|
||||
|
||||
info!(%target, "relay link established");
|
||||
|
||||
Ok(Self {
|
||||
target_addr: target,
|
||||
transport: Some(transport),
|
||||
active_sessions: HashSet::new(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a `RelayLink` from an existing transport (useful when the
|
||||
/// connection was established through other means).
|
||||
pub fn from_transport(
|
||||
target_addr: SocketAddr,
|
||||
transport: Arc<wzp_transport::QuinnTransport>,
|
||||
) -> Self {
|
||||
Self {
|
||||
target_addr,
|
||||
transport: Some(transport),
|
||||
active_sessions: HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a stub `RelayLink` with no transport — for unit tests that
|
||||
/// only exercise session-tracking / management logic.
|
||||
#[cfg(test)]
|
||||
fn stub(target_addr: SocketAddr) -> Self {
|
||||
Self {
|
||||
target_addr,
|
||||
transport: None,
|
||||
active_sessions: HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Forward a media packet to this peer relay.
|
||||
pub async fn forward(&self, pkt: &MediaPacket) -> Result<(), anyhow::Error> {
|
||||
match &self.transport {
|
||||
Some(t) => t
|
||||
.send_media(pkt)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("relay link forward to {}: {e}", self.target_addr)),
|
||||
None => Err(anyhow::anyhow!(
|
||||
"relay link to {} has no transport (stub)",
|
||||
self.target_addr
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// The address of the peer relay this link connects to.
|
||||
pub fn target_addr(&self) -> SocketAddr {
|
||||
self.target_addr
|
||||
}
|
||||
|
||||
/// A reference to the underlying QUIC transport (if connected).
|
||||
pub fn transport(&self) -> Option<&Arc<wzp_transport::QuinnTransport>> {
|
||||
self.transport.as_ref()
|
||||
}
|
||||
|
||||
/// Add a session to be forwarded through this link.
|
||||
pub fn add_session(&mut self, session_id: &str) {
|
||||
if self.active_sessions.insert(session_id.to_string()) {
|
||||
debug!(
|
||||
target_relay = %self.target_addr,
|
||||
session = session_id,
|
||||
count = self.active_sessions.len(),
|
||||
"session added to relay link"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove a session from this link.
|
||||
pub fn remove_session(&mut self, session_id: &str) {
|
||||
if self.active_sessions.remove(session_id) {
|
||||
debug!(
|
||||
target_relay = %self.target_addr,
|
||||
session = session_id,
|
||||
count = self.active_sessions.len(),
|
||||
"session removed from relay link"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this link is forwarding any sessions.
|
||||
pub fn is_idle(&self) -> bool {
|
||||
self.active_sessions.is_empty()
|
||||
}
|
||||
|
||||
/// Number of sessions being forwarded through this link.
|
||||
pub fn session_count(&self) -> usize {
|
||||
self.active_sessions.len()
|
||||
}
|
||||
|
||||
/// Check if a specific session is being forwarded through this link.
|
||||
pub fn has_session(&self, session_id: &str) -> bool {
|
||||
self.active_sessions.contains(session_id)
|
||||
}
|
||||
|
||||
/// Close the underlying QUIC connection (no-op if no transport).
|
||||
pub async fn close(&self) {
|
||||
info!(target_relay = %self.target_addr, "closing relay link");
|
||||
if let Some(ref t) = self.transport {
|
||||
let _ = t.close().await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// RelayLinkManager
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Manages connections to multiple peer relays for per-session forwarding.
///
/// Each peer relay gets at most one `RelayLink`. Sessions are registered
/// on specific links, and idle links (no sessions) can be cleaned up.
pub struct RelayLinkManager {
    /// At most one link per peer relay address.
    links: HashMap<SocketAddr, RelayLink>,
}
|
||||
|
||||
impl RelayLinkManager {
|
||||
/// Create an empty link manager.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
links: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get or create a link to a peer relay.
|
||||
///
|
||||
/// If a link already exists it is returned. Otherwise a new QUIC
|
||||
/// connection is established using `RelayLink::connect`.
|
||||
pub async fn get_or_connect(
|
||||
&mut self,
|
||||
target: SocketAddr,
|
||||
) -> Result<&RelayLink, anyhow::Error> {
|
||||
if !self.links.contains_key(&target) {
|
||||
let link = RelayLink::connect(target).await?;
|
||||
self.links.insert(target, link);
|
||||
}
|
||||
Ok(self.links.get(&target).unwrap())
|
||||
}
|
||||
|
||||
/// Get a mutable reference to an existing link (if any).
|
||||
pub fn get_mut(&mut self, target: &SocketAddr) -> Option<&mut RelayLink> {
|
||||
self.links.get_mut(target)
|
||||
}
|
||||
|
||||
/// Get a reference to an existing link (if any).
|
||||
pub fn get(&self, target: &SocketAddr) -> Option<&RelayLink> {
|
||||
self.links.get(target)
|
||||
}
|
||||
|
||||
/// Forward a packet for a specific session to the appropriate relay.
|
||||
///
|
||||
/// The link must already exist (created via `get_or_connect`).
|
||||
pub async fn forward_to(
|
||||
&self,
|
||||
target: SocketAddr,
|
||||
pkt: &MediaPacket,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
match self.links.get(&target) {
|
||||
Some(link) => link.forward(pkt).await,
|
||||
None => Err(anyhow::anyhow!(
|
||||
"no relay link to {target} — call get_or_connect first"
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Register a session on a specific link.
|
||||
///
|
||||
/// The link must already exist. If it does not, a warning is logged
|
||||
/// and the registration is silently skipped.
|
||||
pub fn register_session(&mut self, target: SocketAddr, session_id: &str) {
|
||||
match self.links.get_mut(&target) {
|
||||
Some(link) => link.add_session(session_id),
|
||||
None => {
|
||||
warn!(
|
||||
%target,
|
||||
session = session_id,
|
||||
"cannot register session — no link to target"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Unregister a session. If the link becomes idle, close and remove it.
|
||||
pub async fn unregister_session(&mut self, target: SocketAddr, session_id: &str) {
|
||||
let should_remove = if let Some(link) = self.links.get_mut(&target) {
|
||||
link.remove_session(session_id);
|
||||
if link.is_idle() {
|
||||
link.close().await;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
if should_remove {
|
||||
self.links.remove(&target);
|
||||
info!(%target, "idle relay link removed");
|
||||
}
|
||||
}
|
||||
|
||||
/// Close all links and clear the manager.
|
||||
pub async fn close_all(&mut self) {
|
||||
for (addr, link) in self.links.drain() {
|
||||
info!(%addr, "closing relay link (shutdown)");
|
||||
link.close().await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Number of active links.
|
||||
pub fn link_count(&self) -> usize {
|
||||
self.links.len()
|
||||
}
|
||||
|
||||
/// Total number of sessions being forwarded across all links.
|
||||
pub fn session_count(&self) -> usize {
|
||||
self.links.values().map(|l| l.session_count()).sum()
|
||||
}
|
||||
|
||||
/// Insert a pre-built relay link (for testing or manual setup).
|
||||
pub fn insert(&mut self, link: RelayLink) {
|
||||
self.links.insert(link.target_addr(), link);
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn addr(s: &str) -> SocketAddr {
|
||||
s.parse().unwrap()
|
||||
}
|
||||
|
||||
// ---------- RelayLink session tracking ----------
|
||||
|
||||
#[test]
|
||||
fn link_manager_tracks_sessions() {
|
||||
let mut mgr = RelayLinkManager::new();
|
||||
let target1 = addr("10.0.0.2:4433");
|
||||
|
||||
let mut link = RelayLink::stub(target1);
|
||||
link.add_session("session-aaa");
|
||||
link.add_session("session-bbb");
|
||||
mgr.insert(link);
|
||||
|
||||
assert_eq!(mgr.link_count(), 1);
|
||||
assert_eq!(mgr.session_count(), 2);
|
||||
|
||||
// Register another session on the same link
|
||||
mgr.register_session(target1, "session-ccc");
|
||||
assert_eq!(mgr.session_count(), 3);
|
||||
|
||||
// Verify individual link
|
||||
let link_ref = mgr.get(&target1).unwrap();
|
||||
assert!(link_ref.has_session("session-aaa"));
|
||||
assert!(link_ref.has_session("session-bbb"));
|
||||
assert!(link_ref.has_session("session-ccc"));
|
||||
assert!(!link_ref.has_session("unknown"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn link_manager_idle_detection() {
|
||||
let mut link = RelayLink::stub(addr("10.0.0.3:4433"));
|
||||
|
||||
// Empty link is idle
|
||||
assert!(link.is_idle());
|
||||
assert_eq!(link.session_count(), 0);
|
||||
|
||||
// Add a session — no longer idle
|
||||
link.add_session("sess-1");
|
||||
assert!(!link.is_idle());
|
||||
assert_eq!(link.session_count(), 1);
|
||||
|
||||
// Remove it — idle again
|
||||
link.remove_session("sess-1");
|
||||
assert!(link.is_idle());
|
||||
assert_eq!(link.session_count(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn session_forward_signal_roundtrip() {
|
||||
use wzp_proto::SignalMessage;
|
||||
|
||||
// SessionForward roundtrip
|
||||
let msg = SignalMessage::SessionForward {
|
||||
session_id: "abcd1234".to_string(),
|
||||
target_fingerprint: "deadbeef".to_string(),
|
||||
source_relay: "10.0.0.1:4433".to_string(),
|
||||
};
|
||||
let json = serde_json::to_string(&msg).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
match decoded {
|
||||
SignalMessage::SessionForward {
|
||||
session_id,
|
||||
target_fingerprint,
|
||||
source_relay,
|
||||
} => {
|
||||
assert_eq!(session_id, "abcd1234");
|
||||
assert_eq!(target_fingerprint, "deadbeef");
|
||||
assert_eq!(source_relay, "10.0.0.1:4433");
|
||||
}
|
||||
_ => panic!("expected SessionForward variant"),
|
||||
}
|
||||
|
||||
// SessionForwardAck roundtrip
|
||||
let ack = SignalMessage::SessionForwardAck {
|
||||
session_id: "abcd1234".to_string(),
|
||||
room_name: "relay-room-42".to_string(),
|
||||
};
|
||||
let json = serde_json::to_string(&ack).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
match decoded {
|
||||
SignalMessage::SessionForwardAck {
|
||||
session_id,
|
||||
room_name,
|
||||
} => {
|
||||
assert_eq!(session_id, "abcd1234");
|
||||
assert_eq!(room_name, "relay-room-42");
|
||||
}
|
||||
_ => panic!("expected SessionForwardAck variant"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
fn link_manager_multi_target() {
    let mut mgr = RelayLinkManager::new();

    // Three targets carrying 2, 1 and 3 sessions respectively (6 total).
    let fixtures = [
        (addr("10.0.0.2:4433"), &["s1", "s2"][..]),
        (addr("10.0.0.3:4433"), &["s3"][..]),
        (addr("10.0.0.4:4433"), &["s4", "s5", "s6"][..]),
    ];
    for (target, sessions) in &fixtures {
        let mut link = RelayLink::stub(*target);
        for s in sessions.iter() {
            link.add_session(s);
        }
        mgr.insert(link);
    }

    assert_eq!(mgr.link_count(), 3);
    assert_eq!(mgr.session_count(), 6); // 2 + 1 + 3

    // Each link reports exactly the sessions it was given.
    for (target, sessions) in &fixtures {
        assert_eq!(mgr.get(target).unwrap().session_count(), sessions.len());
    }
}
|
||||
|
||||
#[test]
fn link_manager_cleanup() {
    let mut mgr = RelayLinkManager::new();
    let target = addr("10.0.0.5:4433");

    let mut link = RelayLink::stub(target);
    for s in ["s1", "s2", "s3"] {
        link.add_session(s);
    }
    mgr.insert(link);

    assert_eq!(mgr.link_count(), 1);
    assert_eq!(mgr.session_count(), 3);

    // Drain sessions through the manager's mutable access. The async
    // unregister_session path cannot run against stub links, so this
    // exercises the synchronous management path directly.
    {
        let link = mgr.get_mut(&target).unwrap();
        link.remove_session("s1");
        assert!(!link.is_idle());
        link.remove_session("s2");
        assert!(!link.is_idle());
        link.remove_session("s3");
        assert!(link.is_idle());
    }

    // All sessions removed — link is idle but still registered.
    assert_eq!(mgr.session_count(), 0);
    assert!(mgr.get(&target).unwrap().is_idle());

    // Mirror unregister_session's final step: drop the idle link.
    mgr.links.remove(&target);
    assert_eq!(mgr.link_count(), 0);
}
|
||||
|
||||
#[test]
fn register_session_on_nonexistent_link_is_noop() {
    let mut mgr = RelayLinkManager::new();
    // Registering against an unknown target must not panic — it only warns,
    // and the manager's state stays empty.
    mgr.register_session(addr("10.0.0.99:4433"), "orphan-session");
    assert_eq!(mgr.link_count(), 0);
    assert_eq!(mgr.session_count(), 0);
}
|
||||
|
||||
#[test]
fn forward_to_nonexistent_link_errors() {
    let mgr = RelayLinkManager::new();
    let target = addr("10.0.0.99:4433");

    // Minimal well-formed packet; header values are arbitrary but valid.
    let header = wzp_proto::packet::MediaHeader {
        version: 0,
        is_repair: false,
        codec_id: wzp_proto::CodecId::Opus16k,
        has_quality_report: false,
        fec_ratio_encoded: 0,
        seq: 1,
        timestamp: 100,
        fec_block: 0,
        fec_symbol: 0,
        reserved: 0,
        csrc_count: 0,
    };
    let pkt = MediaPacket {
        header,
        payload: bytes::Bytes::from_static(b"test"),
        quality_report: None,
    };

    // Drive the async forward_to() on a single-threaded runtime.
    let rt = tokio::runtime::Builder::new_current_thread()
        .build()
        .unwrap();
    let result = rt.block_on(mgr.forward_to(target, &pkt));
    assert!(result.is_err());
    assert!(result.unwrap_err().to_string().contains("no relay link"));
}
|
||||
}
|
||||
644
crates/wzp-relay/src/room.rs
Normal file
644
crates/wzp-relay/src/room.rs
Normal file
@@ -0,0 +1,644 @@
|
||||
//! Room management for multi-party calls.
|
||||
//!
|
||||
//! Each room holds N participants. When one participant sends a media packet,
|
||||
//! the relay forwards it to all other participants in the room (SFU model).
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use bytes::Bytes;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
use wzp_proto::packet::TrunkFrame;
|
||||
use wzp_proto::MediaTransport;
|
||||
|
||||
use crate::metrics::RelayMetrics;
|
||||
use crate::trunk::TrunkBatcher;
|
||||
|
||||
/// Unique participant ID within a room.
pub type ParticipantId = u64;

// Process-wide monotonic counter shared by all rooms; starts at 1.
static NEXT_PARTICIPANT_ID: AtomicU64 = AtomicU64::new(1);

/// Allocate the next participant id.
/// Relaxed ordering is sufficient: only uniqueness of the returned value
/// matters, not ordering relative to other memory operations.
fn next_id() -> ParticipantId {
    NEXT_PARTICIPANT_ID.fetch_add(1, Ordering::Relaxed)
}
|
||||
|
||||
/// How to send data to a participant — either via QUIC transport or WebSocket channel.
#[derive(Clone)]
pub enum ParticipantSender {
    /// Direct QUIC peer: packets go out through the shared transport handle.
    Quic(Arc<wzp_transport::QuinnTransport>),
    /// WebSocket peer: raw bytes are queued on a bounded mpsc channel
    /// (see `send_raw`, which uses non-blocking `try_send`).
    WebSocket(tokio::sync::mpsc::Sender<Bytes>),
}
|
||||
|
||||
impl ParticipantSender {
|
||||
/// Send raw bytes to this participant.
|
||||
pub async fn send_raw(&self, data: &[u8]) -> Result<(), String> {
|
||||
match self {
|
||||
ParticipantSender::WebSocket(tx) => {
|
||||
tx.try_send(Bytes::copy_from_slice(data))
|
||||
.map_err(|e| format!("ws send: {e}"))
|
||||
}
|
||||
ParticipantSender::Quic(transport) => {
|
||||
let pkt = wzp_proto::MediaPacket {
|
||||
header: wzp_proto::packet::MediaHeader::default_pcm(),
|
||||
payload: Bytes::copy_from_slice(data),
|
||||
quality_report: None,
|
||||
};
|
||||
transport.send_media(&pkt).await.map_err(|e| format!("quic send: {e}"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this is a QUIC participant.
|
||||
pub fn is_quic(&self) -> bool {
|
||||
matches!(self, ParticipantSender::Quic(_))
|
||||
}
|
||||
|
||||
/// Get the QUIC transport if this is a QUIC participant.
|
||||
pub fn as_quic(&self) -> Option<&Arc<wzp_transport::QuinnTransport>> {
|
||||
match self {
|
||||
ParticipantSender::Quic(t) => Some(t),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A participant in a room.
struct Participant {
    // Unique id allocated by `next_id()` at join time.
    id: ParticipantId,
    // Remote socket address; currently unused after logging at join
    // (hence the leading underscore).
    _addr: std::net::SocketAddr,
    // Outgoing path for forwarded media (QUIC transport or WS channel).
    sender: ParticipantSender,
}
|
||||
|
||||
/// A room holding multiple participants.
struct Room {
    // Join-ordered list; removal is by id via `retain`, preserving order.
    participants: Vec<Participant>,
}
|
||||
|
||||
impl Room {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
participants: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn add(&mut self, addr: std::net::SocketAddr, sender: ParticipantSender) -> ParticipantId {
|
||||
let id = next_id();
|
||||
info!(room_size = self.participants.len() + 1, participant = id, %addr, "joined room");
|
||||
self.participants.push(Participant { id, _addr: addr, sender });
|
||||
id
|
||||
}
|
||||
|
||||
fn remove(&mut self, id: ParticipantId) {
|
||||
self.participants.retain(|p| p.id != id);
|
||||
info!(room_size = self.participants.len(), participant = id, "left room");
|
||||
}
|
||||
|
||||
fn others(&self, exclude_id: ParticipantId) -> Vec<ParticipantSender> {
|
||||
self.participants
|
||||
.iter()
|
||||
.filter(|p| p.id != exclude_id)
|
||||
.map(|p| p.sender.clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn is_empty(&self) -> bool {
|
||||
self.participants.is_empty()
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.participants.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Manages all rooms on the relay.
pub struct RoomManager {
    // Live rooms keyed by room name; emptied rooms are removed in `leave`.
    rooms: HashMap<String, Room>,
    /// Room access control list. Maps hashed room name → allowed fingerprints.
    /// When `None`, rooms are open (no auth mode). When `Some`, only listed
    /// fingerprints can join the corresponding room.
    acl: Option<HashMap<String, HashSet<String>>>,
}
|
||||
|
||||
impl RoomManager {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
rooms: HashMap::new(),
|
||||
acl: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a room manager with ACL enforcement enabled.
|
||||
pub fn with_acl() -> Self {
|
||||
Self {
|
||||
rooms: HashMap::new(),
|
||||
acl: Some(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Grant a fingerprint access to a room.
|
||||
pub fn allow(&mut self, room_name: &str, fingerprint: &str) {
|
||||
if let Some(ref mut acl) = self.acl {
|
||||
acl.entry(room_name.to_string())
|
||||
.or_default()
|
||||
.insert(fingerprint.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a fingerprint is authorized to join a room.
|
||||
/// Returns true if ACL is disabled (open mode) or the fingerprint is in the allow list.
|
||||
pub fn is_authorized(&self, room_name: &str, fingerprint: Option<&str>) -> bool {
|
||||
match (&self.acl, fingerprint) {
|
||||
(None, _) => true, // no ACL = open
|
||||
(Some(_), None) => false, // ACL enabled but no fingerprint
|
||||
(Some(acl), Some(fp)) => {
|
||||
// Room not in ACL = open room (allow anyone authenticated)
|
||||
match acl.get(room_name) {
|
||||
None => true,
|
||||
Some(allowed) => allowed.contains(fp),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Join a room. Returns the participant ID or an error if unauthorized.
|
||||
pub fn join(
|
||||
&mut self,
|
||||
room_name: &str,
|
||||
addr: std::net::SocketAddr,
|
||||
sender: ParticipantSender,
|
||||
fingerprint: Option<&str>,
|
||||
) -> Result<ParticipantId, String> {
|
||||
if !self.is_authorized(room_name, fingerprint) {
|
||||
warn!(room = room_name, fingerprint = ?fingerprint, "unauthorized room join attempt");
|
||||
return Err("not authorized for this room".to_string());
|
||||
}
|
||||
let room = self.rooms.entry(room_name.to_string()).or_insert_with(Room::new);
|
||||
Ok(room.add(addr, sender))
|
||||
}
|
||||
|
||||
/// Join a room via WebSocket. Convenience wrapper around `join()`.
|
||||
pub fn join_ws(
|
||||
&mut self,
|
||||
room_name: &str,
|
||||
addr: std::net::SocketAddr,
|
||||
sender: tokio::sync::mpsc::Sender<Bytes>,
|
||||
fingerprint: Option<&str>,
|
||||
) -> Result<ParticipantId, String> {
|
||||
self.join(room_name, addr, ParticipantSender::WebSocket(sender), fingerprint)
|
||||
}
|
||||
|
||||
/// Leave a room. Removes the room if empty.
|
||||
pub fn leave(&mut self, room_name: &str, participant_id: ParticipantId) {
|
||||
if let Some(room) = self.rooms.get_mut(room_name) {
|
||||
room.remove(participant_id);
|
||||
if room.is_empty() {
|
||||
self.rooms.remove(room_name);
|
||||
info!(room = room_name, "room closed (empty)");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get senders for all OTHER participants in a room.
|
||||
pub fn others(
|
||||
&self,
|
||||
room_name: &str,
|
||||
participant_id: ParticipantId,
|
||||
) -> Vec<ParticipantSender> {
|
||||
self.rooms
|
||||
.get(room_name)
|
||||
.map(|r| r.others(participant_id))
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Get room size.
|
||||
pub fn room_size(&self, room_name: &str) -> usize {
|
||||
self.rooms.get(room_name).map(|r| r.len()).unwrap_or(0)
|
||||
}
|
||||
|
||||
/// List all rooms with their sizes.
|
||||
pub fn list(&self) -> Vec<(String, usize)> {
|
||||
self.rooms.iter().map(|(k, v)| (k.clone(), v.len())).collect()
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// TrunkedForwarder — wraps a transport and batches outgoing media into trunk
|
||||
// frames so multiple packets ride a single QUIC datagram.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Wraps a [`QuinnTransport`] with a [`TrunkBatcher`] so that small media
/// packets are accumulated and sent together in a single QUIC datagram.
pub struct TrunkedForwarder {
    // Destination peer's transport handle.
    transport: Arc<wzp_transport::QuinnTransport>,
    // Accumulates encoded packets until full or explicitly flushed.
    batcher: TrunkBatcher,
    // 2-byte tag attached to every batched entry so the receiver can
    // demultiplex packets by session.
    session_id: [u8; 2],
}
|
||||
|
||||
impl TrunkedForwarder {
|
||||
/// Create a new trunked forwarder.
|
||||
///
|
||||
/// `session_id` tags every entry pushed into the batcher so the receiver
|
||||
/// can demultiplex packets by session.
|
||||
pub fn new(transport: Arc<wzp_transport::QuinnTransport>, session_id: [u8; 2]) -> Self {
|
||||
Self {
|
||||
transport,
|
||||
batcher: TrunkBatcher::new(),
|
||||
session_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Push a media packet into the batcher. If the batcher is full it will
|
||||
/// flush automatically and the resulting trunk frame is sent immediately.
|
||||
pub async fn send(&mut self, pkt: &wzp_proto::MediaPacket) -> anyhow::Result<()> {
|
||||
let payload: Bytes = pkt.to_bytes();
|
||||
if let Some(frame) = self.batcher.push(self.session_id, payload) {
|
||||
self.send_frame(&frame)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Flush any pending packets — called on the 5 ms timer tick.
|
||||
pub async fn flush(&mut self) -> anyhow::Result<()> {
|
||||
if let Some(frame) = self.batcher.flush() {
|
||||
self.send_frame(&frame)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Return the flush interval configured on the inner batcher.
|
||||
pub fn flush_interval(&self) -> Duration {
|
||||
self.batcher.flush_interval
|
||||
}
|
||||
|
||||
fn send_frame(&self, frame: &TrunkFrame) -> anyhow::Result<()> {
|
||||
self.transport.send_trunk(frame).map_err(|e| anyhow::anyhow!(e))
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// run_participant — the hot-path forwarding loop
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Run the receive loop for one participant in a room.
|
||||
/// Forwards all received packets to every other participant.
|
||||
///
|
||||
/// When `trunking_enabled` is true, outgoing packets are accumulated per-peer
|
||||
/// into [`TrunkedForwarder`]s and flushed every 5 ms or when the batcher is
|
||||
/// full, reducing QUIC datagram overhead.
|
||||
pub async fn run_participant(
|
||||
room_mgr: Arc<Mutex<RoomManager>>,
|
||||
room_name: String,
|
||||
participant_id: ParticipantId,
|
||||
transport: Arc<wzp_transport::QuinnTransport>,
|
||||
metrics: Arc<RelayMetrics>,
|
||||
session_id: &str,
|
||||
trunking_enabled: bool,
|
||||
) {
|
||||
if trunking_enabled {
|
||||
run_participant_trunked(
|
||||
room_mgr, room_name, participant_id, transport, metrics, session_id,
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
run_participant_plain(
|
||||
room_mgr, room_name, participant_id, transport, metrics, session_id,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Plain (non-trunked) forwarding loop — original behaviour.
///
/// Receives media packets from `transport` until the connection ends, and
/// fans each one out to every other participant in the room. On exit the
/// participant is removed from the room (which closes the room if empty).
async fn run_participant_plain(
    room_mgr: Arc<Mutex<RoomManager>>,
    room_name: String,
    participant_id: ParticipantId,
    transport: Arc<wzp_transport::QuinnTransport>,
    metrics: Arc<RelayMetrics>,
    session_id: &str,
) {
    let addr = transport.connection().remote_address();
    let mut packets_forwarded = 0u64;

    loop {
        // Block on the next inbound packet; every termination condition
        // (clean close, timeout, reset, error) breaks to the cleanup below.
        let pkt = match transport.recv_media().await {
            Ok(Some(pkt)) => pkt,
            Ok(None) => {
                info!(%addr, participant = participant_id, "disconnected");
                break;
            }
            Err(e) => {
                // Expected shutdown errors are logged at info, everything
                // else at error. NOTE(review): classification is by message
                // substring, which is fragile if error texts change.
                let msg = e.to_string();
                if msg.contains("timed out") || msg.contains("reset") || msg.contains("closed") {
                    info!(%addr, participant = participant_id, "connection closed: {e}");
                } else {
                    error!(%addr, participant = participant_id, "recv error: {e}");
                }
                break;
            }
        };

        // Update per-session quality metrics if a quality report is present
        if let Some(ref report) = pkt.quality_report {
            metrics.update_session_quality(session_id, report);
        }

        // Get current list of other participants. The room lock is held only
        // long enough to clone the sender list.
        let others = {
            let mgr = room_mgr.lock().await;
            mgr.others(&room_name, participant_id)
        };

        // Forward to all others. Send failures are deliberately ignored —
        // a failing peer is cleaned up by its own receive loop.
        let pkt_bytes = pkt.payload.len() as u64;
        for other in &others {
            match other {
                ParticipantSender::Quic(t) => {
                    let _ = t.send_media(&pkt).await;
                }
                ParticipantSender::WebSocket(_) => {
                    // WS clients receive raw payload bytes
                    let _ = other.send_raw(&pkt.payload).await;
                }
            }
        }

        // Metrics count one forward per receiver (fan-out), not per inbound
        // packet; bytes are payload bytes only (header excluded).
        let fan_out = others.len() as u64;
        metrics.packets_forwarded.inc_by(fan_out);
        metrics.bytes_forwarded.inc_by(pkt_bytes * fan_out);
        packets_forwarded += 1;
        // Periodic progress log every 500 inbound packets.
        if packets_forwarded % 500 == 0 {
            let room_size = {
                let mgr = room_mgr.lock().await;
                mgr.room_size(&room_name)
            };
            info!(
                room = %room_name,
                participant = participant_id,
                forwarded = packets_forwarded,
                room_size,
                "participant stats"
            );
        }
    }

    // Clean up: leave the room (removes the room entirely if now empty).
    let mut mgr = room_mgr.lock().await;
    mgr.leave(&room_name, participant_id);
}
|
||||
|
||||
/// Trunked forwarding loop — batches outgoing packets per peer.
|
||||
async fn run_participant_trunked(
|
||||
room_mgr: Arc<Mutex<RoomManager>>,
|
||||
room_name: String,
|
||||
participant_id: ParticipantId,
|
||||
transport: Arc<wzp_transport::QuinnTransport>,
|
||||
metrics: Arc<RelayMetrics>,
|
||||
session_id: &str,
|
||||
) {
|
||||
use std::collections::HashMap;
|
||||
|
||||
let addr = transport.connection().remote_address();
|
||||
let mut packets_forwarded = 0u64;
|
||||
|
||||
// Per-peer TrunkedForwarders, keyed by the raw pointer of the peer
|
||||
// transport (stable for the Arc's lifetime). We use the remote address
|
||||
// string as the key since it is unique per connection.
|
||||
let mut forwarders: HashMap<std::net::SocketAddr, TrunkedForwarder> = HashMap::new();
|
||||
|
||||
// Derive a 2-byte session tag from the session_id hex string.
|
||||
let sid_bytes: [u8; 2] = parse_session_id_bytes(session_id);
|
||||
|
||||
let mut flush_interval = tokio::time::interval(Duration::from_millis(5));
|
||||
// Don't let missed ticks pile up — skip them and move on.
|
||||
flush_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
biased;
|
||||
|
||||
result = transport.recv_media() => {
|
||||
let pkt = match result {
|
||||
Ok(Some(pkt)) => pkt,
|
||||
Ok(None) => {
|
||||
info!(%addr, participant = participant_id, "disconnected");
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
error!(%addr, participant = participant_id, "recv error: {e}");
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(ref report) = pkt.quality_report {
|
||||
metrics.update_session_quality(session_id, report);
|
||||
}
|
||||
|
||||
let others = {
|
||||
let mgr = room_mgr.lock().await;
|
||||
mgr.others(&room_name, participant_id)
|
||||
};
|
||||
|
||||
let pkt_bytes = pkt.payload.len() as u64;
|
||||
for other in &others {
|
||||
match other {
|
||||
ParticipantSender::Quic(t) => {
|
||||
let peer_addr = t.connection().remote_address();
|
||||
let fwd = forwarders
|
||||
.entry(peer_addr)
|
||||
.or_insert_with(|| TrunkedForwarder::new(t.clone(), sid_bytes));
|
||||
if let Err(e) = fwd.send(&pkt).await {
|
||||
let _ = e;
|
||||
}
|
||||
}
|
||||
ParticipantSender::WebSocket(_) => {
|
||||
// WS clients bypass trunking — send raw payload directly
|
||||
let _ = other.send_raw(&pkt.payload).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let fan_out = others.len() as u64;
|
||||
metrics.packets_forwarded.inc_by(fan_out);
|
||||
metrics.bytes_forwarded.inc_by(pkt_bytes * fan_out);
|
||||
packets_forwarded += 1;
|
||||
if packets_forwarded % 500 == 0 {
|
||||
let room_size = {
|
||||
let mgr = room_mgr.lock().await;
|
||||
mgr.room_size(&room_name)
|
||||
};
|
||||
info!(
|
||||
room = %room_name,
|
||||
participant = participant_id,
|
||||
forwarded = packets_forwarded,
|
||||
room_size,
|
||||
"participant stats (trunked)"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
_ = flush_interval.tick() => {
|
||||
for fwd in forwarders.values_mut() {
|
||||
if let Err(e) = fwd.flush().await {
|
||||
let _ = e;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Final flush — send any remaining buffered packets.
|
||||
for fwd in forwarders.values_mut() {
|
||||
let _ = fwd.flush().await;
|
||||
}
|
||||
|
||||
let mut mgr = room_mgr.lock().await;
|
||||
mgr.leave(&room_name, participant_id);
|
||||
}
|
||||
|
||||
/// Parse up to the first 2 bytes of a hex session-id string into `[u8; 2]`.
///
/// The string is scanned two characters at a time; pairs that are not valid
/// hex (or an odd trailing character) are skipped, and missing positions are
/// left as zero.
fn parse_session_id_bytes(session_id: &str) -> [u8; 2] {
    let mut out = [0u8; 2];
    let mut filled = 0usize;
    let mut pos = 0usize;
    while filled < out.len() && pos < session_id.len() {
        if let Some(pair) = session_id.get(pos..pos + 2) {
            if let Ok(byte) = u8::from_str_radix(pair, 16) {
                out[filled] = byte;
                filled += 1;
            }
        }
        pos += 2;
    }
    out
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn room_join_leave() {
        // A fresh manager has no rooms; size lookups return 0.
        let mut mgr = RoomManager::new();
        assert_eq!(mgr.room_size("test"), 0);
        assert!(mgr.list().is_empty());
    }

    #[test]
    fn acl_open_mode_allows_all() {
        // Open mode (acl = None): everyone is authorized, with or without
        // a fingerprint.
        let mgr = RoomManager::new();
        assert!(mgr.is_authorized("any-room", None));
        assert!(mgr.is_authorized("any-room", Some("abc")));
    }

    #[test]
    fn acl_enforced_requires_fingerprint() {
        // ACL mode: an unauthenticated caller (no fingerprint) is rejected.
        let mgr = RoomManager::with_acl();
        assert!(!mgr.is_authorized("room1", None));
        // Room not in ACL = open to any authenticated user
        assert!(mgr.is_authorized("room1", Some("abc")));
    }

    #[test]
    fn acl_restricts_to_allowed() {
        // Once a room has an allow list, only listed fingerprints may join.
        let mut mgr = RoomManager::with_acl();
        mgr.allow("room1", "alice");
        mgr.allow("room1", "bob");
        assert!(mgr.is_authorized("room1", Some("alice")));
        assert!(mgr.is_authorized("room1", Some("bob")));
        assert!(!mgr.is_authorized("room1", Some("eve")));
    }

    #[test]
    fn parse_session_id_bytes_works() {
        assert_eq!(parse_session_id_bytes("abcd"), [0xab, 0xcd]);
        assert_eq!(parse_session_id_bytes("ff00"), [0xff, 0x00]);
        assert_eq!(parse_session_id_bytes(""), [0x00, 0x00]);
        // Longer hex strings: only first 2 bytes taken
        assert_eq!(parse_session_id_bytes("aabbccdd"), [0xaa, 0xbb]);
    }

    /// Helper: create a minimal MediaPacket with the given payload bytes.
    /// Header values are arbitrary but valid; no quality report attached.
    fn make_test_packet(payload: &[u8]) -> wzp_proto::MediaPacket {
        wzp_proto::MediaPacket {
            header: wzp_proto::packet::MediaHeader {
                version: 0,
                is_repair: false,
                codec_id: wzp_proto::CodecId::Opus16k,
                has_quality_report: false,
                fec_ratio_encoded: 0,
                seq: 1,
                timestamp: 100,
                fec_block: 0,
                fec_symbol: 0,
                reserved: 0,
                csrc_count: 0,
            },
            payload: Bytes::from(payload.to_vec()),
            quality_report: None,
        }
    }

    /// Push 3 packets into a batcher (simulating TrunkedForwarder.send),
    /// then flush and verify all 3 appear in a single TrunkFrame.
    #[test]
    fn trunked_forwarder_batches() {
        let session_id: [u8; 2] = [0x00, 0x01];
        let mut batcher = TrunkBatcher::new();
        // Ensure max_entries is high enough that 3 packets don't auto-flush.
        batcher.max_entries = 10;
        batcher.max_bytes = 4096;

        let pkts = [
            make_test_packet(b"aaa"),
            make_test_packet(b"bbb"),
            make_test_packet(b"ccc"),
        ];

        for pkt in &pkts {
            let payload = pkt.to_bytes();
            let flushed = batcher.push(session_id, payload);
            // Should NOT auto-flush — we are below max_entries.
            assert!(flushed.is_none(), "unexpected auto-flush");
        }

        // Explicit flush (simulates the 5 ms timer tick).
        let frame = batcher.flush().expect("expected a frame with 3 entries");
        assert_eq!(frame.len(), 3);
        // Every entry carries the session tag it was pushed with.
        for entry in &frame.packets {
            assert_eq!(entry.session_id, session_id);
        }
    }

    /// Push exactly max_entries packets and verify the batcher auto-flushes
    /// on the last push (simulating TrunkedForwarder.send triggering a send).
    #[test]
    fn trunked_forwarder_auto_flushes() {
        let session_id: [u8; 2] = [0x00, 0x02];
        let mut batcher = TrunkBatcher::new();
        batcher.max_entries = 5;
        batcher.max_bytes = 8192;

        let pkt = make_test_packet(b"hello");
        let mut auto_flushed: Option<wzp_proto::packet::TrunkFrame> = None;

        for i in 0..5 {
            let payload = pkt.to_bytes();
            if let Some(frame) = batcher.push(session_id, payload) {
                assert!(auto_flushed.is_none(), "should auto-flush exactly once");
                auto_flushed = Some(frame);
                // The auto-flush should happen on the 5th push (max_entries = 5).
                assert_eq!(i, 4, "expected auto-flush on the last push");
            }
        }

        let frame = auto_flushed.expect("batcher should have auto-flushed at max_entries");
        assert_eq!(frame.len(), 5);
        for entry in &frame.packets {
            assert_eq!(entry.session_id, session_id);
        }

        // Batcher should now be empty — nothing to flush.
        assert!(batcher.flush().is_none());
    }
}
|
||||
265
crates/wzp-relay/src/route.rs
Normal file
265
crates/wzp-relay/src/route.rs
Normal file
@@ -0,0 +1,265 @@
|
||||
//! Route resolution — given a target fingerprint, find the relay chain
|
||||
//! needed to reach that user.
|
||||
//!
|
||||
//! Uses the [`PresenceRegistry`] as its data source. Currently supports
|
||||
//! single-hop resolution (local or direct peer). The `resolve_multi_hop`
|
||||
//! method has the signature for future multi-hop expansion but falls back
|
||||
//! to single-hop for now.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::presence::{PresenceLocation, PresenceRegistry};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Route type
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// The resolved route to a target fingerprint.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub enum Route {
    /// Target is connected to this relay directly.
    Local,
    /// Target is on a directly connected peer relay.
    DirectPeer(SocketAddr),
    /// Target is reachable via a chain of relays (multi-hop).
    /// Not produced by the current single-hop `resolve`; reserved for the
    /// planned multi-hop expansion.
    Chain(Vec<SocketAddr>),
    /// Target not found in any known relay.
    NotFound,
}
|
||||
|
||||
impl std::fmt::Display for Route {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Route::Local => write!(f, "local"),
|
||||
Route::DirectPeer(addr) => write!(f, "direct_peer({})", addr),
|
||||
Route::Chain(chain) => {
|
||||
let addrs: Vec<String> = chain.iter().map(|a| a.to_string()).collect();
|
||||
write!(f, "chain({})", addrs.join(" -> "))
|
||||
}
|
||||
Route::NotFound => write!(f, "not_found"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// RouteResolver
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Resolves fingerprints to relay routes using the presence registry.
pub struct RouteResolver {
    /// Our own relay address (how peers know us); prepended to the relay
    /// chains emitted by `route_json`.
    local_addr: SocketAddr,
}
|
||||
|
||||
impl RouteResolver {
|
||||
/// Create a new route resolver for the relay at `local_addr`.
|
||||
pub fn new(local_addr: SocketAddr) -> Self {
|
||||
Self { local_addr }
|
||||
}
|
||||
|
||||
/// Our local relay address.
|
||||
pub fn local_addr(&self) -> SocketAddr {
|
||||
self.local_addr
|
||||
}
|
||||
|
||||
/// Look up a fingerprint in the registry and return the route.
|
||||
///
|
||||
/// - If `registry.lookup()` returns `Local` -> `Route::Local`
|
||||
/// - If returns `Remote(addr)` -> `Route::DirectPeer(addr)`
|
||||
/// - If not found -> `Route::NotFound`
|
||||
pub fn resolve(&self, registry: &PresenceRegistry, target_fingerprint: &str) -> Route {
|
||||
match registry.lookup(target_fingerprint) {
|
||||
Some(PresenceLocation::Local) => Route::Local,
|
||||
Some(PresenceLocation::Remote(addr)) => Route::DirectPeer(addr),
|
||||
None => Route::NotFound,
|
||||
}
|
||||
}
|
||||
|
||||
/// Multi-hop route resolution (future expansion).
|
||||
///
|
||||
/// For now this is equivalent to `resolve()` — single-hop only.
|
||||
/// When multi-hop is implemented, this will query peers transitively
|
||||
/// up to `max_hops` relays deep, using `RouteQuery` / `RouteResponse`
|
||||
/// signals over probe connections.
|
||||
pub fn resolve_multi_hop(
|
||||
&self,
|
||||
registry: &PresenceRegistry,
|
||||
target: &str,
|
||||
_max_hops: usize,
|
||||
) -> Route {
|
||||
// Phase 1: single-hop only (same as resolve).
|
||||
// Future: if resolve returns NotFound and max_hops > 0,
|
||||
// send RouteQuery to each known peer with ttl = max_hops - 1,
|
||||
// collect RouteResponse, and build a Chain.
|
||||
self.resolve(registry, target)
|
||||
}
|
||||
|
||||
/// Build a JSON-serializable route response for the HTTP API.
|
||||
pub fn route_json(
|
||||
&self,
|
||||
fingerprint: &str,
|
||||
route: &Route,
|
||||
) -> serde_json::Value {
|
||||
let (route_type, relay_chain) = match route {
|
||||
Route::Local => ("local", vec![self.local_addr.to_string()]),
|
||||
Route::DirectPeer(addr) => ("direct_peer", vec![self.local_addr.to_string(), addr.to_string()]),
|
||||
Route::Chain(chain) => {
|
||||
let mut addrs = vec![self.local_addr.to_string()];
|
||||
addrs.extend(chain.iter().map(|a| a.to_string()));
|
||||
("chain", addrs)
|
||||
}
|
||||
Route::NotFound => ("not_found", vec![]),
|
||||
};
|
||||
|
||||
serde_json::json!({
|
||||
"fingerprint": fingerprint,
|
||||
"route": route_type,
|
||||
"relay_chain": relay_chain,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashSet;
|
||||
use std::net::SocketAddr;
|
||||
|
||||
fn addr(s: &str) -> SocketAddr {
|
||||
s.parse().unwrap()
|
||||
}
|
||||
|
||||
fn make_resolver() -> RouteResolver {
|
||||
RouteResolver::new(addr("10.0.0.1:4433"))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_local() {
|
||||
let resolver = make_resolver();
|
||||
let mut reg = PresenceRegistry::new();
|
||||
reg.register_local("aabbccdd", Some("alice".into()), Some("room1".into()));
|
||||
|
||||
let route = resolver.resolve(®, "aabbccdd");
|
||||
assert_eq!(route, Route::Local);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_direct_peer() {
|
||||
let resolver = make_resolver();
|
||||
let mut reg = PresenceRegistry::new();
|
||||
let peer = addr("10.0.0.2:4433");
|
||||
let mut fps = HashSet::new();
|
||||
fps.insert("deadbeef".to_string());
|
||||
reg.update_peer(peer, fps);
|
||||
|
||||
let route = resolver.resolve(®, "deadbeef");
|
||||
assert_eq!(route, Route::DirectPeer(peer));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_not_found() {
|
||||
let resolver = make_resolver();
|
||||
let reg = PresenceRegistry::new();
|
||||
|
||||
let route = resolver.resolve(®, "unknown_fp");
|
||||
assert_eq!(route, Route::NotFound);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_multi_hop_fallback() {
|
||||
// multi-hop currently falls back to single-hop behavior
|
||||
let resolver = make_resolver();
|
||||
let mut reg = PresenceRegistry::new();
|
||||
reg.register_local("local_fp", None, None);
|
||||
|
||||
let peer = addr("10.0.0.3:4433");
|
||||
let mut fps = HashSet::new();
|
||||
fps.insert("remote_fp".to_string());
|
||||
reg.update_peer(peer, fps);
|
||||
|
||||
// Local lookup works via multi-hop
|
||||
assert_eq!(resolver.resolve_multi_hop(®, "local_fp", 3), Route::Local);
|
||||
// Remote lookup works via multi-hop
|
||||
assert_eq!(
|
||||
resolver.resolve_multi_hop(®, "remote_fp", 3),
|
||||
Route::DirectPeer(peer)
|
||||
);
|
||||
// Not-found works via multi-hop
|
||||
assert_eq!(
|
||||
resolver.resolve_multi_hop(®, "nobody", 3),
|
||||
Route::NotFound
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn route_query_signal_roundtrip() {
|
||||
use wzp_proto::SignalMessage;
|
||||
|
||||
let query = SignalMessage::RouteQuery {
|
||||
fingerprint: "aabbccdd".to_string(),
|
||||
ttl: 3,
|
||||
};
|
||||
let json = serde_json::to_string(&query).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
assert!(matches!(
|
||||
decoded,
|
||||
SignalMessage::RouteQuery { ref fingerprint, ttl }
|
||||
if fingerprint == "aabbccdd" && ttl == 3
|
||||
));
|
||||
|
||||
let response = SignalMessage::RouteResponse {
|
||||
fingerprint: "aabbccdd".to_string(),
|
||||
found: true,
|
||||
relay_chain: vec!["10.0.0.1:4433".to_string(), "10.0.0.2:4433".to_string()],
|
||||
};
|
||||
let json = serde_json::to_string(&response).unwrap();
|
||||
let decoded: SignalMessage = serde_json::from_str(&json).unwrap();
|
||||
assert!(matches!(
|
||||
decoded,
|
||||
SignalMessage::RouteResponse { ref fingerprint, found, ref relay_chain }
|
||||
if fingerprint == "aabbccdd" && found && relay_chain.len() == 2
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn route_display() {
|
||||
assert_eq!(Route::Local.to_string(), "local");
|
||||
assert_eq!(
|
||||
Route::DirectPeer(addr("10.0.0.2:4433")).to_string(),
|
||||
"direct_peer(10.0.0.2:4433)"
|
||||
);
|
||||
assert_eq!(
|
||||
Route::Chain(vec![addr("10.0.0.2:4433"), addr("10.0.0.3:4433")]).to_string(),
|
||||
"chain(10.0.0.2:4433 -> 10.0.0.3:4433)"
|
||||
);
|
||||
assert_eq!(Route::NotFound.to_string(), "not_found");
|
||||
|
||||
// Debug is also useful
|
||||
let debug = format!("{:?}", Route::Local);
|
||||
assert!(debug.contains("Local"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn route_json_output() {
|
||||
let resolver = make_resolver();
|
||||
|
||||
let json = resolver.route_json("fp1", &Route::Local);
|
||||
assert_eq!(json["route"], "local");
|
||||
assert_eq!(json["fingerprint"], "fp1");
|
||||
assert_eq!(json["relay_chain"].as_array().unwrap().len(), 1);
|
||||
|
||||
let json = resolver.route_json("fp2", &Route::DirectPeer(addr("10.0.0.2:4433")));
|
||||
assert_eq!(json["route"], "direct_peer");
|
||||
assert_eq!(json["relay_chain"].as_array().unwrap().len(), 2);
|
||||
|
||||
let json = resolver.route_json("fp3", &Route::NotFound);
|
||||
assert_eq!(json["route"], "not_found");
|
||||
assert_eq!(json["relay_chain"].as_array().unwrap().len(), 0);
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
//! Session manager — tracks active call sessions on the relay.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::time::Instant;
|
||||
|
||||
use wzp_proto::{QualityProfile, Session};
|
||||
|
||||
@@ -9,6 +10,26 @@ use crate::pipeline::{PipelineConfig, RelayPipeline};
|
||||
/// Unique identifier for a relay session.
|
||||
pub type SessionId = [u8; 16];
|
||||
|
||||
/// Lifecycle state of a concurrent session.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum SessionState {
|
||||
Active,
|
||||
Closing,
|
||||
}
|
||||
|
||||
/// Lightweight metadata for a concurrent session (room-mode tracking).
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SessionInfo {
|
||||
/// Which room this session belongs to.
|
||||
pub room_name: String,
|
||||
/// Client fingerprint (present when auth is enabled).
|
||||
pub fingerprint: Option<String>,
|
||||
/// When the session was created.
|
||||
pub connected_at: Instant,
|
||||
/// Current lifecycle state.
|
||||
pub state: SessionState,
|
||||
}
|
||||
|
||||
/// A single active call session on the relay.
|
||||
pub struct RelaySession {
|
||||
/// Protocol session state machine.
|
||||
@@ -47,8 +68,14 @@ impl RelaySession {
|
||||
}
|
||||
|
||||
/// Manages all active sessions on a relay.
|
||||
///
|
||||
/// Combines two layers of tracking:
|
||||
/// - `sessions`: heavy `RelaySession` objects (pipeline state machines, used in forward mode)
|
||||
/// - `tracked`: lightweight `SessionInfo` entries (room + fingerprint, used in room mode to
|
||||
/// enforce `max_sessions` and answer lifecycle queries)
|
||||
pub struct SessionManager {
|
||||
sessions: HashMap<SessionId, RelaySession>,
|
||||
tracked: HashMap<SessionId, SessionInfo>,
|
||||
max_sessions: usize,
|
||||
}
|
||||
|
||||
@@ -56,17 +83,20 @@ impl SessionManager {
|
||||
pub fn new(max_sessions: usize) -> Self {
|
||||
Self {
|
||||
sessions: HashMap::new(),
|
||||
tracked: HashMap::new(),
|
||||
max_sessions,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new session. Returns None if at capacity.
|
||||
pub fn create_session(
|
||||
// ── Heavy session API (forward-mode pipelines) ──────────────────────
|
||||
|
||||
/// Create a new pipeline session. Returns None if at capacity.
|
||||
pub fn create_pipeline_session(
|
||||
&mut self,
|
||||
session_id: SessionId,
|
||||
config: PipelineConfig,
|
||||
) -> Option<&mut RelaySession> {
|
||||
if self.sessions.len() >= self.max_sessions {
|
||||
if self.total_count() >= self.max_sessions {
|
||||
return None;
|
||||
}
|
||||
self.sessions
|
||||
@@ -75,53 +105,124 @@ impl SessionManager {
|
||||
self.sessions.get_mut(&session_id)
|
||||
}
|
||||
|
||||
/// Get a session by ID.
|
||||
/// Get a pipeline session by ID.
|
||||
pub fn get_session(&mut self, id: &SessionId) -> Option<&mut RelaySession> {
|
||||
self.sessions.get_mut(id)
|
||||
}
|
||||
|
||||
/// Remove a session.
|
||||
pub fn remove_session(&mut self, id: &SessionId) -> Option<RelaySession> {
|
||||
/// Remove a pipeline session.
|
||||
pub fn remove_pipeline_session(&mut self, id: &SessionId) -> Option<RelaySession> {
|
||||
self.sessions.remove(id)
|
||||
}
|
||||
|
||||
/// Number of active sessions.
|
||||
pub fn active_count(&self) -> usize {
|
||||
/// Number of active pipeline sessions.
|
||||
pub fn pipeline_active_count(&self) -> usize {
|
||||
self.sessions.values().filter(|s| s.is_active()).count()
|
||||
}
|
||||
|
||||
/// Total sessions (including inactive/closing).
|
||||
pub fn total_count(&self) -> usize {
|
||||
/// Total pipeline sessions (including inactive/closing).
|
||||
pub fn pipeline_total_count(&self) -> usize {
|
||||
self.sessions.len()
|
||||
}
|
||||
|
||||
/// Remove sessions idle for longer than `timeout_ms`.
|
||||
/// Remove pipeline sessions idle for longer than `timeout_ms`.
|
||||
pub fn expire_idle(&mut self, now_ms: u64, timeout_ms: u64) -> usize {
|
||||
let before = self.sessions.len();
|
||||
self.sessions
|
||||
.retain(|_, s| now_ms.saturating_sub(s.last_activity_ms) < timeout_ms);
|
||||
before - self.sessions.len()
|
||||
}
|
||||
|
||||
// ── Lightweight concurrent-session API (room mode) ──────────────────
|
||||
|
||||
/// Register a new concurrent session.
|
||||
/// Returns the `SessionId` on success, or an error string if `max_sessions` is exceeded.
|
||||
pub fn create_session(
|
||||
&mut self,
|
||||
room: &str,
|
||||
fingerprint: Option<String>,
|
||||
) -> Result<SessionId, String> {
|
||||
if self.total_count() >= self.max_sessions {
|
||||
return Err(format!(
|
||||
"max sessions ({}) exceeded",
|
||||
self.max_sessions
|
||||
));
|
||||
}
|
||||
let id = rand_session_id();
|
||||
self.tracked.insert(id, SessionInfo {
|
||||
room_name: room.to_string(),
|
||||
fingerprint,
|
||||
connected_at: Instant::now(),
|
||||
state: SessionState::Active,
|
||||
});
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
/// Remove a tracked session.
|
||||
pub fn remove_session(&mut self, id: SessionId) {
|
||||
self.tracked.remove(&id);
|
||||
}
|
||||
|
||||
/// Number of currently tracked (room-mode) sessions.
|
||||
pub fn active_count(&self) -> usize {
|
||||
self.tracked.values().filter(|s| s.state == SessionState::Active).count()
|
||||
}
|
||||
|
||||
/// Return all session IDs that belong to a given room.
|
||||
pub fn sessions_in_room(&self, room: &str) -> Vec<SessionId> {
|
||||
self.tracked
|
||||
.iter()
|
||||
.filter(|(_, info)| info.room_name == room)
|
||||
.map(|(id, _)| *id)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Get metadata for a tracked session.
|
||||
pub fn session_info(&self, id: SessionId) -> Option<&SessionInfo> {
|
||||
self.tracked.get(&id)
|
||||
}
|
||||
|
||||
/// Total sessions across both tracking layers.
|
||||
pub fn total_count(&self) -> usize {
|
||||
self.sessions.len() + self.tracked.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate a random 16-byte session identifier.
|
||||
fn rand_session_id() -> SessionId {
|
||||
let mut id = [0u8; 16];
|
||||
// Use a simple monotonic + random source to avoid pulling in `rand` crate.
|
||||
// Hash the instant + a counter for uniqueness.
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
static CTR: AtomicU64 = AtomicU64::new(1);
|
||||
let ctr = CTR.fetch_add(1, Ordering::Relaxed);
|
||||
let bytes = ctr.to_le_bytes();
|
||||
id[..8].copy_from_slice(&bytes);
|
||||
// Mix in some time-based entropy for the upper half.
|
||||
let t = Instant::now().elapsed().as_nanos() as u64;
|
||||
id[8..16].copy_from_slice(&t.to_le_bytes());
|
||||
id
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
// ── Pipeline session tests (pre-existing, adapted to renamed API) ───
|
||||
|
||||
#[test]
|
||||
fn create_and_get_session() {
|
||||
fn create_and_get_pipeline_session() {
|
||||
let mut mgr = SessionManager::new(10);
|
||||
let id = [1u8; 16];
|
||||
mgr.create_session(id, PipelineConfig::default());
|
||||
assert_eq!(mgr.total_count(), 1);
|
||||
mgr.create_pipeline_session(id, PipelineConfig::default());
|
||||
assert!(mgr.get_session(&id).is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn respects_max_sessions() {
|
||||
fn respects_max_pipeline_sessions() {
|
||||
let mut mgr = SessionManager::new(1);
|
||||
mgr.create_session([1u8; 16], PipelineConfig::default());
|
||||
let result = mgr.create_session([2u8; 16], PipelineConfig::default());
|
||||
mgr.create_pipeline_session([1u8; 16], PipelineConfig::default());
|
||||
let result = mgr.create_pipeline_session([2u8; 16], PipelineConfig::default());
|
||||
assert!(result.is_none());
|
||||
}
|
||||
|
||||
@@ -129,10 +230,73 @@ mod tests {
|
||||
fn expire_idle_removes_old() {
|
||||
let mut mgr = SessionManager::new(10);
|
||||
let id = [1u8; 16];
|
||||
mgr.create_session(id, PipelineConfig::default());
|
||||
// Session has last_activity_ms = 0, current time = 60000, timeout = 30000
|
||||
mgr.create_pipeline_session(id, PipelineConfig::default());
|
||||
let expired = mgr.expire_idle(60_000, 30_000);
|
||||
assert_eq!(expired, 1);
|
||||
assert_eq!(mgr.total_count(), 0);
|
||||
assert_eq!(mgr.pipeline_total_count(), 0);
|
||||
}
|
||||
|
||||
// ── Concurrent session (room-mode) tests ────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn create_and_remove() {
|
||||
let mut mgr = SessionManager::new(10);
|
||||
let id = mgr.create_session("room-a", Some("fp123".into())).unwrap();
|
||||
assert_eq!(mgr.active_count(), 1);
|
||||
mgr.remove_session(id);
|
||||
assert_eq!(mgr.active_count(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn max_sessions_enforced() {
|
||||
let mut mgr = SessionManager::new(2);
|
||||
mgr.create_session("r1", None).unwrap();
|
||||
mgr.create_session("r2", None).unwrap();
|
||||
let err = mgr.create_session("r3", None);
|
||||
assert!(err.is_err());
|
||||
assert!(err.unwrap_err().contains("max sessions"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sessions_in_room_tracking() {
|
||||
let mut mgr = SessionManager::new(10);
|
||||
let a1 = mgr.create_session("alpha", None).unwrap();
|
||||
let _a2 = mgr.create_session("alpha", None).unwrap();
|
||||
let _b1 = mgr.create_session("beta", None).unwrap();
|
||||
|
||||
let alpha_ids = mgr.sessions_in_room("alpha");
|
||||
assert_eq!(alpha_ids.len(), 2);
|
||||
assert!(alpha_ids.contains(&a1));
|
||||
|
||||
let beta_ids = mgr.sessions_in_room("beta");
|
||||
assert_eq!(beta_ids.len(), 1);
|
||||
|
||||
let empty = mgr.sessions_in_room("gamma");
|
||||
assert!(empty.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn session_info_returns_correct_data() {
|
||||
let mut mgr = SessionManager::new(10);
|
||||
let id = mgr.create_session("room-x", Some("alice-fp".into())).unwrap();
|
||||
|
||||
let info = mgr.session_info(id).expect("session should exist");
|
||||
assert_eq!(info.room_name, "room-x");
|
||||
assert_eq!(info.fingerprint.as_deref(), Some("alice-fp"));
|
||||
assert_eq!(info.state, SessionState::Active);
|
||||
|
||||
// Non-existent session returns None
|
||||
assert!(mgr.session_info([0xFFu8; 16]).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn max_sessions_shared_across_both_layers() {
|
||||
let mut mgr = SessionManager::new(2);
|
||||
// One pipeline session + one tracked session = 2 = at capacity
|
||||
mgr.create_pipeline_session([1u8; 16], PipelineConfig::default());
|
||||
mgr.create_session("room", None).unwrap();
|
||||
// Both layers should now reject
|
||||
assert!(mgr.create_session("room", None).is_err());
|
||||
assert!(mgr.create_pipeline_session([2u8; 16], PipelineConfig::default()).is_none());
|
||||
}
|
||||
}
|
||||
|
||||
152
crates/wzp-relay/src/trunk.rs
Normal file
152
crates/wzp-relay/src/trunk.rs
Normal file
@@ -0,0 +1,152 @@
|
||||
//! Trunk batching — accumulates media packets from multiple sessions into
|
||||
//! [`TrunkFrame`]s that fit inside a single QUIC datagram.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use bytes::Bytes;
|
||||
use wzp_proto::packet::{TrunkEntry, TrunkFrame};
|
||||
|
||||
/// Batches individual session packets into [`TrunkFrame`]s.
|
||||
///
|
||||
/// A trunk frame is flushed when any of the following thresholds are hit:
|
||||
/// - `max_entries` — maximum number of packets per trunk.
|
||||
/// - `max_bytes` — maximum total wire size (should fit one UDP datagram).
|
||||
///
|
||||
/// The caller is responsible for timer-based flushing using [`flush_interval`]
|
||||
/// and calling [`flush`] when the interval expires.
|
||||
pub struct TrunkBatcher {
|
||||
pending: TrunkFrame,
|
||||
/// Current accumulated wire size of the pending frame.
|
||||
pending_bytes: usize,
|
||||
/// Maximum packets per trunk (default 10).
|
||||
pub max_entries: usize,
|
||||
/// Maximum total wire bytes per trunk (default 1200, fits in one UDP datagram).
|
||||
pub max_bytes: usize,
|
||||
/// Maximum wait before flushing (default 5 ms). Used by the caller for timer scheduling.
|
||||
pub flush_interval: Duration,
|
||||
}
|
||||
|
||||
impl TrunkBatcher {
|
||||
/// Header size: the 2-byte count prefix present in every TrunkFrame.
|
||||
const FRAME_HEADER: usize = 2;
|
||||
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
pending: TrunkFrame::new(),
|
||||
pending_bytes: Self::FRAME_HEADER,
|
||||
max_entries: 10,
|
||||
max_bytes: 1200,
|
||||
flush_interval: Duration::from_millis(5),
|
||||
}
|
||||
}
|
||||
|
||||
/// Push a session packet. Returns `Some(frame)` if the batch is now full
|
||||
/// and was flushed, `None` if more room remains.
|
||||
pub fn push(&mut self, session_id: [u8; 2], payload: Bytes) -> Option<TrunkFrame> {
|
||||
let entry_wire = TrunkEntry::OVERHEAD + payload.len();
|
||||
|
||||
// If adding this entry would exceed limits, flush first.
|
||||
if self.should_flush_with(entry_wire) && !self.pending.is_empty() {
|
||||
let frame = self.take_pending();
|
||||
// Then start a new batch with this entry.
|
||||
self.pending.push(session_id, payload);
|
||||
self.pending_bytes += entry_wire;
|
||||
return Some(frame);
|
||||
}
|
||||
|
||||
self.pending.push(session_id, payload);
|
||||
self.pending_bytes += entry_wire;
|
||||
|
||||
if self.should_flush() {
|
||||
Some(self.take_pending())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Flush the current pending frame if non-empty.
|
||||
pub fn flush(&mut self) -> Option<TrunkFrame> {
|
||||
if self.pending.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(self.take_pending())
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if the pending batch has reached `max_entries` or `max_bytes`.
|
||||
pub fn should_flush(&self) -> bool {
|
||||
self.pending.len() >= self.max_entries || self.pending_bytes >= self.max_bytes
|
||||
}
|
||||
|
||||
// --- private helpers ---
|
||||
|
||||
/// Would adding `extra_bytes` exceed a threshold?
|
||||
fn should_flush_with(&self, extra_bytes: usize) -> bool {
|
||||
self.pending.len() + 1 > self.max_entries
|
||||
|| self.pending_bytes + extra_bytes > self.max_bytes
|
||||
}
|
||||
|
||||
/// Take the pending frame out, resetting state.
|
||||
fn take_pending(&mut self) -> TrunkFrame {
|
||||
let frame = std::mem::replace(&mut self.pending, TrunkFrame::new());
|
||||
self.pending_bytes = Self::FRAME_HEADER;
|
||||
frame
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for TrunkBatcher {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn trunk_batcher_fills_and_flushes() {
|
||||
let mut batcher = TrunkBatcher::new();
|
||||
batcher.max_entries = 3;
|
||||
batcher.max_bytes = 4096; // large enough to not interfere
|
||||
|
||||
// First two pushes should not flush.
|
||||
assert!(batcher.push([0, 1], Bytes::from_static(b"aaa")).is_none());
|
||||
assert!(batcher.push([0, 2], Bytes::from_static(b"bbb")).is_none());
|
||||
// Third push should trigger flush (max_entries = 3).
|
||||
let frame = batcher
|
||||
.push([0, 3], Bytes::from_static(b"ccc"))
|
||||
.expect("should flush at max_entries");
|
||||
assert_eq!(frame.len(), 3);
|
||||
assert_eq!(frame.packets[0].session_id, [0, 1]);
|
||||
assert_eq!(frame.packets[2].payload, Bytes::from_static(b"ccc"));
|
||||
|
||||
// Batcher is now empty.
|
||||
assert!(batcher.flush().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trunk_batcher_respects_max_bytes() {
|
||||
let mut batcher = TrunkBatcher::new();
|
||||
batcher.max_entries = 100; // won't be the trigger
|
||||
// Frame header (2) + one entry overhead (4) + 50 payload = 56
|
||||
// Two entries: 2 + 2*(4+50) = 110
|
||||
// Three entries: 2 + 3*54 = 164
|
||||
batcher.max_bytes = 120; // allow at most 2 entries of 50-byte payload
|
||||
|
||||
let big = Bytes::from(vec![0xAA; 50]);
|
||||
assert!(batcher.push([0, 1], big.clone()).is_none()); // 56 bytes
|
||||
// Second push: 56 + 54 = 110 < 120, fits
|
||||
assert!(batcher.push([0, 2], big.clone()).is_none());
|
||||
// Third push would be 164 > 120, so existing batch flushes first
|
||||
let frame = batcher
|
||||
.push([0, 3], big.clone())
|
||||
.expect("should flush on max_bytes");
|
||||
assert_eq!(frame.len(), 2);
|
||||
|
||||
// The third entry is now pending
|
||||
let remaining = batcher.flush().unwrap();
|
||||
assert_eq!(remaining.len(), 1);
|
||||
assert_eq!(remaining.packets[0].session_id, [0, 3]);
|
||||
}
|
||||
}
|
||||
243
crates/wzp-relay/src/ws.rs
Normal file
243
crates/wzp-relay/src/ws.rs
Normal file
@@ -0,0 +1,243 @@
|
||||
//! WebSocket transport for browser clients.
|
||||
//!
|
||||
//! Browsers connect via `GET /ws/{room}` → WebSocket upgrade.
|
||||
//! First message must be auth JSON (if auth is enabled).
|
||||
//! Subsequent messages are binary PCM frames forwarded to/from the room.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::{
|
||||
extract::{
|
||||
ws::{Message, WebSocket},
|
||||
Path, State, WebSocketUpgrade,
|
||||
},
|
||||
response::IntoResponse,
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use futures_util::{SinkExt, StreamExt};
|
||||
use tokio::sync::{mpsc, Mutex};
|
||||
use tower_http::services::ServeDir;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
use crate::auth;
|
||||
use crate::metrics::RelayMetrics;
|
||||
use crate::presence::PresenceRegistry;
|
||||
use crate::room::RoomManager;
|
||||
use crate::session_mgr::SessionManager;
|
||||
|
||||
/// Shared state for WebSocket handlers.
|
||||
#[derive(Clone)]
|
||||
pub struct WsState {
|
||||
pub room_mgr: Arc<Mutex<RoomManager>>,
|
||||
pub session_mgr: Arc<Mutex<SessionManager>>,
|
||||
pub auth_url: Option<String>,
|
||||
pub metrics: Arc<RelayMetrics>,
|
||||
pub presence: Arc<Mutex<PresenceRegistry>>,
|
||||
}
|
||||
|
||||
/// Start the WebSocket + static file server.
|
||||
pub async fn run_ws_server(port: u16, state: WsState, static_dir: Option<String>) {
|
||||
let mut app = Router::new()
|
||||
.route("/ws/{room}", get(ws_upgrade_handler))
|
||||
.with_state(state);
|
||||
|
||||
if let Some(dir) = static_dir {
|
||||
info!(dir = %dir, "serving static files");
|
||||
app = app.fallback_service(ServeDir::new(dir));
|
||||
}
|
||||
|
||||
let addr: SocketAddr = ([0, 0, 0, 0], port).into();
|
||||
info!(%addr, "WebSocket server listening");
|
||||
|
||||
let listener = tokio::net::TcpListener::bind(addr)
|
||||
.await
|
||||
.expect("failed to bind WS listener");
|
||||
axum::serve(listener, app).await.expect("WS server failed");
|
||||
}
|
||||
|
||||
async fn ws_upgrade_handler(
|
||||
Path(room): Path<String>,
|
||||
State(state): State<WsState>,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> impl IntoResponse {
|
||||
ws.on_upgrade(move |socket| handle_ws_connection(socket, room, state))
|
||||
}
|
||||
|
||||
async fn handle_ws_connection(socket: WebSocket, room: String, state: WsState) {
|
||||
let (mut ws_tx, mut ws_rx) = socket.split();
|
||||
|
||||
// 1. Auth: if auth_url is set, first message must be {"type":"auth","token":"..."}
|
||||
let fingerprint: Option<String> = if let Some(ref auth_url) = state.auth_url {
|
||||
match ws_rx.next().await {
|
||||
Some(Ok(Message::Text(text))) => {
|
||||
match serde_json::from_str::<serde_json::Value>(&text) {
|
||||
Ok(parsed) if parsed["type"] == "auth" => {
|
||||
if let Some(token) = parsed["token"].as_str() {
|
||||
match auth::validate_token(auth_url, token).await {
|
||||
Ok(client) => {
|
||||
state.metrics.auth_attempts.with_label_values(&["ok"]).inc();
|
||||
info!(fingerprint = %client.fingerprint, "WS authenticated");
|
||||
let _ = ws_tx
|
||||
.send(Message::Text(r#"{"type":"auth_ok"}"#.into()))
|
||||
.await;
|
||||
Some(client.fingerprint)
|
||||
}
|
||||
Err(e) => {
|
||||
state
|
||||
.metrics
|
||||
.auth_attempts
|
||||
.with_label_values(&["fail"])
|
||||
.inc();
|
||||
let _ = ws_tx
|
||||
.send(Message::Text(
|
||||
format!(r#"{{"type":"auth_error","error":"{e}"}}"#)
|
||||
.into(),
|
||||
))
|
||||
.await;
|
||||
warn!("WS auth failed: {e}");
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
warn!("WS auth: missing token field");
|
||||
return;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
warn!("WS: expected auth message as first frame");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
warn!("WS: connection closed before auth");
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let _ = ws_tx
|
||||
.send(Message::Text(r#"{"type":"auth_ok"}"#.into()))
|
||||
.await;
|
||||
None
|
||||
};
|
||||
|
||||
// 2. Create mpsc channel for outbound frames (room → browser)
|
||||
let (tx, mut rx) = mpsc::channel::<Bytes>(64);
|
||||
|
||||
// 3. Create session
|
||||
let session_id = {
|
||||
let mut smgr = state.session_mgr.lock().await;
|
||||
match smgr.create_session(&room, fingerprint.clone()) {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
error!(room = %room, "WS session rejected: {e}");
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
state.metrics.active_sessions.inc();
|
||||
|
||||
// 4. Join room with WS sender
|
||||
let addr: SocketAddr = ([0, 0, 0, 0], 0).into();
|
||||
let participant_id = {
|
||||
let mut mgr = state.room_mgr.lock().await;
|
||||
match mgr.join_ws(&room, addr, tx, fingerprint.as_deref()) {
|
||||
Ok(id) => {
|
||||
state.metrics.active_rooms.set(mgr.list().len() as i64);
|
||||
id
|
||||
}
|
||||
Err(e) => {
|
||||
error!(room = %room, "WS room join denied: {e}");
|
||||
state.metrics.active_sessions.dec();
|
||||
let mut smgr = state.session_mgr.lock().await;
|
||||
smgr.remove_session(session_id);
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// 5. Register presence
|
||||
if let Some(ref fp) = fingerprint {
|
||||
let mut reg = state.presence.lock().await;
|
||||
reg.register_local(fp, None, Some(room.clone()));
|
||||
}
|
||||
|
||||
info!(room = %room, participant = participant_id, "WS client joined");
|
||||
|
||||
// 6. Outbound task: mpsc rx → WS binary frames
|
||||
let send_task = tokio::spawn(async move {
|
||||
while let Some(data) = rx.recv().await {
|
||||
if ws_tx
|
||||
.send(Message::Binary(data.to_vec().into()))
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// 7. Inbound: WS recv → fan-out to room
|
||||
loop {
|
||||
match ws_rx.next().await {
|
||||
Some(Ok(Message::Binary(data))) => {
|
||||
let others = {
|
||||
let mgr = state.room_mgr.lock().await;
|
||||
mgr.others(&room, participant_id)
|
||||
};
|
||||
for other in &others {
|
||||
let _ = other.send_raw(&data).await;
|
||||
}
|
||||
state
|
||||
.metrics
|
||||
.packets_forwarded
|
||||
.inc_by(others.len() as u64);
|
||||
state
|
||||
.metrics
|
||||
.bytes_forwarded
|
||||
.inc_by(data.len() as u64 * others.len() as u64);
|
||||
}
|
||||
Some(Ok(Message::Close(_))) | None => break,
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
|
||||
// 8. Cleanup
|
||||
send_task.abort();
|
||||
info!(room = %room, participant = participant_id, "WS client disconnected");
|
||||
|
||||
if let Some(ref fp) = fingerprint {
|
||||
let mut reg = state.presence.lock().await;
|
||||
reg.unregister_local(fp);
|
||||
}
|
||||
|
||||
{
|
||||
let mut mgr = state.room_mgr.lock().await;
|
||||
mgr.leave(&room, participant_id);
|
||||
state.metrics.active_rooms.set(mgr.list().len() as i64);
|
||||
}
|
||||
|
||||
let session_id_str: String = session_id.iter().map(|b| format!("{b:02x}")).collect();
|
||||
state.metrics.remove_session_metrics(&session_id_str);
|
||||
state.metrics.active_sessions.dec();
|
||||
|
||||
{
|
||||
let mut smgr = state.session_mgr.lock().await;
|
||||
smgr.remove_session(session_id);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn ws_state_is_clone() {
|
||||
// WsState must be Clone for axum's State extractor
|
||||
fn assert_clone<T: Clone>() {}
|
||||
assert_clone::<WsState>();
|
||||
}
|
||||
}
|
||||
295
crates/wzp-relay/tests/handshake_integration.rs
Normal file
295
crates/wzp-relay/tests/handshake_integration.rs
Normal file
@@ -0,0 +1,295 @@
|
||||
//! WZP-S-5 integration tests: crypto handshake wired into live QUIC path.
|
||||
//!
|
||||
//! Verifies that `perform_handshake` (client/caller) and `accept_handshake`
|
||||
//! (relay/callee) complete successfully over a real in-process QUIC connection
|
||||
//! and produce usable `CryptoSession` values.
|
||||
|
||||
use std::net::{Ipv4Addr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
|
||||
use wzp_client::perform_handshake;
|
||||
use wzp_crypto::{KeyExchange, WarzoneKeyExchange};
|
||||
use wzp_proto::{MediaTransport, SignalMessage};
|
||||
use wzp_relay::handshake::accept_handshake;
|
||||
use wzp_transport::{client_config, create_endpoint, server_config, QuinnTransport};
|
||||
|
||||
/// Establish a QUIC connection and wrap both sides in `QuinnTransport`.
|
||||
///
|
||||
/// Returns (client_transport, server_transport, _endpoints) where the endpoint
|
||||
/// tuple must be kept alive for the duration of the test to avoid premature
|
||||
/// connection teardown.
|
||||
async fn connected_pair() -> (Arc<QuinnTransport>, Arc<QuinnTransport>, (quinn::Endpoint, quinn::Endpoint)) {
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
let (sc, _cert_der) = server_config();
|
||||
let server_addr: SocketAddr = (Ipv4Addr::LOCALHOST, 0).into();
|
||||
let server_ep = create_endpoint(server_addr, Some(sc)).expect("server endpoint");
|
||||
let server_listen = server_ep.local_addr().expect("server local addr");
|
||||
|
||||
let client_addr: SocketAddr = (Ipv4Addr::LOCALHOST, 0).into();
|
||||
let client_ep = create_endpoint(client_addr, None).expect("client endpoint");
|
||||
|
||||
let server_ep_clone = server_ep.clone();
|
||||
let accept_fut = tokio::spawn(async move {
|
||||
let conn = wzp_transport::accept(&server_ep_clone).await.expect("accept");
|
||||
Arc::new(QuinnTransport::new(conn))
|
||||
});
|
||||
|
||||
let client_conn =
|
||||
wzp_transport::connect(&client_ep, server_listen, "localhost", client_config())
|
||||
.await
|
||||
.expect("connect");
|
||||
let client_transport = Arc::new(QuinnTransport::new(client_conn));
|
||||
|
||||
let server_transport = accept_fut.await.expect("join accept task");
|
||||
|
||||
(client_transport, server_transport, (server_ep, client_ep))
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 1: handshake_succeeds
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn handshake_succeeds() {
|
||||
let (client_transport, server_transport, _endpoints) = connected_pair().await;
|
||||
|
||||
let caller_seed: [u8; 32] = [0xAA; 32];
|
||||
let callee_seed: [u8; 32] = [0xBB; 32];
|
||||
|
||||
// Clone Arc so the server transport stays alive in the main task too.
|
||||
let server_t = Arc::clone(&server_transport);
|
||||
let callee_handle = tokio::spawn(async move {
|
||||
accept_handshake(server_t.as_ref(), &callee_seed).await
|
||||
});
|
||||
|
||||
let caller_session = perform_handshake(client_transport.as_ref(), &caller_seed)
|
||||
.await
|
||||
.expect("perform_handshake should succeed");
|
||||
|
||||
let (callee_session, chosen_profile) = callee_handle
|
||||
.await
|
||||
.expect("join callee task")
|
||||
.expect("accept_handshake should succeed");
|
||||
|
||||
// Both sides should have derived a working CryptoSession.
|
||||
// Verify by encrypting on one side and decrypting on the other.
|
||||
let header = b"test-header";
|
||||
let plaintext = b"hello warzone";
|
||||
|
||||
let mut ciphertext = Vec::new();
|
||||
let mut caller_session = caller_session;
|
||||
let mut callee_session = callee_session;
|
||||
|
||||
caller_session
|
||||
.encrypt(header, plaintext, &mut ciphertext)
|
||||
.expect("encrypt");
|
||||
|
||||
let mut decrypted = Vec::new();
|
||||
callee_session
|
||||
.decrypt(header, &ciphertext, &mut decrypted)
|
||||
.expect("decrypt");
|
||||
|
||||
assert_eq!(&decrypted, plaintext);
|
||||
assert_eq!(chosen_profile, wzp_proto::QualityProfile::GOOD);
|
||||
|
||||
// Keep transports alive until test completes.
|
||||
drop(server_transport);
|
||||
drop(client_transport);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 2: handshake_verifies_identity
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn handshake_verifies_identity() {
|
||||
let (client_transport, server_transport, _endpoints) = connected_pair().await;
|
||||
|
||||
// Two completely different seeds => different identity keys.
|
||||
let caller_seed: [u8; 32] = [0x11; 32];
|
||||
let callee_seed: [u8; 32] = [0x22; 32];
|
||||
|
||||
// Confirm the seeds produce different identity public keys.
|
||||
let caller_kx = WarzoneKeyExchange::from_identity_seed(&caller_seed);
|
||||
let callee_kx = WarzoneKeyExchange::from_identity_seed(&callee_seed);
|
||||
assert_ne!(
|
||||
caller_kx.identity_public_key(),
|
||||
callee_kx.identity_public_key(),
|
||||
"different seeds must produce different identity keys"
|
||||
);
|
||||
|
||||
let server_t = Arc::clone(&server_transport);
|
||||
let callee_handle = tokio::spawn(async move {
|
||||
accept_handshake(server_t.as_ref(), &callee_seed).await
|
||||
});
|
||||
|
||||
let caller_session = perform_handshake(client_transport.as_ref(), &caller_seed)
|
||||
.await
|
||||
.expect("handshake must succeed even with different identities");
|
||||
|
||||
let (callee_session, _profile) = callee_handle
|
||||
.await
|
||||
.expect("join")
|
||||
.expect("accept_handshake must succeed");
|
||||
|
||||
// Cross-encrypt/decrypt to prove the shared session works.
|
||||
let header = b"id-test";
|
||||
let plaintext = b"identity verified";
|
||||
|
||||
let mut ct = Vec::new();
|
||||
let mut caller_session = caller_session;
|
||||
let mut callee_session = callee_session;
|
||||
|
||||
caller_session
|
||||
.encrypt(header, plaintext, &mut ct)
|
||||
.expect("encrypt");
|
||||
|
||||
let mut pt = Vec::new();
|
||||
callee_session
|
||||
.decrypt(header, &ct, &mut pt)
|
||||
.expect("decrypt");
|
||||
|
||||
assert_eq!(&pt, plaintext);
|
||||
|
||||
drop(server_transport);
|
||||
drop(client_transport);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 3: auth_then_handshake
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn auth_then_handshake() {
|
||||
let (client_transport, server_transport, _endpoints) = connected_pair().await;
|
||||
|
||||
let caller_seed: [u8; 32] = [0xCC; 32];
|
||||
let callee_seed: [u8; 32] = [0xDD; 32];
|
||||
|
||||
// The callee side: first consume the AuthToken, then run accept_handshake.
|
||||
let server_t = Arc::clone(&server_transport);
|
||||
let callee_handle = tokio::spawn(async move {
|
||||
// 1. Receive AuthToken
|
||||
let auth_msg = server_t
|
||||
.recv_signal()
|
||||
.await
|
||||
.expect("recv_signal should succeed")
|
||||
.expect("should receive a message");
|
||||
|
||||
let token = match auth_msg {
|
||||
SignalMessage::AuthToken { token } => token,
|
||||
other => panic!("expected AuthToken, got {:?}", std::mem::discriminant(&other)),
|
||||
};
|
||||
|
||||
// 2. Run the cryptographic handshake
|
||||
let (session, profile) = accept_handshake(server_t.as_ref(), &callee_seed)
|
||||
.await
|
||||
.expect("accept_handshake after auth");
|
||||
|
||||
(token, session, profile)
|
||||
});
|
||||
|
||||
// Caller side: send AuthToken first, then perform_handshake.
|
||||
let auth = SignalMessage::AuthToken {
|
||||
token: "bearer-test-token-12345".to_string(),
|
||||
};
|
||||
client_transport
|
||||
.send_signal(&auth)
|
||||
.await
|
||||
.expect("send AuthToken");
|
||||
|
||||
let caller_session = perform_handshake(client_transport.as_ref(), &caller_seed)
|
||||
.await
|
||||
.expect("perform_handshake after auth");
|
||||
|
||||
let (received_token, callee_session, _profile) = callee_handle
|
||||
.await
|
||||
.expect("join callee task");
|
||||
|
||||
// Verify the auth token was received correctly.
|
||||
assert_eq!(received_token, "bearer-test-token-12345");
|
||||
|
||||
// Verify the crypto session works after the auth preamble.
|
||||
let header = b"auth-hdr";
|
||||
let plaintext = b"post-auth payload";
|
||||
|
||||
let mut ct = Vec::new();
|
||||
let mut caller_session = caller_session;
|
||||
let mut callee_session = callee_session;
|
||||
|
||||
caller_session
|
||||
.encrypt(header, plaintext, &mut ct)
|
||||
.expect("encrypt");
|
||||
|
||||
let mut pt = Vec::new();
|
||||
callee_session
|
||||
.decrypt(header, &ct, &mut pt)
|
||||
.expect("decrypt");
|
||||
|
||||
assert_eq!(&pt, plaintext);
|
||||
|
||||
drop(server_transport);
|
||||
drop(client_transport);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 4: handshake_rejects_bad_signature
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn handshake_rejects_bad_signature() {
|
||||
let (client_transport, server_transport, _endpoints) = connected_pair().await;
|
||||
|
||||
let caller_seed: [u8; 32] = [0xEE; 32];
|
||||
let callee_seed: [u8; 32] = [0xFF; 32];
|
||||
|
||||
// Spawn callee -- it should reject the tampered CallOffer.
|
||||
let server_t = Arc::clone(&server_transport);
|
||||
let callee_handle = tokio::spawn(async move {
|
||||
accept_handshake(server_t.as_ref(), &callee_seed).await
|
||||
});
|
||||
|
||||
// Manually build a CallOffer with a corrupted signature.
|
||||
let mut kx = WarzoneKeyExchange::from_identity_seed(&caller_seed);
|
||||
let identity_pub = kx.identity_public_key();
|
||||
let ephemeral_pub = kx.generate_ephemeral();
|
||||
|
||||
let mut sign_data = Vec::with_capacity(32 + 10);
|
||||
sign_data.extend_from_slice(&ephemeral_pub);
|
||||
sign_data.extend_from_slice(b"call-offer");
|
||||
let mut signature = kx.sign(&sign_data);
|
||||
|
||||
// Tamper: flip bits in the signature.
|
||||
for byte in signature.iter_mut().take(8) {
|
||||
*byte ^= 0xFF;
|
||||
}
|
||||
|
||||
let bad_offer = SignalMessage::CallOffer {
|
||||
identity_pub,
|
||||
ephemeral_pub,
|
||||
signature,
|
||||
supported_profiles: vec![wzp_proto::QualityProfile::GOOD],
|
||||
};
|
||||
|
||||
client_transport
|
||||
.send_signal(&bad_offer)
|
||||
.await
|
||||
.expect("send tampered CallOffer");
|
||||
|
||||
// The callee should return an error about signature verification.
|
||||
let result = callee_handle.await.expect("join callee task");
|
||||
match result {
|
||||
Ok(_) => panic!("accept_handshake must reject a bad signature"),
|
||||
Err(e) => {
|
||||
let err_msg = e.to_string();
|
||||
assert!(
|
||||
err_msg.contains("signature verification failed"),
|
||||
"error should mention signature verification, got: {err_msg}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
drop(server_transport);
|
||||
drop(client_transport);
|
||||
}
|
||||
@@ -139,6 +139,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn server_config_creates_without_error() {
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
let (cfg, cert_der) = server_config();
|
||||
assert!(!cert_der.is_empty());
|
||||
// Verify the config was created (no panic)
|
||||
@@ -147,6 +148,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn client_config_creates_without_error() {
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
let cfg = client_config();
|
||||
drop(cfg);
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
use async_trait::async_trait;
|
||||
use std::sync::Mutex;
|
||||
|
||||
use wzp_proto::packet::TrunkFrame;
|
||||
use wzp_proto::{MediaPacket, MediaTransport, PathQuality, SignalMessage, TransportError};
|
||||
|
||||
use crate::datagram;
|
||||
@@ -36,6 +37,47 @@ impl QuinnTransport {
|
||||
pub fn max_datagram_size(&self) -> Option<usize> {
|
||||
datagram::max_datagram_payload(&self.connection)
|
||||
}
|
||||
|
||||
/// Send an encoded [`TrunkFrame`] as a single QUIC datagram.
|
||||
pub fn send_trunk(&self, frame: &TrunkFrame) -> Result<(), TransportError> {
|
||||
let data = frame.encode();
|
||||
|
||||
if let Some(max_size) = self.connection.max_datagram_size() {
|
||||
if data.len() > max_size {
|
||||
return Err(TransportError::DatagramTooLarge {
|
||||
size: data.len(),
|
||||
max: max_size,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
self.connection.send_datagram(data).map_err(|e| {
|
||||
TransportError::Internal(format!("send trunk datagram error: {e}"))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Receive a single QUIC datagram and decode it as a [`TrunkFrame`].
|
||||
///
|
||||
/// Returns `Ok(None)` on connection close, `Ok(Some(frame))` on success,
|
||||
/// or an error on malformed data / transport failure.
|
||||
pub async fn recv_trunk(&self) -> Result<Option<TrunkFrame>, TransportError> {
|
||||
let data = match self.connection.read_datagram().await {
|
||||
Ok(data) => data,
|
||||
Err(quinn::ConnectionError::ApplicationClosed(_)) => return Ok(None),
|
||||
Err(quinn::ConnectionError::LocallyClosed) => return Ok(None),
|
||||
Err(e) => {
|
||||
return Err(TransportError::Internal(format!(
|
||||
"recv trunk datagram error: {e}"
|
||||
)))
|
||||
}
|
||||
};
|
||||
|
||||
TrunkFrame::decode(&data)
|
||||
.map(Some)
|
||||
.ok_or_else(|| TransportError::Internal("malformed trunk frame".into()))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
|
||||
36
crates/wzp-web/Cargo.toml
Normal file
36
crates/wzp-web/Cargo.toml
Normal file
@@ -0,0 +1,36 @@
|
||||
[package]
|
||||
name = "wzp-web"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
rust-version.workspace = true
|
||||
description = "WarzonePhone web bridge — browser audio via WebSocket to wzp relay"
|
||||
|
||||
[dependencies]
|
||||
wzp-proto = { workspace = true }
|
||||
wzp-codec = { workspace = true }
|
||||
wzp-fec = { workspace = true }
|
||||
wzp-crypto = { workspace = true }
|
||||
wzp-transport = { workspace = true }
|
||||
wzp-client = { path = "../wzp-client" }
|
||||
tokio = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
anyhow = "1"
|
||||
wzp-relay = { path = "../wzp-relay" }
|
||||
serde_json = "1"
|
||||
rustls-pemfile = "2"
|
||||
axum = { version = "0.8", features = ["ws"] }
|
||||
tower-http = { version = "0.6", features = ["fs"] }
|
||||
futures = "0.3"
|
||||
axum-server = { version = "0.7", features = ["tls-rustls"] }
|
||||
rcgen = "0.13"
|
||||
rustls = { version = "0.23", default-features = false, features = ["ring", "std"] }
|
||||
rustls-pki-types = "1"
|
||||
tokio-rustls = "0.26"
|
||||
prometheus = "0.13"
|
||||
|
||||
[[bin]]
|
||||
name = "wzp-web"
|
||||
path = "src/main.rs"
|
||||
390
crates/wzp-web/src/main.rs
Normal file
390
crates/wzp-web/src/main.rs
Normal file
@@ -0,0 +1,390 @@
|
||||
//! WarzonePhone Web Bridge
|
||||
//!
|
||||
//! Serves a web page for browser-based voice calls and bridges
|
||||
//! WebSocket audio to the wzp relay protocol.
|
||||
//!
|
||||
//! Usage: wzp-web [--port 8080] [--relay 127.0.0.1:4433] [--tls]
|
||||
//!
|
||||
//! Rooms: clients connect to /ws/<room-name> and are paired by room.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::extract::ws::{Message, WebSocket};
|
||||
use axum::extract::{Path, WebSocketUpgrade};
|
||||
use axum::response::IntoResponse;
|
||||
use axum::routing::get;
|
||||
use axum::Router;
|
||||
use futures::stream::StreamExt;
|
||||
use futures::SinkExt;
|
||||
use tokio::sync::Mutex;
|
||||
use tower_http::services::ServeDir;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
use wzp_client::call::{CallConfig, CallDecoder, CallEncoder};
|
||||
use wzp_proto::MediaTransport;
|
||||
|
||||
mod metrics;
|
||||
use metrics::WebMetrics;
|
||||
|
||||
const FRAME_SAMPLES: usize = 960;
|
||||
|
||||
#[derive(Clone)]
|
||||
struct AppState {
|
||||
relay_addr: SocketAddr,
|
||||
auth_url: Option<String>,
|
||||
metrics: WebMetrics,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt().init();
|
||||
rustls::crypto::ring::default_provider()
|
||||
.install_default()
|
||||
.expect("failed to install rustls crypto provider");
|
||||
|
||||
let mut port: u16 = 8080;
|
||||
let mut relay_addr: SocketAddr = "127.0.0.1:4433".parse()?;
|
||||
let mut use_tls = false;
|
||||
let mut auth_url: Option<String> = None;
|
||||
let mut cert_path: Option<String> = None;
|
||||
let mut key_path: Option<String> = None;
|
||||
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
let mut i = 1;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
"--port" => { i += 1; port = args[i].parse().expect("invalid port"); }
|
||||
"--relay" => { i += 1; relay_addr = args[i].parse().expect("invalid relay address"); }
|
||||
"--tls" => { use_tls = true; }
|
||||
"--auth-url" => { i += 1; auth_url = Some(args[i].clone()); }
|
||||
"--cert" => { i += 1; cert_path = Some(args[i].clone()); }
|
||||
"--key" => { i += 1; key_path = Some(args[i].clone()); }
|
||||
"--help" | "-h" => {
|
||||
eprintln!("Usage: wzp-web [--port 8080] [--relay 127.0.0.1:4433] [--tls] [--auth-url <url>]");
|
||||
eprintln!();
|
||||
eprintln!("Options:");
|
||||
eprintln!(" --port <port> HTTP/WebSocket port (default: 8080)");
|
||||
eprintln!(" --relay <addr> WZP relay address (default: 127.0.0.1:4433)");
|
||||
eprintln!(" --tls Enable HTTPS (required for mic on Android)");
|
||||
eprintln!(" --auth-url <url> featherChat auth endpoint for token validation");
|
||||
eprintln!(" --cert <path> TLS certificate PEM file (optional, overrides self-signed)");
|
||||
eprintln!(" --key <path> TLS private key PEM file (optional, overrides self-signed)");
|
||||
eprintln!();
|
||||
eprintln!("Rooms: open https://host:port/<room-name> to join a room.");
|
||||
eprintln!("Browser sends auth JSON as first WS message when --auth-url is set.");
|
||||
std::process::exit(0);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
if let Some(ref url) = auth_url {
|
||||
info!(url, "auth enabled — browsers must send token as first WS message");
|
||||
}
|
||||
|
||||
let web_metrics = WebMetrics::new();
|
||||
let state = AppState {
|
||||
relay_addr,
|
||||
auth_url,
|
||||
metrics: web_metrics,
|
||||
};
|
||||
|
||||
let static_dir = if std::path::Path::new("crates/wzp-web/static").exists() {
|
||||
"crates/wzp-web/static"
|
||||
} else if std::path::Path::new("static").exists() {
|
||||
"static"
|
||||
} else {
|
||||
"static"
|
||||
};
|
||||
|
||||
// Serve index.html for any path that isn't /ws/, /metrics, or a static file.
|
||||
// This lets URLs like /manwe load the SPA which reads the room from the path.
|
||||
let static_service = ServeDir::new(static_dir)
|
||||
.fallback(tower_http::services::ServeFile::new(
|
||||
format!("{}/index.html", static_dir),
|
||||
));
|
||||
|
||||
let app = Router::new()
|
||||
.route("/ws/{room}", get(ws_handler))
|
||||
.route("/metrics", get(metrics::metrics_handler))
|
||||
.fallback_service(static_service)
|
||||
.with_state(state);
|
||||
|
||||
let listen: SocketAddr = format!("0.0.0.0:{port}").parse()?;
|
||||
|
||||
if use_tls {
|
||||
let (cert_der, key_der) = if let (Some(cp), Some(kp)) = (&cert_path, &key_path) {
|
||||
// Load real certificates from files
|
||||
info!(cert = %cp, key = %kp, "loading TLS certificates from files");
|
||||
let cert_pem = std::fs::read(cp)?;
|
||||
let key_pem = std::fs::read(kp)?;
|
||||
let cert = rustls_pemfile::certs(&mut &cert_pem[..])
|
||||
.next()
|
||||
.ok_or_else(|| anyhow::anyhow!("no certificate found in PEM"))??;
|
||||
let key = rustls_pemfile::private_key(&mut &key_pem[..])?
|
||||
.ok_or_else(|| anyhow::anyhow!("no private key found in PEM"))?;
|
||||
(cert, key)
|
||||
} else {
|
||||
// Generate self-signed for development
|
||||
info!("generating self-signed TLS certificate (use --cert/--key for production)");
|
||||
let cert_key = rcgen::generate_simple_self_signed(vec![
|
||||
"localhost".to_string(), "wzp".to_string(),
|
||||
])?;
|
||||
let cert = rustls_pki_types::CertificateDer::from(cert_key.cert);
|
||||
let key = rustls_pki_types::PrivateKeyDer::try_from(cert_key.key_pair.serialize_der())
|
||||
.map_err(|e| anyhow::anyhow!("key error: {e}"))?;
|
||||
(cert, key)
|
||||
};
|
||||
|
||||
let mut tls_config = rustls::ServerConfig::builder()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(vec![cert_der], key_der)?;
|
||||
tls_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
|
||||
|
||||
let tls_config = axum_server::tls_rustls::RustlsConfig::from_config(Arc::new(tls_config));
|
||||
|
||||
info!(%listen, %relay_addr, "WarzonePhone web bridge (HTTPS)");
|
||||
info!("Open https://localhost:{port}/<room-name> in your browser");
|
||||
|
||||
axum_server::bind_rustls(listen, tls_config)
|
||||
.serve(app.into_make_service())
|
||||
.await?;
|
||||
} else {
|
||||
info!(%listen, %relay_addr, "WarzonePhone web bridge (HTTP)");
|
||||
info!("Open http://localhost:{port}/<room-name> in your browser");
|
||||
info!("Use --tls for mic access on Android/remote browsers");
|
||||
|
||||
let listener = tokio::net::TcpListener::bind(listen).await?;
|
||||
axum::serve(listener, app).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn ws_handler(
|
||||
ws: WebSocketUpgrade,
|
||||
Path(room): Path<String>,
|
||||
axum::extract::State(state): axum::extract::State<AppState>,
|
||||
) -> impl IntoResponse {
|
||||
info!(room = %room, "WebSocket upgrade request");
|
||||
ws.on_upgrade(move |socket| handle_ws(socket, room, state))
|
||||
}
|
||||
|
||||
async fn handle_ws(socket: WebSocket, room: String, state: AppState) {
|
||||
info!(room = %room, "client joined room");
|
||||
|
||||
state.metrics.active_connections.inc();
|
||||
|
||||
let (mut ws_sender, mut ws_receiver) = socket.split();
|
||||
|
||||
// Auth: if --auth-url is set, expect a JSON auth message from the browser first
|
||||
let browser_token: Option<String> = if state.auth_url.is_some() {
|
||||
info!(room = %room, "waiting for auth token from browser...");
|
||||
match ws_receiver.next().await {
|
||||
Some(Ok(Message::Text(text))) => {
|
||||
match serde_json::from_str::<serde_json::Value>(&text) {
|
||||
Ok(v) if v.get("type").and_then(|t| t.as_str()) == Some("auth") => {
|
||||
let token = v.get("token").and_then(|t| t.as_str()).unwrap_or("").to_string();
|
||||
if token.is_empty() {
|
||||
error!(room = %room, "empty auth token");
|
||||
state.metrics.auth_failures.inc();
|
||||
state.metrics.active_connections.dec();
|
||||
return;
|
||||
}
|
||||
// Validate against featherChat
|
||||
if let Some(ref url) = state.auth_url {
|
||||
match wzp_relay::auth::validate_token(url, &token).await {
|
||||
Ok(client) => {
|
||||
info!(room = %room, fingerprint = %client.fingerprint, "browser authenticated");
|
||||
}
|
||||
Err(e) => {
|
||||
error!(room = %room, "browser auth failed: {e}");
|
||||
state.metrics.auth_failures.inc();
|
||||
state.metrics.active_connections.dec();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(token)
|
||||
}
|
||||
_ => {
|
||||
error!(room = %room, "expected auth JSON, got: {text}");
|
||||
state.metrics.auth_failures.inc();
|
||||
state.metrics.active_connections.dec();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
error!(room = %room, "no auth message from browser");
|
||||
state.metrics.auth_failures.inc();
|
||||
state.metrics.active_connections.dec();
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Connect to relay
|
||||
let relay_addr = state.relay_addr;
|
||||
let bind_addr: SocketAddr = if relay_addr.is_ipv6() {
|
||||
"[::]:0".parse().unwrap()
|
||||
} else {
|
||||
"0.0.0.0:0".parse().unwrap()
|
||||
};
|
||||
|
||||
let client_config = wzp_transport::client_config();
|
||||
let endpoint = match wzp_transport::create_endpoint(bind_addr, None) {
|
||||
Ok(e) => e,
|
||||
Err(e) => { error!("create endpoint: {e}"); return; }
|
||||
};
|
||||
|
||||
// Hash room name for SNI privacy
|
||||
let sni = if room.is_empty() {
|
||||
"default".to_string()
|
||||
} else {
|
||||
wzp_crypto::hash_room_name(&room)
|
||||
};
|
||||
let connection =
|
||||
match wzp_transport::connect(&endpoint, relay_addr, &sni, client_config).await {
|
||||
Ok(c) => c,
|
||||
Err(e) => { error!("connect to relay: {e}"); return; }
|
||||
};
|
||||
|
||||
info!(room = %room, "connected to relay");
|
||||
|
||||
let transport = Arc::new(wzp_transport::QuinnTransport::new(connection));
|
||||
|
||||
// Send auth token to relay (if auth is enabled)
|
||||
if let Some(ref token) = browser_token {
|
||||
let auth = wzp_proto::SignalMessage::AuthToken {
|
||||
token: token.clone(),
|
||||
};
|
||||
if let Err(e) = transport.send_signal(&auth).await {
|
||||
error!(room = %room, "send auth to relay: {e}");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Crypto handshake with relay
|
||||
let handshake_start = std::time::Instant::now();
|
||||
let bridge_seed = wzp_crypto::Seed::generate();
|
||||
match wzp_client::handshake::perform_handshake(&*transport, &bridge_seed.0).await {
|
||||
Ok(_session) => {
|
||||
let elapsed = handshake_start.elapsed().as_secs_f64();
|
||||
state.metrics.handshake_latency.observe(elapsed);
|
||||
info!(room = %room, elapsed_ms = %(elapsed * 1000.0), "crypto handshake with relay complete");
|
||||
}
|
||||
Err(e) => {
|
||||
error!(room = %room, "relay handshake failed: {e}");
|
||||
transport.close().await.ok();
|
||||
state.metrics.active_connections.dec();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Web bridge config: low latency for PTT, disable silence suppression
|
||||
// (PTT handles silence at the browser level, no need to suppress here)
|
||||
let config = CallConfig {
|
||||
suppression_enabled: false,
|
||||
jitter_target: 3, // 60ms instead of default (~1s)
|
||||
jitter_max: 20, // 400ms cap
|
||||
jitter_min: 1, // start playing after 20ms
|
||||
..CallConfig::default()
|
||||
};
|
||||
let encoder = Arc::new(Mutex::new(CallEncoder::new(&config)));
|
||||
let decoder = Arc::new(Mutex::new(CallDecoder::new(&config)));
|
||||
|
||||
// Browser → Relay
|
||||
let send_transport = transport.clone();
|
||||
let send_encoder = encoder.clone();
|
||||
let send_room = room.clone();
|
||||
let send_metrics = state.metrics.clone();
|
||||
let send_task = tokio::spawn(async move {
|
||||
let mut frames_sent = 0u64;
|
||||
while let Some(Ok(msg)) = ws_receiver.next().await {
|
||||
match msg {
|
||||
Message::Binary(data) => {
|
||||
if data.len() < FRAME_SAMPLES * 2 { continue; }
|
||||
let pcm: Vec<i16> = data.chunks_exact(2)
|
||||
.take(FRAME_SAMPLES)
|
||||
.map(|c| i16::from_le_bytes([c[0], c[1]]))
|
||||
.collect();
|
||||
|
||||
let packets = {
|
||||
let mut enc = send_encoder.lock().await;
|
||||
match enc.encode_frame(&pcm) {
|
||||
Ok(p) => p,
|
||||
Err(e) => { warn!("encode: {e}"); continue; }
|
||||
}
|
||||
};
|
||||
|
||||
for pkt in &packets {
|
||||
if let Err(e) = send_transport.send_media(pkt).await {
|
||||
error!("relay send: {e}");
|
||||
return;
|
||||
}
|
||||
}
|
||||
send_metrics.frames_bridged.with_label_values(&["up"]).inc();
|
||||
frames_sent += 1;
|
||||
if frames_sent % 500 == 0 {
|
||||
info!(room = %send_room, frames_sent, "browser → relay");
|
||||
}
|
||||
}
|
||||
Message::Close(_) => break,
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
info!(room = %send_room, frames_sent, "send ended");
|
||||
});
|
||||
|
||||
// Relay → Browser
|
||||
let recv_transport = transport.clone();
|
||||
let recv_decoder = decoder.clone();
|
||||
let recv_room = room.clone();
|
||||
let recv_metrics = state.metrics.clone();
|
||||
let recv_task = tokio::spawn(async move {
|
||||
let mut pcm_buf = vec![0i16; FRAME_SAMPLES];
|
||||
let mut frames_recv = 0u64;
|
||||
loop {
|
||||
match recv_transport.recv_media().await {
|
||||
Ok(Some(pkt)) => {
|
||||
let is_repair = pkt.header.is_repair;
|
||||
let mut dec = recv_decoder.lock().await;
|
||||
dec.ingest(pkt);
|
||||
if !is_repair {
|
||||
if let Some(_n) = dec.decode_next(&mut pcm_buf) {
|
||||
let bytes: Vec<u8> = pcm_buf.iter()
|
||||
.flat_map(|s| s.to_le_bytes())
|
||||
.collect();
|
||||
if let Err(e) = ws_sender.send(Message::Binary(bytes.into())).await {
|
||||
error!("ws send: {e}");
|
||||
return;
|
||||
}
|
||||
recv_metrics.frames_bridged.with_label_values(&["down"]).inc();
|
||||
frames_recv += 1;
|
||||
if frames_recv % 500 == 0 {
|
||||
info!(room = %recv_room, frames_recv, "relay → browser");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(None) => { info!(room = %recv_room, "relay closed"); break; }
|
||||
Err(e) => { error!(room = %recv_room, "relay recv: {e}"); break; }
|
||||
}
|
||||
}
|
||||
info!(room = %recv_room, frames_recv, "recv ended");
|
||||
});
|
||||
|
||||
tokio::select! {
|
||||
_ = send_task => {}
|
||||
_ = recv_task => {}
|
||||
}
|
||||
|
||||
transport.close().await.ok();
|
||||
state.metrics.active_connections.dec();
|
||||
info!(room = %room, "session ended");
|
||||
}
|
||||
130
crates/wzp-web/src/metrics.rs
Normal file
130
crates/wzp-web/src/metrics.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
//! Prometheus metrics for the WZP web bridge.
|
||||
|
||||
use prometheus::{
|
||||
Encoder, Histogram, HistogramOpts, IntCounter, IntCounterVec, IntGauge, Opts, Registry,
|
||||
TextEncoder,
|
||||
};
|
||||
|
||||
/// Holds all Prometheus metrics for the web bridge.
|
||||
#[derive(Clone)]
|
||||
pub struct WebMetrics {
|
||||
pub active_connections: IntGauge,
|
||||
pub frames_bridged: IntCounterVec,
|
||||
pub auth_failures: IntCounter,
|
||||
pub handshake_latency: Histogram,
|
||||
registry: Registry,
|
||||
}
|
||||
|
||||
impl WebMetrics {
|
||||
/// Create and register all web bridge metrics.
|
||||
pub fn new() -> Self {
|
||||
let registry = Registry::new();
|
||||
|
||||
let active_connections = IntGauge::with_opts(
|
||||
Opts::new("wzp_web_active_connections", "Current WebSocket connections"),
|
||||
)
|
||||
.expect("metric");
|
||||
registry
|
||||
.register(Box::new(active_connections.clone()))
|
||||
.expect("register");
|
||||
|
||||
let frames_bridged = IntCounterVec::new(
|
||||
Opts::new("wzp_web_frames_bridged_total", "Audio frames bridged"),
|
||||
&["direction"],
|
||||
)
|
||||
.expect("metric");
|
||||
registry
|
||||
.register(Box::new(frames_bridged.clone()))
|
||||
.expect("register");
|
||||
|
||||
let auth_failures = IntCounter::with_opts(
|
||||
Opts::new("wzp_web_auth_failures_total", "Browser auth failures"),
|
||||
)
|
||||
.expect("metric");
|
||||
registry
|
||||
.register(Box::new(auth_failures.clone()))
|
||||
.expect("register");
|
||||
|
||||
let handshake_latency = Histogram::with_opts(
|
||||
HistogramOpts::new(
|
||||
"wzp_web_handshake_latency_seconds",
|
||||
"Relay handshake time",
|
||||
)
|
||||
.buckets(vec![0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0]),
|
||||
)
|
||||
.expect("metric");
|
||||
registry
|
||||
.register(Box::new(handshake_latency.clone()))
|
||||
.expect("register");
|
||||
|
||||
Self {
|
||||
active_connections,
|
||||
frames_bridged,
|
||||
auth_failures,
|
||||
handshake_latency,
|
||||
registry,
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode all metrics as Prometheus text exposition format.
|
||||
pub fn gather(&self) -> String {
|
||||
let encoder = TextEncoder::new();
|
||||
let metric_families = self.registry.gather();
|
||||
let mut buf = Vec::new();
|
||||
encoder.encode(&metric_families, &mut buf).unwrap();
|
||||
String::from_utf8(buf).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
/// Axum handler that returns Prometheus text metrics.
|
||||
pub async fn metrics_handler(
|
||||
axum::extract::State(state): axum::extract::State<super::AppState>,
|
||||
) -> String {
|
||||
state.metrics.gather()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn web_metrics_register() {
|
||||
let m = WebMetrics::new();
|
||||
// Touch CounterVec labels so they appear in output
|
||||
m.frames_bridged.with_label_values(&["up"]);
|
||||
m.frames_bridged.with_label_values(&["down"]);
|
||||
let output = m.gather();
|
||||
assert!(
|
||||
output.contains("wzp_web_active_connections"),
|
||||
"missing active_connections"
|
||||
);
|
||||
assert!(
|
||||
output.contains("wzp_web_frames_bridged_total"),
|
||||
"missing frames_bridged"
|
||||
);
|
||||
assert!(
|
||||
output.contains("wzp_web_auth_failures_total"),
|
||||
"missing auth_failures"
|
||||
);
|
||||
assert!(
|
||||
output.contains("wzp_web_handshake_latency_seconds"),
|
||||
"missing handshake_latency"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn web_metrics_track_connections() {
|
||||
let m = WebMetrics::new();
|
||||
assert_eq!(m.active_connections.get(), 0);
|
||||
|
||||
m.active_connections.inc();
|
||||
m.active_connections.inc();
|
||||
assert_eq!(m.active_connections.get(), 2);
|
||||
|
||||
m.active_connections.dec();
|
||||
assert_eq!(m.active_connections.get(), 1);
|
||||
|
||||
let output = m.gather();
|
||||
assert!(output.contains("wzp_web_active_connections 1"));
|
||||
}
|
||||
}
|
||||
142
crates/wzp-web/static/audio-processor.js
Normal file
142
crates/wzp-web/static/audio-processor.js
Normal file
@@ -0,0 +1,142 @@
|
||||
// WarzonePhone AudioWorklet processors.
|
||||
// Both capture and playback handle 960-sample frames (20ms @ 48kHz).
|
||||
// AudioWorklet calls process() with 128-sample blocks, so we buffer internally.
|
||||
|
||||
const FRAME_SIZE = 960;
|
||||
|
||||
class WZPCaptureProcessor extends AudioWorkletProcessor {
|
||||
constructor() {
|
||||
super();
|
||||
// Pre-allocate ring buffer large enough for several frames
|
||||
this._ring = new Float32Array(FRAME_SIZE * 4);
|
||||
this._writePos = 0;
|
||||
}
|
||||
|
||||
process(inputs, _outputs, _parameters) {
|
||||
const input = inputs[0];
|
||||
if (!input || !input[0]) return true;
|
||||
|
||||
const samples = input[0]; // Float32Array, 128 samples typically
|
||||
const len = samples.length;
|
||||
|
||||
// Write into ring buffer
|
||||
if (this._writePos + len > this._ring.length) {
|
||||
// Should not happen with FRAME_SIZE * 4 capacity and timely draining,
|
||||
// but handle gracefully by resizing
|
||||
const bigger = new Float32Array(this._ring.length * 2);
|
||||
bigger.set(this._ring.subarray(0, this._writePos));
|
||||
this._ring = bigger;
|
||||
}
|
||||
this._ring.set(samples, this._writePos);
|
||||
this._writePos += len;
|
||||
|
||||
// Drain complete 960-sample frames
|
||||
while (this._writePos >= FRAME_SIZE) {
|
||||
// Convert Float32 -> Int16 PCM
|
||||
const pcm = new Int16Array(FRAME_SIZE);
|
||||
for (let i = 0; i < FRAME_SIZE; i++) {
|
||||
const s = this._ring[i];
|
||||
pcm[i] = s < -1 ? -32768 : s > 1 ? 32767 : (s * 32767) | 0;
|
||||
}
|
||||
|
||||
// Shift remaining data forward
|
||||
this._writePos -= FRAME_SIZE;
|
||||
if (this._writePos > 0) {
|
||||
this._ring.copyWithin(0, FRAME_SIZE, FRAME_SIZE + this._writePos);
|
||||
}
|
||||
|
||||
// Send the Int16 PCM buffer (1920 bytes) to the main thread
|
||||
this.port.postMessage(pcm.buffer, [pcm.buffer]);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
class WZPPlaybackProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    // Ring buffer for decoded Float32 samples ready for output.
    this._ring = new Float32Array(FRAME_SIZE * 8);
    this._readPos = 0;
    this._writePos = 0;
    this._maxBuffered = FRAME_SIZE * 6; // ~120ms max to prevent drift

    this.port.onmessage = (e) => {
      // Receive Int16 PCM from the main thread; converted to Float32 on write.
      const pcm = new Int16Array(e.data);
      const len = pcm.length;

      // Number of samples currently buffered (the ring may be wrapped).
      let available = this._writePos - this._readPos;
      if (available < 0) available += this._ring.length;

      if (available + len > this._maxBuffered) {
        // Too much buffered; drop oldest samples to prevent latency drift.
        this._readPos = this._writePos;
        // BUGFIX: the buffer is now empty — keep the local count in sync.
        // Previously `available` stayed stale, so the grow-branch below could
        // set _writePos past _readPos and replay already-dropped samples.
        available = 0;
      }

      // Grow the ring if the incoming chunk would not fit.
      if (this._ring.length < len + available + 128) {
        const bigger = new Float32Array(this._ring.length * 2);
        // Copy the existing (possibly wrapped) contents contiguously.
        if (this._readPos <= this._writePos) {
          bigger.set(this._ring.subarray(this._readPos, this._writePos));
        } else {
          const firstPart = this._ring.subarray(this._readPos);
          const secondPart = this._ring.subarray(0, this._writePos);
          bigger.set(firstPart);
          bigger.set(secondPart, firstPart.length);
        }
        this._ring = bigger;
        this._readPos = 0;
        this._writePos = available;
      }

      // Write converted samples into the ring buffer.
      for (let i = 0; i < len; i++) {
        this._ring[this._writePos] = pcm[i] / 32768.0;
        this._writePos++;
        if (this._writePos >= this._ring.length) this._writePos = 0;
      }
    };
  }

  // Pull buffered samples into the output; zero-fill on underrun.
  process(_inputs, outputs, _parameters) {
    const output = outputs[0];
    if (!output || !output[0]) return true;

    const out = output[0]; // 128 samples typically
    const needed = out.length;

    let available;
    if (this._writePos >= this._readPos) {
      available = this._writePos - this._readPos;
    } else {
      available = this._ring.length - this._readPos + this._writePos;
    }

    if (available >= needed) {
      for (let i = 0; i < needed; i++) {
        out[i] = this._ring[this._readPos];
        this._readPos++;
        if (this._readPos >= this._ring.length) this._readPos = 0;
      }
    } else {
      // Output what we have, zero-fill the rest (underrun).
      for (let i = 0; i < available; i++) {
        out[i] = this._ring[this._readPos];
        this._readPos++;
        if (this._readPos >= this._ring.length) this._readPos = 0;
      }
      for (let i = available; i < needed; i++) {
        out[i] = 0;
      }
    }

    return true;
  }
}
|
||||
|
||||
// Register both processors under the names the main thread uses when
// constructing its AudioWorkletNodes.
registerProcessor('wzp-capture-processor', WZPCaptureProcessor);
registerProcessor('wzp-playback-processor', WZPPlaybackProcessor);
|
||||
348
crates/wzp-web/static/index.html
Normal file
348
crates/wzp-web/static/index.html
Normal file
@@ -0,0 +1,348 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>WarzonePhone</title>
|
||||
<style>
|
||||
* { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
body { font-family: -apple-system, BlinkMacSystemFont, sans-serif; background: #1a1a2e; color: #e0e0e0; display: flex; justify-content: center; align-items: center; min-height: 100vh; }
|
||||
.container { text-align: center; max-width: 420px; padding: 2rem; }
|
||||
h1 { font-size: 1.5rem; margin-bottom: 0.5rem; color: #00d4ff; }
|
||||
.subtitle { color: #888; font-size: 0.85rem; margin-bottom: 1.5rem; }
|
||||
.room-input { margin-bottom: 1.5rem; }
|
||||
.room-input input { background: #2a2a4a; border: 1px solid #444; color: #e0e0e0; padding: 0.6rem 1rem; font-size: 1rem; border-radius: 8px; width: 200px; text-align: center; }
|
||||
.room-input input:focus { outline: none; border-color: #00d4ff; }
|
||||
.room-input label { display: block; color: #888; font-size: 0.8rem; margin-bottom: 0.4rem; }
|
||||
#callBtn { background: #00d4ff; color: #1a1a2e; border: none; padding: 1rem 3rem; font-size: 1.2rem; border-radius: 50px; cursor: pointer; transition: all 0.2s; }
|
||||
#callBtn:hover { background: #00b8d4; transform: scale(1.05); }
|
||||
#callBtn.active { background: #ff4444; color: white; }
|
||||
#callBtn:disabled { background: #444; color: #888; cursor: not-allowed; transform: none; }
|
||||
.status { margin-top: 1.5rem; font-size: 0.9rem; color: #888; min-height: 1.5rem; }
|
||||
.stats { margin-top: 0.5rem; font-size: 0.75rem; color: #555; font-family: monospace; }
|
||||
.level { margin-top: 1rem; height: 6px; background: #333; border-radius: 3px; overflow: hidden; }
|
||||
.level-bar { height: 100%; background: #00d4ff; width: 0%; transition: width 50ms; }
|
||||
.controls { margin-top: 1rem; display: flex; gap: 0.5rem; justify-content: center; flex-wrap: wrap; }
|
||||
.controls label { font-size: 0.8rem; color: #888; cursor: pointer; display: flex; align-items: center; gap: 0.3rem; }
|
||||
.controls input[type="checkbox"] { accent-color: #00d4ff; }
|
||||
#pttBtn { display: none; background: #444; color: #e0e0e0; border: 2px solid #666; padding: 0.8rem 2rem; font-size: 1rem; border-radius: 12px; cursor: pointer; user-select: none; -webkit-user-select: none; touch-action: none; }
|
||||
#pttBtn.transmitting { background: #ff4444; border-color: #ff6666; color: white; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<h1>WarzonePhone</h1>
|
||||
<p class="subtitle">Lossy VoIP Protocol</p>
|
||||
<div class="room-input">
|
||||
<label for="room">Room</label>
|
||||
<input type="text" id="room" placeholder="enter room name" value="">
|
||||
</div>
|
||||
<button id="callBtn" onclick="toggleCall()">Connect</button>
|
||||
<div class="controls" id="controls" style="display:none;">
|
||||
<label><input type="checkbox" id="pttMode" onchange="togglePTT()"> Radio mode (push-to-talk)</label>
|
||||
</div>
|
||||
<button id="pttBtn">Hold to Talk</button>
|
||||
<div class="level"><div class="level-bar" id="levelBar"></div></div>
|
||||
<div class="status" id="status"></div>
|
||||
<div class="stats" id="stats"></div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
const SAMPLE_RATE = 48000;  // mono 48 kHz PCM end-to-end
const FRAME_SIZE = 960;     // 20 ms of audio per frame at 48 kHz

// --- per-call state ---
let ws = null;              // WebSocket to /ws/<room>
let audioCtx = null;        // AudioContext, created per call
let mediaStream = null;     // microphone MediaStream
let captureNode = null;     // mic -> PCM frame producer
let playbackNode = null;    // PCM -> speakers (null => BufferSource fallback)
let active = false;         // true while a call is up; drives reconnect logic
let transmitting = true;    // in open-mic mode, always transmitting
let pttMode = false;        // push-to-talk checkbox state
let framesSent = 0;
let framesRecv = 0;
let startTime = 0;          // Date.now() at connect, for the stats line
let statsInterval = null;   // stats refresh timer id
|
||||
|
||||
// Use room from URL path or input field
|
||||
// Resolve the room name: URL path takes priority, then the hash,
// then the input field, falling back to 'default'.
function getRoom() {
  // BUGFIX: path/hash segments are percent-encoded; the ws URL re-encodes
  // the room with encodeURIComponent, so without decoding here a room like
  // "my room" would be double-encoded and never match. Decode defensively
  // (malformed escapes fall back to the raw string instead of throwing).
  const decode = (s) => { try { return decodeURIComponent(s); } catch (_) { return s; } };
  const path = decode(location.pathname.replace(/^\//, '').replace(/\/$/, ''));
  if (path && path !== 'index.html') return path;
  const hash = decode(location.hash.replace('#', ''));
  if (hash) return hash;
  return document.getElementById('room').value.trim() || 'default';
}
|
||||
|
||||
// Pre-fill room input from URL on page load
|
||||
// On page load, seed the room input from the URL path (if present).
(() => {
  const fromPath = location.pathname.replace(/^\//, '').replace(/\/$/, '');
  if (fromPath && fromPath !== 'index.html') {
    document.getElementById('room').value = fromPath;
  }
})();
|
||||
|
||||
// Update the one-line status message under the call button.
function setStatus(msg) { document.getElementById('status').textContent = msg; }
// Update the monospace stats line (uptime / frame counters).
function setStats(msg) { document.getElementById('stats').textContent = msg; }
|
||||
|
||||
// Call-button handler: flip between connected and disconnected states.
function toggleCall() {
  active ? stopCall() : startCall();
}
|
||||
|
||||
// Acquire the mic, open the room WebSocket, and start capture/playback.
// While `active` is true, a dropped socket auto-reconnects after 1 s.
async function startCall() {
  const btn = document.getElementById('callBtn');
  const room = getRoom();
  if (!room) { setStatus('Enter a room name'); return; }

  btn.disabled = true;
  setStatus('Requesting microphone...');

  try {
    mediaStream = await navigator.mediaDevices.getUserMedia({
      audio: { sampleRate: SAMPLE_RATE, channelCount: 1, echoCancellation: true, noiseSuppression: true }
    });
  } catch(e) {
    setStatus('Mic access denied: ' + e.message);
    btn.disabled = false;
    return;
  }

  audioCtx = new AudioContext({ sampleRate: SAMPLE_RATE });

  // Connect WebSocket with room name
  const proto = location.protocol === 'https:' ? 'wss:' : 'ws:';
  const wsUrl = proto + '//' + location.host + '/ws/' + encodeURIComponent(room);
  setStatus('Connecting to room: ' + room + '...');

  ws = new WebSocket(wsUrl);
  ws.binaryType = 'arraybuffer';

  ws.onopen = async () => {
    setStatus('Connected to room: ' + room);
    btn.textContent = 'Disconnect';
    btn.classList.add('active');
    btn.disabled = false;
    active = true;
    framesSent = 0;
    framesRecv = 0;
    startTime = Date.now();
    showControls(true);
    await startAudioCapture();
    await startAudioPlayback();
    startStatsUpdate();
  };

  ws.onmessage = (event) => {
    const pcmData = new Int16Array(event.data);
    framesRecv++;
    playAudio(pcmData);
  };

  // Reconnection is owned solely by onclose.
  ws.onclose = () => {
    if (active) {
      setStatus('Disconnected — reconnecting to ' + room + '...');
      setTimeout(() => { if (active) { cleanupAudio(); startCall(); } }, 1000);
    } else {
      setStatus('Disconnected');
    }
  };

  // BUGFIX: onerror previously scheduled its own reconnect timer. The
  // browser always fires `close` after `error`, so BOTH handlers queued a
  // reconnect, launching two concurrent startCall() attempts (double mic
  // capture, duplicate sockets). onerror now only reports; onclose reconnects.
  ws.onerror = () => {
    if (active) {
      setStatus('Error — reconnecting...');
    }
  };
}
|
||||
|
||||
// Tear down the call: UI state, audio graph, socket, and stats timer.
function stopCall() {
  // Clear `active` before closing the socket so onclose does not reconnect.
  active = false;
  const btn = document.getElementById('callBtn');
  btn.textContent = 'Connect';
  btn.classList.remove('active');
  btn.disabled = false;
  showControls(false);
  cleanupAudio();
  if (ws) {
    ws.close();
    ws = null;
  }
  if (statsInterval) {
    clearInterval(statsInterval);
    statsInterval = null;
  }
  setStatus('');
  setStats('');
}
|
||||
|
||||
// Release every audio resource; safe to call repeatedly.
function cleanupAudio() {
  if (captureNode) {
    captureNode.disconnect();
    captureNode = null;
  }
  if (playbackNode) {
    playbackNode.disconnect();
    playbackNode = null;
  }
  if (audioCtx) {
    audioCtx.close();
    audioCtx = null;
    workletLoaded = false; // a fresh context must re-add the worklet module
  }
  if (mediaStream) {
    for (const track of mediaStream.getTracks()) track.stop();
    mediaStream = null;
  }
}

// Whether the worklet module has been added to the current AudioContext.
let workletLoaded = false;
|
||||
|
||||
// Add the AudioWorklet module once per AudioContext.
// Resolves true when worklets are usable, false to select the fallbacks.
async function loadWorkletModule() {
  if (workletLoaded) return true;
  const supported = typeof AudioWorkletNode !== 'undefined' && !!audioCtx.audioWorklet;
  if (!supported) {
    console.warn('AudioWorklet API not supported in this browser — using ScriptProcessorNode fallback');
    return false;
  }
  try {
    await audioCtx.audioWorklet.addModule('audio-processor.js');
  } catch(e) {
    console.warn('AudioWorklet module failed to load — using ScriptProcessorNode fallback:', e);
    return false;
  }
  workletLoaded = true;
  return true;
}
|
||||
|
||||
// Wire the microphone into a capture node that emits FRAME_SIZE-sample
// (20 ms) Int16 PCM frames over the WebSocket while `transmitting` is true.
// Also drives the on-screen level meter from the outgoing frames.
async function startAudioCapture() {
  const source = audioCtx.createMediaStreamSource(mediaStream);
  const hasWorklet = await loadWorkletModule();

  if (hasWorklet) {
    captureNode = new AudioWorkletNode(audioCtx, 'wzp-capture-processor');
    captureNode.port.onmessage = (e) => {
      // Drop frames when disconnected or muted (e.g. PTT released).
      if (!active || !ws || ws.readyState !== WebSocket.OPEN || !transmitting) return;
      ws.send(e.data);
      framesSent++;

      // Level meter from the PCM data
      // (samples every 16th value to keep the handler cheap).
      const pcm = new Int16Array(e.data);
      let max = 0;
      for (let i = 0; i < pcm.length; i += 16) max = Math.max(max, Math.abs(pcm[i]));
      document.getElementById('levelBar').style.width = (max / 32768 * 100) + '%';
    };
    source.connect(captureNode);
    captureNode.connect(audioCtx.destination); // needed to keep worklet alive
  } else {
    // Fallback to ScriptProcessorNode (deprecated but widely supported)
    console.warn('Capture: using ScriptProcessorNode fallback');
    captureNode = audioCtx.createScriptProcessor(4096, 1, 1);
    // Accumulates leftover samples between callbacks until a full frame exists.
    let acc = new Float32Array(0);
    captureNode.onaudioprocess = (ev) => {
      if (!active || !ws || ws.readyState !== WebSocket.OPEN || !transmitting) return;
      const input = ev.inputBuffer.getChannelData(0);
      // Append this callback's samples to the accumulator.
      const n = new Float32Array(acc.length + input.length);
      n.set(acc); n.set(input, acc.length); acc = n;
      // Emit as many complete frames as are buffered.
      while (acc.length >= FRAME_SIZE) {
        const frame = acc.slice(0, FRAME_SIZE); acc = acc.slice(FRAME_SIZE);
        // Clamp-and-round Float32 [-1,1] to Int16.
        const pcm = new Int16Array(FRAME_SIZE);
        for (let i = 0; i < FRAME_SIZE; i++) pcm[i] = Math.max(-32768, Math.min(32767, Math.round(frame[i] * 32767)));
        let max = 0;
        for (let i = 0; i < pcm.length; i += 16) max = Math.max(max, Math.abs(pcm[i]));
        document.getElementById('levelBar').style.width = (max / 32768 * 100) + '%';
        ws.send(pcm.buffer);
        framesSent++;
      }
    };
    source.connect(captureNode);
    captureNode.connect(audioCtx.destination);
  }
}
|
||||
|
||||
// Create the playback sink: an AudioWorkletNode when available, otherwise
// leave playbackNode null so playAudio() schedules BufferSources instead.
async function startAudioPlayback() {
  if (await loadWorkletModule()) {
    playbackNode = new AudioWorkletNode(audioCtx, 'wzp-playback-processor');
    playbackNode.connect(audioCtx.destination);
    return;
  }
  console.warn('Playback: using scheduled BufferSource fallback');
  playbackNode = null; // will use createBufferSource fallback in playAudio()
}
|
||||
|
||||
// Playout clock for the BufferSource fallback path (AudioContext time).
let nextPlayTime = 0;

// Route one frame of received Int16 PCM to the speakers.
function playAudio(pcmInt16) {
  if (!audioCtx) return;

  if (playbackNode && playbackNode.port) {
    // AudioWorklet path — send Int16 PCM directly to the worklet for conversion
    // (the underlying buffer is transferred, not copied).
    playbackNode.port.postMessage(pcmInt16.buffer, [pcmInt16.buffer]);
  } else {
    // Fallback: scheduled BufferSource (convert Int16 -> Float32 on main thread)
    const floatData = new Float32Array(pcmInt16.length);
    for (let i = 0; i < pcmInt16.length; i++) {
      floatData[i] = pcmInt16[i] / 32768.0;
    }
    const buffer = audioCtx.createBuffer(1, floatData.length, SAMPLE_RATE);
    buffer.getChannelData(0).set(floatData);
    const source = audioCtx.createBufferSource();
    source.buffer = buffer;
    source.connect(audioCtx.destination);
    const now = audioCtx.currentTime;
    // Re-anchor the playout clock if it fell behind or drifted >1 s ahead.
    if (nextPlayTime < now || nextPlayTime > now + 1.0) {
      nextPlayTime = now + 0.02;
    }
    source.start(nextPlayTime);
    nextPlayTime += buffer.duration; // queue buffers back-to-back
  }
}
|
||||
|
||||
// Refresh the stats line once per second for as long as the call is live.
function startStatsUpdate() {
  statsInterval = setInterval(() => {
    if (!active) {
      clearInterval(statsInterval);
      return;
    }
    const secs = ((Date.now() - startTime) / 1000).toFixed(1);
    setStats(secs + 's | sent: ' + framesSent + ' | recv: ' + framesRecv);
  }, 1000);
}
|
||||
|
||||
// --- Push-to-talk ---
|
||||
|
||||
// Checkbox handler: switch between open-mic and push-to-talk modes.
function togglePTT() {
  pttMode = document.getElementById('pttMode').checked;
  const pttButton = document.getElementById('pttBtn');
  // Open mic transmits continuously; PTT transmits only while held.
  transmitting = !pttMode;
  pttButton.style.display = pttMode ? 'block' : 'none';
}
|
||||
|
||||
// PTT button — hold to talk (mouse + touch)
|
||||
// PTT button — hold to talk (mouse + touch)
document.getElementById('pttBtn').addEventListener('mousedown', () => { startTransmit(); });
document.getElementById('pttBtn').addEventListener('mouseup', () => { stopTransmit(); });
document.getElementById('pttBtn').addEventListener('mouseleave', () => { stopTransmit(); });
// preventDefault stops touch events from also synthesizing mouse events.
document.getElementById('pttBtn').addEventListener('touchstart', (e) => { e.preventDefault(); startTransmit(); });
document.getElementById('pttBtn').addEventListener('touchend', (e) => { e.preventDefault(); stopTransmit(); });

// Spacebar PTT
// `!e.repeat` ignores key auto-repeat while space is held down.
document.addEventListener('keydown', (e) => { if (pttMode && active && e.code === 'Space' && !e.repeat) { e.preventDefault(); startTransmit(); } });
document.addEventListener('keyup', (e) => { if (pttMode && active && e.code === 'Space') { e.preventDefault(); stopTransmit(); } });
|
||||
|
||||
// Begin transmitting while the PTT control is held (no-op outside PTT mode).
function startTransmit() {
  if (!pttMode || !active) return;
  transmitting = true;
  const pttButton = document.getElementById('pttBtn');
  pttButton.classList.add('transmitting');
  pttButton.textContent = 'Transmitting...';
}
|
||||
|
||||
// Stop transmitting when the PTT control is released (no-op outside PTT mode).
function stopTransmit() {
  if (!pttMode) return;
  transmitting = false;
  const pttButton = document.getElementById('pttBtn');
  pttButton.classList.remove('transmitting');
  pttButton.textContent = 'Hold to Talk';
}
|
||||
|
||||
// Show controls when connected
|
||||
// Show/hide the in-call controls; hiding also resets to open-mic mode.
function showControls(show) {
  document.getElementById('controls').style.display = show ? 'flex' : 'none';
  if (show) return;
  document.getElementById('pttBtn').style.display = 'none';
  pttMode = false;
  transmitting = true;
}
|
||||
|
||||
// Set room from URL on load
|
||||
// After load, mirror a non-default room from the URL into the input box.
window.addEventListener('load', () => {
  const initialRoom = getRoom();
  if (initialRoom && initialRoom !== 'default') {
    document.getElementById('room').value = initialRoom;
  }
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
1
deps/featherchat
vendored
Submodule
1
deps/featherchat
vendored
Submodule
Submodule deps/featherchat added at 5764719375
677
docs/API.md
Normal file
677
docs/API.md
Normal file
@@ -0,0 +1,677 @@
|
||||
# WarzonePhone Crate API Reference
|
||||
|
||||
## wzp-proto
|
||||
|
||||
**Path**: `crates/wzp-proto/src/`
|
||||
|
||||
The protocol definition crate. Contains all shared types, trait interfaces, and core logic. No implementation dependencies -- this is the hub of the star dependency graph.
|
||||
|
||||
### Traits (`traits.rs`)
|
||||
|
||||
```rust
|
||||
/// Encodes PCM audio into compressed frames.
|
||||
pub trait AudioEncoder: Send + Sync {
|
||||
fn encode(&mut self, pcm: &[i16], out: &mut [u8]) -> Result<usize, CodecError>;
|
||||
fn codec_id(&self) -> CodecId;
|
||||
fn set_profile(&mut self, profile: QualityProfile) -> Result<(), CodecError>;
|
||||
fn max_frame_bytes(&self) -> usize;
|
||||
fn set_inband_fec(&mut self, _enabled: bool) {} // default no-op
|
||||
fn set_dtx(&mut self, _enabled: bool) {} // default no-op
|
||||
}
|
||||
|
||||
/// Decodes compressed frames back to PCM audio.
|
||||
pub trait AudioDecoder: Send + Sync {
|
||||
fn decode(&mut self, encoded: &[u8], pcm: &mut [i16]) -> Result<usize, CodecError>;
|
||||
fn decode_lost(&mut self, pcm: &mut [i16]) -> Result<usize, CodecError>;
|
||||
fn codec_id(&self) -> CodecId;
|
||||
fn set_profile(&mut self, profile: QualityProfile) -> Result<(), CodecError>;
|
||||
}
|
||||
|
||||
/// Encodes source symbols into FEC-protected blocks.
|
||||
pub trait FecEncoder: Send + Sync {
|
||||
fn add_source_symbol(&mut self, data: &[u8]) -> Result<(), FecError>;
|
||||
fn generate_repair(&mut self, ratio: f32) -> Result<Vec<(u8, Vec<u8>)>, FecError>;
|
||||
fn finalize_block(&mut self) -> Result<u8, FecError>;
|
||||
fn current_block_id(&self) -> u8;
|
||||
fn current_block_size(&self) -> usize;
|
||||
}
|
||||
|
||||
/// Decodes FEC-protected blocks, recovering lost source symbols.
|
||||
pub trait FecDecoder: Send + Sync {
|
||||
fn add_symbol(&mut self, block_id: u8, symbol_index: u8, is_repair: bool, data: &[u8]) -> Result<(), FecError>;
|
||||
fn try_decode(&mut self, block_id: u8) -> Result<Option<Vec<Vec<u8>>>, FecError>;
|
||||
fn expire_before(&mut self, block_id: u8);
|
||||
}
|
||||
|
||||
/// Per-call encryption session (symmetric, after key exchange).
|
||||
pub trait CryptoSession: Send + Sync {
|
||||
fn encrypt(&mut self, header_bytes: &[u8], plaintext: &[u8], out: &mut Vec<u8>) -> Result<(), CryptoError>;
|
||||
fn decrypt(&mut self, header_bytes: &[u8], ciphertext: &[u8], out: &mut Vec<u8>) -> Result<(), CryptoError>;
|
||||
fn initiate_rekey(&mut self) -> Result<[u8; 32], CryptoError>;
|
||||
fn complete_rekey(&mut self, peer_ephemeral_pub: &[u8; 32]) -> Result<(), CryptoError>;
|
||||
fn overhead(&self) -> usize { 16 } // ChaCha20-Poly1305 tag
|
||||
}
|
||||
|
||||
/// Key exchange using the Warzone identity model.
|
||||
pub trait KeyExchange: Send + Sync {
|
||||
fn from_identity_seed(seed: &[u8; 32]) -> Self where Self: Sized;
|
||||
fn generate_ephemeral(&mut self) -> [u8; 32];
|
||||
fn identity_public_key(&self) -> [u8; 32];
|
||||
fn fingerprint(&self) -> [u8; 16];
|
||||
fn sign(&self, data: &[u8]) -> Vec<u8>;
|
||||
fn verify(peer_identity_pub: &[u8; 32], data: &[u8], signature: &[u8]) -> bool where Self: Sized;
|
||||
fn derive_session(&self, peer_ephemeral_pub: &[u8; 32]) -> Result<Box<dyn CryptoSession>, CryptoError>;
|
||||
}
|
||||
|
||||
/// Transport layer for sending/receiving media and signaling.
|
||||
#[async_trait]
|
||||
pub trait MediaTransport: Send + Sync {
|
||||
async fn send_media(&self, packet: &MediaPacket) -> Result<(), TransportError>;
|
||||
async fn recv_media(&self) -> Result<Option<MediaPacket>, TransportError>;
|
||||
async fn send_signal(&self, msg: &SignalMessage) -> Result<(), TransportError>;
|
||||
async fn recv_signal(&self) -> Result<Option<SignalMessage>, TransportError>;
|
||||
fn path_quality(&self) -> PathQuality;
|
||||
async fn close(&self) -> Result<(), TransportError>;
|
||||
}
|
||||
|
||||
/// Wraps/unwraps packets for DPI evasion (Phase 2).
|
||||
pub trait ObfuscationLayer: Send + Sync {
|
||||
fn obfuscate(&mut self, data: &[u8], out: &mut Vec<u8>) -> Result<(), ObfuscationError>;
|
||||
fn deobfuscate(&mut self, data: &[u8], out: &mut Vec<u8>) -> Result<(), ObfuscationError>;
|
||||
}
|
||||
|
||||
/// Adaptive quality controller.
|
||||
pub trait QualityController: Send + Sync {
|
||||
fn observe(&mut self, report: &QualityReport) -> Option<QualityProfile>;
|
||||
fn force_profile(&mut self, profile: QualityProfile);
|
||||
fn current_profile(&self) -> QualityProfile;
|
||||
}
|
||||
```
|
||||
|
||||
### Wire Format Types (`packet.rs`)
|
||||
|
||||
```rust
|
||||
pub struct MediaHeader { /* 12 bytes */ }
|
||||
pub struct QualityReport { /* 4 bytes */ }
|
||||
pub struct MediaPacket { pub header: MediaHeader, pub payload: Bytes, pub quality_report: Option<QualityReport> }
|
||||
pub enum SignalMessage { CallOffer{..}, CallAnswer{..}, IceCandidate{..}, Rekey{..}, QualityUpdate{..}, Ping{..}, Pong{..}, Hangup{..} }
|
||||
pub enum HangupReason { Normal, Busy, Declined, Timeout, Error }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `MediaHeader::write_to(&self, buf: &mut impl BufMut)` -- serialize to 12 bytes
|
||||
- `MediaHeader::read_from(buf: &mut impl Buf) -> Option<Self>` -- deserialize
|
||||
- `MediaHeader::encode_fec_ratio(ratio: f32) -> u8` -- float to 7-bit wire encoding
|
||||
- `MediaHeader::decode_fec_ratio(encoded: u8) -> f32` -- 7-bit wire to float
|
||||
- `MediaPacket::to_bytes(&self) -> Bytes` -- serialize complete packet
|
||||
- `MediaPacket::from_bytes(data: Bytes) -> Option<Self>` -- deserialize
|
||||
|
||||
### Codec Identifiers (`codec_id.rs`)
|
||||
|
||||
```rust
|
||||
pub enum CodecId { Opus24k = 0, Opus16k = 1, Opus6k = 2, Codec2_3200 = 3, Codec2_1200 = 4 }
|
||||
|
||||
pub struct QualityProfile {
|
||||
pub codec: CodecId,
|
||||
pub fec_ratio: f32,
|
||||
pub frame_duration_ms: u8,
|
||||
pub frames_per_block: u8,
|
||||
}
|
||||
```
|
||||
|
||||
Constants: `QualityProfile::GOOD`, `QualityProfile::DEGRADED`, `QualityProfile::CATASTROPHIC`
|
||||
|
||||
Key methods:
|
||||
- `CodecId::bitrate_bps(self) -> u32`
|
||||
- `CodecId::frame_duration_ms(self) -> u8`
|
||||
- `CodecId::sample_rate_hz(self) -> u32`
|
||||
- `CodecId::from_wire(val: u8) -> Option<Self>`
|
||||
- `CodecId::to_wire(self) -> u8`
|
||||
- `QualityProfile::total_bitrate_kbps(&self) -> f32`
|
||||
|
||||
### Quality Controller (`quality.rs`)
|
||||
|
||||
```rust
|
||||
pub enum Tier { Good, Degraded, Catastrophic }
|
||||
pub struct AdaptiveQualityController { /* ... */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `AdaptiveQualityController::new() -> Self` -- starts at Tier::Good
|
||||
- `AdaptiveQualityController::tier(&self) -> Tier`
|
||||
- `Tier::classify(report: &QualityReport) -> Self`
|
||||
- `Tier::profile(self) -> QualityProfile`
|
||||
|
||||
### Jitter Buffer (`jitter.rs`)
|
||||
|
||||
```rust
|
||||
pub struct JitterBuffer { /* ... */ }
|
||||
pub struct JitterStats { pub packets_received: u64, pub packets_played: u64, pub packets_lost: u64, pub packets_late: u64, pub packets_duplicate: u64, pub current_depth: usize }
|
||||
pub enum PlayoutResult { Packet(MediaPacket), Missing { seq: u16 }, NotReady }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `JitterBuffer::new(target_depth: usize, max_depth: usize, min_depth: usize) -> Self`
|
||||
- `JitterBuffer::default_5s() -> Self` -- target=50, max=250, min=25
|
||||
- `JitterBuffer::push(&mut self, packet: MediaPacket)`
|
||||
- `JitterBuffer::pop(&mut self) -> PlayoutResult`
|
||||
- `JitterBuffer::depth(&self) -> usize`
|
||||
- `JitterBuffer::stats(&self) -> &JitterStats`
|
||||
- `JitterBuffer::reset(&mut self)`
|
||||
- `JitterBuffer::set_target_depth(&mut self, depth: usize)`
|
||||
|
||||
### Session State Machine (`session.rs`)
|
||||
|
||||
```rust
|
||||
pub enum SessionState { Idle, Connecting, Handshaking, Active, Rekeying, Closed }
|
||||
pub enum SessionEvent { Initiate, Connected, HandshakeComplete, RekeyStart, RekeyComplete, Terminate{reason}, ConnectionLost }
|
||||
pub struct Session { /* ... */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `Session::new(session_id: [u8; 16]) -> Self`
|
||||
- `Session::state(&self) -> SessionState`
|
||||
- `Session::transition(&mut self, event: SessionEvent, now_ms: u64) -> Result<SessionState, TransitionError>`
|
||||
- `Session::is_media_active(&self) -> bool` -- true for Active and Rekeying
|
||||
|
||||
### Error Types (`error.rs`)
|
||||
|
||||
```rust
|
||||
pub enum CodecError { EncodeFailed(String), DecodeFailed(String), UnsupportedTransition{from, to} }
|
||||
pub enum FecError { BlockFull{max}, InsufficientSymbols{needed, have}, InvalidBlock(u8), Internal(String) }
|
||||
pub enum CryptoError { DecryptionFailed, InvalidPublicKey, RekeyFailed(String), ReplayDetected{seq}, Internal(String) }
|
||||
pub enum TransportError { ConnectionLost, DatagramTooLarge{size, max}, Timeout{ms}, Io(io::Error), Internal(String) }
|
||||
pub enum ObfuscationError { Failed(String), InvalidFraming }
|
||||
```
|
||||
|
||||
### PathQuality (`traits.rs`)
|
||||
|
||||
```rust
|
||||
pub struct PathQuality {
|
||||
pub loss_pct: f32, // 0.0-100.0
|
||||
pub rtt_ms: u32,
|
||||
pub jitter_ms: u32,
|
||||
pub bandwidth_kbps: u32,
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## wzp-codec
|
||||
|
||||
**Path**: `crates/wzp-codec/src/`
|
||||
|
||||
### Factory Functions (`lib.rs`)
|
||||
|
||||
```rust
|
||||
/// Create an adaptive encoder (accepts 48 kHz PCM, handles resampling for Codec2).
|
||||
pub fn create_encoder(profile: QualityProfile) -> Box<dyn AudioEncoder>
|
||||
|
||||
/// Create an adaptive decoder (outputs 48 kHz PCM, handles upsampling from Codec2).
|
||||
pub fn create_decoder(profile: QualityProfile) -> Box<dyn AudioDecoder>
|
||||
```
|
||||
|
||||
### Public Types
|
||||
|
||||
```rust
|
||||
pub struct AdaptiveEncoder { /* wraps OpusEncoder + Codec2Encoder */ }
|
||||
pub struct AdaptiveDecoder { /* wraps OpusDecoder + Codec2Decoder */ }
|
||||
pub struct OpusEncoder { /* audiopus::coder::Encoder wrapper */ }
|
||||
pub struct OpusDecoder { /* audiopus::coder::Decoder wrapper */ }
|
||||
pub struct Codec2Encoder { /* codec2::Codec2 wrapper */ }
|
||||
pub struct Codec2Decoder { /* codec2::Codec2 wrapper */ }
|
||||
```
|
||||
|
||||
Key methods on concrete types:
|
||||
- `OpusEncoder::new(profile: QualityProfile) -> Result<Self, CodecError>`
|
||||
- `OpusEncoder::frame_samples(&self) -> usize` -- 960 for 20ms, 1920 for 40ms
|
||||
- `Codec2Encoder::new(profile: QualityProfile) -> Result<Self, CodecError>`
|
||||
- `Codec2Encoder::frame_samples(&self) -> usize` -- 160 for 20ms/3200bps, 320 for 40ms/1200bps
|
||||
|
||||
### Resampler (`resample.rs`)
|
||||
|
||||
```rust
|
||||
pub fn resample_48k_to_8k(input: &[i16]) -> Vec<i16> // 6:1 decimation with box filter
|
||||
pub fn resample_8k_to_48k(input: &[i16]) -> Vec<i16> // 1:6 linear interpolation
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## wzp-fec
|
||||
|
||||
**Path**: `crates/wzp-fec/src/`
|
||||
|
||||
### Factory Functions (`lib.rs`)
|
||||
|
||||
```rust
|
||||
/// Create an encoder/decoder pair configured for the given quality profile.
|
||||
pub fn create_fec_pair(profile: &QualityProfile) -> (RaptorQFecEncoder, RaptorQFecDecoder)
|
||||
|
||||
/// Create an encoder configured for the given quality profile.
|
||||
pub fn create_encoder(profile: &QualityProfile) -> RaptorQFecEncoder
|
||||
|
||||
/// Create a decoder configured for the given quality profile.
|
||||
pub fn create_decoder(profile: &QualityProfile) -> RaptorQFecDecoder
|
||||
```
|
||||
|
||||
### RaptorQFecEncoder (`encoder.rs`)
|
||||
|
||||
```rust
|
||||
pub struct RaptorQFecEncoder { /* block_id, frames_per_block, source_symbols, symbol_size */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `RaptorQFecEncoder::new(frames_per_block: usize, symbol_size: u16) -> Self`
|
||||
- `RaptorQFecEncoder::with_defaults(frames_per_block: usize) -> Self` -- symbol_size=256
|
||||
- Implements `FecEncoder` trait
|
||||
|
||||
### RaptorQFecDecoder (`decoder.rs`)
|
||||
|
||||
```rust
|
||||
pub struct RaptorQFecDecoder { /* blocks: HashMap<u8, BlockState>, symbol_size, frames_per_block */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `RaptorQFecDecoder::new(frames_per_block: usize, symbol_size: u16) -> Self`
|
||||
- `RaptorQFecDecoder::with_defaults(frames_per_block: usize) -> Self`
|
||||
- Implements `FecDecoder` trait
|
||||
|
||||
### Interleaver (`interleave.rs`)
|
||||
|
||||
```rust
|
||||
pub type Symbol = (u8, u8, bool, Vec<u8>); // (block_id, symbol_index, is_repair, data)
|
||||
pub struct Interleaver { depth: usize }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `Interleaver::new(depth: usize) -> Self`
|
||||
- `Interleaver::with_default_depth() -> Self` -- depth=3
|
||||
- `Interleaver::interleave(&self, blocks: &[Vec<Symbol>]) -> Vec<Symbol>`
|
||||
- `Interleaver::depth(&self) -> usize`
|
||||
|
||||
### AdaptiveFec (`adaptive.rs`)
|
||||
|
||||
```rust
|
||||
pub struct AdaptiveFec { pub frames_per_block: usize, pub repair_ratio: f32, pub symbol_size: u16 }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `AdaptiveFec::from_profile(profile: &QualityProfile) -> Self`
|
||||
- `AdaptiveFec::build_encoder(&self) -> RaptorQFecEncoder`
|
||||
- `AdaptiveFec::ratio(&self) -> f32`
|
||||
- `AdaptiveFec::overhead_factor(&self) -> f32` -- 1.0 + repair_ratio
|
||||
|
||||
### Block Managers (`block_manager.rs`)
|
||||
|
||||
```rust
|
||||
pub enum EncoderBlockState { Building, Pending, Sent, Acknowledged }
|
||||
pub enum DecoderBlockState { Assembling, Complete, Expired }
|
||||
pub struct EncoderBlockManager { /* ... */ }
|
||||
pub struct DecoderBlockManager { /* ... */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `EncoderBlockManager::next_block_id(&mut self) -> u8`
|
||||
- `EncoderBlockManager::mark_sent(&mut self, block_id: u8)`
|
||||
- `EncoderBlockManager::mark_acknowledged(&mut self, block_id: u8)`
|
||||
- `DecoderBlockManager::touch(&mut self, block_id: u8)`
|
||||
- `DecoderBlockManager::mark_complete(&mut self, block_id: u8)`
|
||||
- `DecoderBlockManager::expire_before(&mut self, block_id: u8)`
|
||||
|
||||
### Helper Functions (`encoder.rs`)
|
||||
|
||||
```rust
|
||||
/// Build source EncodingPackets for a given block (for testing/interleaving).
|
||||
pub fn source_packets_for_block(block_id: u8, symbols: &[Vec<u8>], symbol_size: u16) -> Vec<EncodingPacket>
|
||||
|
||||
/// Generate repair packets for the given source symbols.
|
||||
pub fn repair_packets_for_block(block_id: u8, symbols: &[Vec<u8>], symbol_size: u16, ratio: f32) -> Vec<EncodingPacket>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## wzp-crypto
|
||||
|
||||
**Path**: `crates/wzp-crypto/src/`
|
||||
|
||||
### Re-exports (`lib.rs`)
|
||||
|
||||
```rust
|
||||
pub use anti_replay::AntiReplayWindow;
|
||||
pub use handshake::WarzoneKeyExchange;
|
||||
pub use nonce::{build_nonce, Direction};
|
||||
pub use rekey::RekeyManager;
|
||||
pub use session::ChaChaSession;
|
||||
pub use wzp_proto::{CryptoError, CryptoSession, KeyExchange};
|
||||
```
|
||||
|
||||
### WarzoneKeyExchange (`handshake.rs`)
|
||||
|
||||
```rust
|
||||
pub struct WarzoneKeyExchange { /* signing_key, x25519_static, ephemeral_secret */ }
|
||||
```
|
||||
|
||||
Implements `KeyExchange` trait. Key derivation:
|
||||
- Ed25519: `HKDF(seed, "warzone-ed25519-identity")`
|
||||
- X25519: `HKDF(seed, "warzone-x25519-identity")`
|
||||
- Session: `HKDF(X25519_DH_shared_secret, "warzone-session-key")`
|
||||
|
||||
### ChaChaSession (`session.rs`)
|
||||
|
||||
```rust
|
||||
pub struct ChaChaSession { /* cipher, session_id, send_seq, recv_seq, rekey_mgr, pending_rekey_secret */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `ChaChaSession::new(shared_secret: [u8; 32]) -> Self`
|
||||
- Implements `CryptoSession` trait
|
||||
|
||||
### AntiReplayWindow (`anti_replay.rs`)
|
||||
|
||||
```rust
|
||||
pub struct AntiReplayWindow { /* highest: u16, bitmap: Vec<u64>, initialized: bool */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `AntiReplayWindow::new() -> Self` -- 1024-packet window
|
||||
- `AntiReplayWindow::check_and_update(&mut self, seq: u16) -> Result<(), CryptoError>`
|
||||
|
||||
### Nonce Construction (`nonce.rs`)
|
||||
|
||||
```rust
|
||||
pub enum Direction { Send = 0, Recv = 1 }
|
||||
pub fn build_nonce(session_id: &[u8; 4], seq: u32, direction: Direction) -> [u8; 12]
|
||||
```
|
||||
|
||||
### RekeyManager (`rekey.rs`)
|
||||
|
||||
```rust
|
||||
pub struct RekeyManager { /* current_key, last_rekey_at */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `RekeyManager::new(initial_key: [u8; 32]) -> Self`
|
||||
- `RekeyManager::should_rekey(&self, packet_count: u64) -> bool` -- every 2^16 packets
|
||||
- `RekeyManager::perform_rekey(&mut self, new_peer_pub: &[u8; 32], our_new_secret: StaticSecret, packet_count: u64) -> [u8; 32]`
|
||||
|
||||
---
|
||||
|
||||
## wzp-transport
|
||||
|
||||
**Path**: `crates/wzp-transport/src/`
|
||||
|
||||
### Re-exports (`lib.rs`)
|
||||
|
||||
```rust
|
||||
pub use config::{client_config, server_config};
|
||||
pub use connection::{accept, connect, create_endpoint};
|
||||
pub use path_monitor::PathMonitor;
|
||||
pub use quic::QuinnTransport;
|
||||
pub use wzp_proto::{MediaTransport, PathQuality, TransportError};
|
||||
```
|
||||
|
||||
### QuinnTransport (`quic.rs`)
|
||||
|
||||
```rust
|
||||
pub struct QuinnTransport { /* connection: quinn::Connection, path_monitor: Mutex<PathMonitor> */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `QuinnTransport::new(connection: quinn::Connection) -> Self`
|
||||
- `QuinnTransport::connection(&self) -> &quinn::Connection`
|
||||
- `QuinnTransport::max_datagram_size(&self) -> Option<usize>`
|
||||
- Implements `MediaTransport` trait
|
||||
|
||||
### Configuration (`config.rs`)
|
||||
|
||||
```rust
|
||||
/// Create a server configuration with a self-signed certificate.
|
||||
pub fn server_config() -> (quinn::ServerConfig, Vec<u8>)
|
||||
|
||||
/// Create a client configuration that trusts any certificate (testing).
|
||||
pub fn client_config() -> quinn::ClientConfig
|
||||
```
|
||||
|
||||
QUIC parameters: ALPN `wzp`, 30s idle timeout, 5s keepalive, 256KB receive window, 128KB send window, 300ms initial RTT.
|
||||
|
||||
### Connection Lifecycle (`connection.rs`)
|
||||
|
||||
```rust
|
||||
pub fn create_endpoint(bind_addr: SocketAddr, server_config: Option<quinn::ServerConfig>) -> Result<quinn::Endpoint, TransportError>
|
||||
pub async fn connect(endpoint: &quinn::Endpoint, addr: SocketAddr, server_name: &str, config: quinn::ClientConfig) -> Result<quinn::Connection, TransportError>
|
||||
pub async fn accept(endpoint: &quinn::Endpoint) -> Result<quinn::Connection, TransportError>
|
||||
```
|
||||
|
||||
### PathMonitor (`path_monitor.rs`)
|
||||
|
||||
```rust
|
||||
pub struct PathMonitor { /* EWMA state for loss, RTT, jitter, bandwidth */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `PathMonitor::new() -> Self`
|
||||
- `PathMonitor::observe_sent(&mut self, seq: u16, timestamp_ms: u64)`
|
||||
- `PathMonitor::observe_received(&mut self, seq: u16, timestamp_ms: u64)`
|
||||
- `PathMonitor::observe_rtt(&mut self, rtt_ms: u32)`
|
||||
- `PathMonitor::quality(&self) -> PathQuality`
|
||||
|
||||
### Datagram Helpers (`datagram.rs`)
|
||||
|
||||
```rust
|
||||
pub fn serialize_media(packet: &MediaPacket) -> Bytes
|
||||
pub fn deserialize_media(data: Bytes) -> Option<MediaPacket>
|
||||
pub fn max_datagram_payload(connection: &quinn::Connection) -> Option<usize>
|
||||
```
|
||||
|
||||
### Reliable Stream Framing (`reliable.rs`)
|
||||
|
||||
```rust
|
||||
pub async fn send_signal(connection: &Connection, msg: &SignalMessage) -> Result<(), TransportError>
|
||||
pub async fn recv_signal(recv: &mut quinn::RecvStream) -> Result<SignalMessage, TransportError>
|
||||
```
|
||||
|
||||
Framing: 4-byte big-endian length prefix + serde_json payload. Max message size: 1 MB.
|
||||
|
||||
---
|
||||
|
||||
## wzp-relay
|
||||
|
||||
**Path**: `crates/wzp-relay/src/`
|
||||
|
||||
### Re-exports (`lib.rs`)
|
||||
|
||||
```rust
|
||||
pub use config::RelayConfig;
|
||||
pub use handshake::accept_handshake;
|
||||
pub use pipeline::{PipelineConfig, PipelineStats, RelayPipeline};
|
||||
pub use session_mgr::{RelaySession, SessionId, SessionManager};
|
||||
```
|
||||
|
||||
### RoomManager (`room.rs`)
|
||||
|
||||
```rust
|
||||
pub type ParticipantId = u64;
|
||||
pub struct RoomManager { /* rooms: HashMap<String, Room> */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `RoomManager::new() -> Self`
|
||||
- `RoomManager::join(&mut self, room_name: &str, addr: SocketAddr, transport: Arc<QuinnTransport>) -> ParticipantId`
|
||||
- `RoomManager::leave(&mut self, room_name: &str, participant_id: ParticipantId)`
|
||||
- `RoomManager::others(&self, room_name: &str, participant_id: ParticipantId) -> Vec<Arc<QuinnTransport>>`
|
||||
- `RoomManager::room_size(&self, room_name: &str) -> usize`
|
||||
- `RoomManager::list(&self) -> Vec<(String, usize)>`
|
||||
|
||||
```rust
|
||||
/// Run the receive loop for one participant in a room (forwards to all others).
|
||||
pub async fn run_participant(room_mgr: Arc<Mutex<RoomManager>>, room_name: String, participant_id: ParticipantId, transport: Arc<QuinnTransport>)
|
||||
```
|
||||
|
||||
### RelayPipeline (`pipeline.rs`)
|
||||
|
||||
```rust
|
||||
pub struct PipelineConfig { pub initial_profile: QualityProfile, pub jitter_target: usize, pub jitter_max: usize, pub jitter_min: usize }
|
||||
pub struct PipelineStats { pub packets_received: u64, pub packets_forwarded: u64, pub packets_fec_recovered: u64, pub packets_lost: u64, pub profile_changes: u64 }
|
||||
pub struct RelayPipeline { /* fec_encoder, fec_decoder, jitter, quality, profile, out_seq, stats */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `RelayPipeline::new(config: PipelineConfig) -> Self`
|
||||
- `RelayPipeline::ingest(&mut self, packet: MediaPacket) -> Vec<MediaPacket>` -- FEC decode + jitter pop
|
||||
- `RelayPipeline::prepare_outbound(&mut self, packet: MediaPacket) -> Vec<MediaPacket>` -- assign seq + FEC encode
|
||||
- `RelayPipeline::stats(&self) -> &PipelineStats`
|
||||
- `RelayPipeline::profile(&self) -> QualityProfile`
|
||||
|
||||
### SessionManager (`session_mgr.rs`)
|
||||
|
||||
```rust
|
||||
pub type SessionId = [u8; 16];
|
||||
pub struct RelaySession { pub state: Session, pub upstream_pipeline: RelayPipeline, pub downstream_pipeline: RelayPipeline, pub profile: QualityProfile, pub last_activity_ms: u64 }
|
||||
pub struct SessionManager { /* sessions: HashMap<SessionId, RelaySession>, max_sessions */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `SessionManager::new(max_sessions: usize) -> Self`
|
||||
- `SessionManager::create_session(&mut self, session_id: SessionId, config: PipelineConfig) -> Option<&mut RelaySession>`
|
||||
- `SessionManager::get_session(&mut self, id: &SessionId) -> Option<&mut RelaySession>`
|
||||
- `SessionManager::remove_session(&mut self, id: &SessionId) -> Option<RelaySession>`
|
||||
- `SessionManager::expire_idle(&mut self, now_ms: u64, timeout_ms: u64) -> usize`
|
||||
|
||||
### Handshake (`handshake.rs`)
|
||||
|
||||
```rust
|
||||
/// Accept the relay (callee) side of the cryptographic handshake.
|
||||
pub async fn accept_handshake(transport: &dyn MediaTransport, seed: &[u8; 32]) -> Result<(Box<dyn CryptoSession>, QualityProfile), anyhow::Error>
|
||||
```
|
||||
|
||||
### RelayConfig (`config.rs`)
|
||||
|
||||
```rust
|
||||
pub struct RelayConfig {
|
||||
pub listen_addr: SocketAddr, // default: 0.0.0.0:4433
|
||||
pub remote_relay: Option<SocketAddr>, // None = room mode
|
||||
pub max_sessions: usize, // default: 100
|
||||
pub jitter_target_depth: usize, // default: 50
|
||||
pub jitter_max_depth: usize, // default: 250
|
||||
pub log_level: String, // default: "info"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## wzp-client
|
||||
|
||||
**Path**: `crates/wzp-client/src/`
|
||||
|
||||
### Re-exports (`lib.rs`)
|
||||
|
||||
```rust
|
||||
#[cfg(feature = "audio")]
|
||||
pub use audio_io::{AudioCapture, AudioPlayback};
|
||||
pub use call::{CallConfig, CallDecoder, CallEncoder};
|
||||
pub use handshake::perform_handshake;
|
||||
```
|
||||
|
||||
### CallEncoder (`call.rs`)
|
||||
|
||||
```rust
|
||||
pub struct CallEncoder { /* audio_enc, fec_enc, profile, seq, block_id, frame_in_block, timestamp_ms */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `CallEncoder::new(config: &CallConfig) -> Self`
|
||||
- `CallEncoder::encode_frame(&mut self, pcm: &[i16]) -> Result<Vec<MediaPacket>, anyhow::Error>` -- returns source + repair packets
|
||||
- `CallEncoder::set_profile(&mut self, profile: QualityProfile) -> Result<(), anyhow::Error>`
|
||||
|
||||
### CallDecoder (`call.rs`)
|
||||
|
||||
```rust
|
||||
pub struct CallDecoder { /* audio_dec, fec_dec, jitter, quality, profile */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `CallDecoder::new(config: &CallConfig) -> Self`
|
||||
- `CallDecoder::ingest(&mut self, packet: MediaPacket)` -- feeds FEC decoder and jitter buffer
|
||||
- `CallDecoder::decode_next(&mut self, pcm: &mut [i16]) -> Option<usize>` -- pops from jitter, decodes
|
||||
- `CallDecoder::profile(&self) -> QualityProfile`
|
||||
- `CallDecoder::jitter_stats(&self) -> JitterStats`
|
||||
|
||||
### CallConfig (`call.rs`)
|
||||
|
||||
```rust
|
||||
pub struct CallConfig {
|
||||
pub profile: QualityProfile, // default: GOOD
|
||||
pub jitter_target: usize, // default: 10
|
||||
pub jitter_max: usize, // default: 250
|
||||
pub jitter_min: usize, // default: 3
|
||||
}
|
||||
```
|
||||
|
||||
### Client Handshake (`handshake.rs`)
|
||||
|
||||
```rust
|
||||
/// Perform the client (caller) side of the cryptographic handshake.
|
||||
pub async fn perform_handshake(transport: &dyn MediaTransport, seed: &[u8; 32]) -> Result<Box<dyn CryptoSession>, anyhow::Error>
|
||||
```
|
||||
|
||||
### Echo Test (`echo_test.rs`)
|
||||
|
||||
```rust
|
||||
pub struct WindowResult { pub index: usize, pub time_offset_secs: f64, pub frames_sent: u32, pub frames_received: u32, pub loss_pct: f32, pub snr_db: f32, pub correlation: f32, pub peak_amplitude: i16, pub is_silent: bool }
|
||||
pub struct EchoTestResult { pub duration_secs: f64, pub total_frames_sent: u64, pub total_frames_received: u64, pub overall_loss_pct: f32, pub windows: Vec<WindowResult>, /* ... */ }
|
||||
|
||||
pub async fn run_echo_test(transport: &(dyn MediaTransport + Send + Sync), duration_secs: u32, window_secs: f64) -> anyhow::Result<EchoTestResult>
|
||||
pub fn print_report(result: &EchoTestResult)
|
||||
```
|
||||
|
||||
### Audio I/O (`audio_io.rs`, requires `audio` feature)
|
||||
|
||||
```rust
|
||||
pub struct AudioCapture { /* rx: mpsc::Receiver<Vec<i16>>, running: Arc<AtomicBool> */ }
|
||||
pub struct AudioPlayback { /* tx: mpsc::SyncSender<Vec<i16>>, running: Arc<AtomicBool> */ }
|
||||
```
|
||||
|
||||
Key methods:
|
||||
- `AudioCapture::start() -> Result<Self, anyhow::Error>` -- opens default input at 48 kHz mono
|
||||
- `AudioCapture::read_frame(&self) -> Option<Vec<i16>>` -- blocking, returns 960 samples
|
||||
- `AudioCapture::stop(&self)`
|
||||
- `AudioPlayback::start() -> Result<Self, anyhow::Error>` -- opens default output at 48 kHz mono
|
||||
- `AudioPlayback::write_frame(&self, pcm: &[i16])`
|
||||
- `AudioPlayback::stop(&self)`
|
||||
|
||||
### Benchmarks (`bench.rs`)
|
||||
|
||||
```rust
|
||||
pub struct CodecResult { pub frames: usize, pub avg_encode_us: f64, pub avg_decode_us: f64, pub frames_per_sec: f64, pub compression_ratio: f64, /* ... */ }
|
||||
pub struct FecResult { pub blocks_attempted: usize, pub blocks_recovered: usize, pub recovery_rate_pct: f64, /* ... */ }
|
||||
pub struct CryptoResult { pub packets: usize, pub packets_per_sec: f64, pub megabytes_per_sec: f64, pub avg_latency_us: f64, /* ... */ }
|
||||
pub struct PipelineResult { pub frames: usize, pub avg_e2e_latency_us: f64, pub overhead_ratio: f64, /* ... */ }
|
||||
|
||||
pub fn generate_sine_wave(freq_hz: f32, sample_rate: u32, num_samples: usize) -> Vec<i16>
|
||||
pub fn bench_codec_roundtrip() -> CodecResult // 1000 frames Opus 24kbps
|
||||
pub fn bench_fec_recovery(loss_pct: f32) -> FecResult // 100 blocks with simulated loss
|
||||
pub fn bench_encrypt_decrypt() -> CryptoResult // 30000 packets ChaCha20
|
||||
pub fn bench_full_pipeline() -> PipelineResult // 50 frames E2E
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## wzp-web
|
||||
|
||||
**Path**: `crates/wzp-web/src/`
|
||||
|
||||
The web bridge binary. No public library API -- it is a standalone Axum server.
|
||||
|
||||
### Binary: `wzp-web`
|
||||
|
||||
- Serves static files from `crates/wzp-web/static/`
|
||||
- WebSocket endpoint: `GET /ws/{room}` -- upgrades to WebSocket
|
||||
- Each WebSocket client gets a QUIC connection to the relay with the room name as SNI
|
||||
- Browser -> relay: WebSocket binary messages (960 Int16 samples as raw bytes) -> `CallEncoder` -> `MediaTransport::send_media()`
|
||||
- Relay -> browser: `MediaTransport::recv_media()` -> `CallDecoder` -> WebSocket binary messages
|
||||
|
||||
### Static Files
|
||||
|
||||
- `static/index.html` -- web UI with room input, connect/disconnect, PTT, level meter
|
||||
- `static/audio-processor.js` -- AudioWorklet for microphone capture (960-sample frames)
|
||||
- `static/playback-processor.js` -- AudioWorklet for audio playback (ring buffer, 200ms max)
|
||||
607
docs/ARCHITECTURE.md
Normal file
607
docs/ARCHITECTURE.md
Normal file
@@ -0,0 +1,607 @@
|
||||
# WarzonePhone Architecture
|
||||
|
||||
> Custom lossy VoIP protocol built in Rust. E2E encrypted, FEC-protected, adaptive quality, designed for hostile network conditions.
|
||||
|
||||
## System Overview
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph "Client A (Browser/CLI)"
|
||||
MIC[Microphone] --> DN[NoiseSupressor<br/>RNNoise ML]
|
||||
DN --> SD[SilenceDetector<br/>VAD + Hangover]
|
||||
SD --> ENC[CallEncoder<br/>Opus/Codec2]
|
||||
ENC --> FEC_E[FEC Encoder<br/>RaptorQ]
|
||||
FEC_E --> CRYPT_E[ChaCha20-Poly1305<br/>Encrypt]
|
||||
CRYPT_E --> QUIC_S[QUIC Datagram<br/>Send]
|
||||
|
||||
QUIC_R[QUIC Datagram<br/>Recv] --> CRYPT_D[ChaCha20-Poly1305<br/>Decrypt]
|
||||
CRYPT_D --> FEC_D[FEC Decoder<br/>RaptorQ]
|
||||
FEC_D --> JIT[JitterBuffer<br/>Adaptive Playout]
|
||||
JIT --> DEC[CallDecoder<br/>Opus/Codec2]
|
||||
DEC --> SPK[Speaker]
|
||||
end
|
||||
|
||||
subgraph "Relay (SFU)"
|
||||
ACCEPT[Accept QUIC] --> AUTH{Auth?}
|
||||
AUTH -->|token| VALIDATE[POST /v1/auth/validate]
|
||||
AUTH -->|no auth| HS
|
||||
VALIDATE --> HS[Crypto Handshake<br/>X25519 + Ed25519]
|
||||
HS --> ROOM[Room Manager<br/>Named Rooms via SNI]
|
||||
ROOM --> FWD[Forward to<br/>Other Participants]
|
||||
end
|
||||
|
||||
subgraph "Client B"
|
||||
B_SPK[Speaker]
|
||||
B_MIC[Microphone]
|
||||
end
|
||||
|
||||
QUIC_S -->|UDP/QUIC| ACCEPT
|
||||
FWD -->|UDP/QUIC| QUIC_R
|
||||
B_MIC -.->|same pipeline| ACCEPT
|
||||
FWD -.->|same pipeline| B_SPK
|
||||
|
||||
style MIC fill:#4a9eff
|
||||
style SPK fill:#4a9eff
|
||||
style B_MIC fill:#4a9eff
|
||||
style B_SPK fill:#4a9eff
|
||||
style ROOM fill:#ff9f43
|
||||
style CRYPT_E fill:#ee5a24
|
||||
style CRYPT_D fill:#ee5a24
|
||||
```
|
||||
|
||||
## Crate Dependency Graph
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
PROTO[wzp-proto<br/>Types, Traits, Wire Format]
|
||||
|
||||
CODEC[wzp-codec<br/>Opus + Codec2 + RNNoise]
|
||||
FEC[wzp-fec<br/>RaptorQ FEC]
|
||||
CRYPTO[wzp-crypto<br/>ChaCha20 + Identity]
|
||||
TRANSPORT[wzp-transport<br/>QUIC/Quinn]
|
||||
|
||||
RELAY[wzp-relay<br/>Relay Daemon]
|
||||
CLIENT[wzp-client<br/>CLI + Call Engine]
|
||||
WEB[wzp-web<br/>Browser Bridge]
|
||||
|
||||
PROTO --> CODEC
|
||||
PROTO --> FEC
|
||||
PROTO --> CRYPTO
|
||||
PROTO --> TRANSPORT
|
||||
|
||||
CODEC --> CLIENT
|
||||
FEC --> CLIENT
|
||||
CRYPTO --> CLIENT
|
||||
TRANSPORT --> CLIENT
|
||||
CODEC --> RELAY
|
||||
FEC --> RELAY
|
||||
CRYPTO --> RELAY
|
||||
TRANSPORT --> RELAY
|
||||
|
||||
CLIENT --> WEB
|
||||
TRANSPORT --> WEB
|
||||
CRYPTO --> WEB
|
||||
|
||||
FC[warzone-protocol<br/>featherChat Identity] -.->|path dep| CRYPTO
|
||||
|
||||
style PROTO fill:#6c5ce7
|
||||
style RELAY fill:#ff9f43
|
||||
style CLIENT fill:#00b894
|
||||
style WEB fill:#0984e3
|
||||
style FC fill:#fd79a8
|
||||
```
|
||||
|
||||
## Wire Formats
|
||||
|
||||
### MediaHeader (12 bytes)
|
||||
|
||||
```
|
||||
Byte 0: [V:1][T:1][CodecID:4][Q:1][FecHi:1]
|
||||
Byte 1: [FecLo:6][unused:2]
|
||||
Bytes 2-3: sequence (u16 BE)
|
||||
Bytes 4-7: timestamp_ms (u32 BE)
|
||||
Byte 8: fec_block_id (u8)
|
||||
Byte 9: fec_symbol_idx (u8)
|
||||
Byte 10: reserved
|
||||
Byte 11: csrc_count
|
||||
|
||||
V = version (0), T = is_repair, CodecID = codec, Q = quality_report appended
|
||||
```
|
||||
|
||||
### MiniHeader (4 bytes, compressed)
|
||||
|
||||
```
|
||||
Bytes 0-1: timestamp_delta_ms (u16 BE)
|
||||
Bytes 2-3: payload_len (u16 BE)
|
||||
|
||||
Preceded by FRAME_TYPE_MINI (0x01). Full header every 50 frames (~1s).
|
||||
Saves 8 bytes/packet (67% header reduction).
|
||||
```
|
||||
|
||||
### TrunkFrame (batched datagrams)
|
||||
|
||||
```
|
||||
[count:u16]
|
||||
[session_id:u16][len:u16][payload:len] x count
|
||||
|
||||
Packs multiple session packets into one QUIC datagram.
|
||||
Max 10 entries or 1200 bytes, flushed every 5ms.
|
||||
```
|
||||
|
||||
### QualityReport (4 bytes, optional)
|
||||
|
||||
```
|
||||
Byte 0: loss_pct (0-255 maps to 0-100%)
|
||||
Byte 1: rtt_4ms (0-255 maps to 0-1020ms)
|
||||
Byte 2: jitter_ms
|
||||
Byte 3: bitrate_cap_kbps
|
||||
```
|
||||
|
||||
### SignalMessage (JSON over reliable QUIC stream)
|
||||
|
||||
```
|
||||
[4-byte length prefix][serde_json payload]
|
||||
|
||||
Variants:
|
||||
CallOffer { identity_pub, ephemeral_pub, signature, supported_profiles }
|
||||
CallAnswer { identity_pub, ephemeral_pub, signature, chosen_profile }
|
||||
IceCandidate { candidate }
|
||||
Hangup { reason: Normal|Busy|Declined|Timeout|Error }
|
||||
AuthToken { token }
|
||||
Hold, Unhold, Mute, Unmute
|
||||
Transfer { target_fingerprint, relay_addr }
|
||||
TransferAck
|
||||
Rekey { new_ephemeral_pub, signature }
|
||||
QualityUpdate { report, recommended_profile }
|
||||
Ping/Pong { timestamp_ms }
|
||||
```
|
||||
|
||||
## Quality Profiles
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph GOOD ["GOOD (28.8 kbps)"]
|
||||
G_C[Opus 24kbps]
|
||||
G_F[FEC 20%]
|
||||
G_FR[20ms frames]
|
||||
end
|
||||
|
||||
subgraph DEGRADED ["DEGRADED (9.0 kbps)"]
|
||||
D_C[Opus 6kbps]
|
||||
D_F[FEC 50%]
|
||||
D_FR[40ms frames]
|
||||
end
|
||||
|
||||
subgraph CATASTROPHIC ["CATASTROPHIC (2.4 kbps)"]
|
||||
C_C[Codec2 1200bps]
|
||||
C_F[FEC 100%]
|
||||
C_FR[40ms frames]
|
||||
end
|
||||
|
||||
GOOD -->|"loss>5% or RTT>100ms<br/>3 consecutive reports"| DEGRADED
|
||||
DEGRADED -->|"loss>15% or RTT>200ms<br/>3 consecutive"| CATASTROPHIC
|
||||
CATASTROPHIC -->|"loss<5% and RTT<100ms<br/>3 consecutive"| DEGRADED
|
||||
DEGRADED -->|"loss<5% and RTT<100ms<br/>3 consecutive"| GOOD
|
||||
|
||||
style GOOD fill:#00b894
|
||||
style DEGRADED fill:#fdcb6e
|
||||
style CATASTROPHIC fill:#e17055
|
||||
```
|
||||
|
||||
## Cryptographic Handshake
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant C as Caller
|
||||
participant R as Relay/Callee
|
||||
|
||||
Note over C: Derive identity from seed<br/>Ed25519 + X25519 via HKDF
|
||||
|
||||
C->>C: Generate ephemeral X25519
|
||||
C->>C: Sign(ephemeral_pub || "call-offer")
|
||||
C->>R: CallOffer { identity_pub, ephemeral_pub, signature, profiles }
|
||||
|
||||
R->>R: Verify Ed25519 signature
|
||||
R->>R: Generate ephemeral X25519
|
||||
R->>R: shared_secret = DH(eph_b, eph_a)
|
||||
R->>R: session_key = HKDF(shared_secret, "warzone-session-key")
|
||||
R->>R: Sign(ephemeral_pub || "call-answer")
|
||||
R->>C: CallAnswer { identity_pub, ephemeral_pub, signature, chosen_profile }
|
||||
|
||||
C->>C: Verify signature
|
||||
C->>C: shared_secret = DH(eph_a, eph_b)
|
||||
C->>C: session_key = HKDF(shared_secret)
|
||||
|
||||
Note over C,R: Both have identical ChaCha20-Poly1305 session key
|
||||
C->>R: Encrypted media (QUIC datagrams)
|
||||
R->>C: Encrypted media (QUIC datagrams)
|
||||
|
||||
Note over C,R: Rekey every 65,536 packets<br/>New ephemeral DH + HKDF mix
|
||||
```
|
||||
|
||||
## Identity Model (featherChat Compatible)
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
SEED[32-byte Seed<br/>BIP39 Mnemonic 24 words] --> HKDF1[HKDF<br/>salt=None<br/>info=warzone-ed25519]
|
||||
SEED --> HKDF2[HKDF<br/>salt=None<br/>info=warzone-x25519]
|
||||
|
||||
HKDF1 --> ED[Ed25519 SigningKey<br/>Digital Signatures]
|
||||
HKDF2 --> X25519[X25519 StaticSecret<br/>Key Agreement]
|
||||
|
||||
ED --> VKEY[Ed25519 VerifyingKey<br/>Public]
|
||||
X25519 --> XPUB[X25519 PublicKey<br/>Public]
|
||||
|
||||
VKEY --> FP[Fingerprint<br/>SHA-256 pubkey truncated 16 bytes<br/>xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx]
|
||||
|
||||
style SEED fill:#6c5ce7
|
||||
style FP fill:#fd79a8
|
||||
style ED fill:#ee5a24
|
||||
style X25519 fill:#00b894
|
||||
```
|
||||
|
||||
## Relay Modes
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph "Room Mode (Default SFU)"
|
||||
C1[Client 1] -->|QUIC SNI=room-hash| RM[Room Manager]
|
||||
C2[Client 2] -->|QUIC SNI=room-hash| RM
|
||||
C3[Client 3] -->|QUIC SNI=room-hash| RM
|
||||
RM --> R1[Room abc123]
|
||||
R1 -->|fan-out| C1
|
||||
R1 -->|fan-out| C2
|
||||
R1 -->|fan-out| C3
|
||||
end
|
||||
|
||||
subgraph "Forward Mode with --remote"
|
||||
C4[Client] -->|QUIC| RA[Relay A]
|
||||
RA -->|FEC decode then jitter then FEC encode| RB[Relay B]
|
||||
RB -->|QUIC| C5[Client]
|
||||
end
|
||||
|
||||
subgraph "Probe Mode with --probe"
|
||||
PA[Relay A] -->|Ping 1/s ~50 bytes| PB[Relay B]
|
||||
PB -->|Pong| PA
|
||||
PA --> PM[Prometheus<br/>RTT Loss Jitter Up/Down]
|
||||
end
|
||||
|
||||
style RM fill:#ff9f43
|
||||
style R1 fill:#fdcb6e
|
||||
style PM fill:#0984e3
|
||||
```
|
||||
|
||||
## Web Bridge Architecture
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant B as Browser
|
||||
participant W as wzp-web
|
||||
participant R as wzp-relay
|
||||
|
||||
B->>W: HTTPS GET /room-name
|
||||
W->>B: index.html (SPA)
|
||||
|
||||
B->>W: WebSocket /ws/room-name
|
||||
Note over B,W: Optional auth JSON message
|
||||
|
||||
W->>R: QUIC connect (SNI = hashed room name)
|
||||
Note over W,R: AuthToken then Handshake then Join Room
|
||||
|
||||
loop Every 20ms
|
||||
B->>W: WS Binary Int16 x 960 PCM
|
||||
W->>W: CallEncoder Opus + FEC
|
||||
W->>R: QUIC Datagram encrypted
|
||||
end
|
||||
|
||||
loop Incoming audio
|
||||
R->>W: QUIC Datagram
|
||||
W->>W: CallDecoder FEC + Opus
|
||||
W->>B: WS Binary Int16 x 960 PCM
|
||||
end
|
||||
|
||||
Note over B: AudioWorklet<br/>WZPCaptureProcessor mic to 960 frames<br/>WZPPlaybackProcessor ring buffer to speaker
|
||||
```
|
||||
|
||||
## FEC Protection (RaptorQ)
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph "Encoder"
|
||||
F1[Frame 1] --> BLK[Source Block<br/>5-10 frames]
|
||||
F2[Frame 2] --> BLK
|
||||
F3[Frame 3] --> BLK
|
||||
F4[Frame 4] --> BLK
|
||||
F5[Frame 5] --> BLK
|
||||
BLK --> SRC[5 Source Symbols]
|
||||
BLK --> REP[1-10 Repair Symbols<br/>ratio dependent]
|
||||
SRC --> INT[Interleaver<br/>depth=3]
|
||||
REP --> INT
|
||||
end
|
||||
|
||||
subgraph "Network"
|
||||
INT --> LOSS{Packet Loss}
|
||||
LOSS -->|some lost| RCV[Received Symbols]
|
||||
end
|
||||
|
||||
subgraph "Decoder"
|
||||
RCV --> DEINT[De-interleaver]
|
||||
DEINT --> RAPTORQ[RaptorQ Decoder<br/>Reconstruct from<br/>any K of K+R symbols]
|
||||
RAPTORQ --> OUT[Original Frames]
|
||||
end
|
||||
|
||||
style LOSS fill:#e17055
|
||||
style RAPTORQ fill:#00b894
|
||||
```
|
||||
|
||||
## Telemetry Stack
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph "Relay"
|
||||
RM[RelayMetrics<br/>sessions rooms packets]
|
||||
SM[SessionMetrics<br/>per-session jitter loss RTT]
|
||||
PM[ProbeMetrics<br/>inter-relay RTT loss]
|
||||
RM --> PROM1[GET /metrics :9090]
|
||||
SM --> PROM1
|
||||
PM --> PROM1
|
||||
end
|
||||
|
||||
subgraph "Web Bridge"
|
||||
WM[WebMetrics<br/>connections frames latency]
|
||||
WM --> PROM2[GET /metrics :8080]
|
||||
end
|
||||
|
||||
subgraph "Client"
|
||||
CM[JitterStats + QualityAdapter]
|
||||
CM --> JSONL[--metrics-file<br/>JSONL 1 line/sec]
|
||||
end
|
||||
|
||||
PROM1 --> GRAF[Grafana Dashboard<br/>4 rows 18 panels]
|
||||
PROM2 --> GRAF
|
||||
JSONL --> ANALYSIS[Offline Analysis]
|
||||
|
||||
style GRAF fill:#ff6b6b
|
||||
style PROM1 fill:#0984e3
|
||||
style PROM2 fill:#0984e3
|
||||
```
|
||||
|
||||
## Session State Machine
|
||||
|
||||
```mermaid
|
||||
stateDiagram-v2
|
||||
[*] --> Idle
|
||||
Idle --> Connecting: connect
|
||||
Connecting --> Handshaking: QUIC established
|
||||
Handshaking --> Active: CallOffer/Answer complete
|
||||
Active --> Rekeying: 65536 packets
|
||||
Rekeying --> Active: new key derived
|
||||
Active --> Closed: Hangup/Error/Timeout
|
||||
Rekeying --> Closed: Error
|
||||
Connecting --> Closed: Timeout
|
||||
Handshaking --> Closed: Signature fail
|
||||
|
||||
note right of Active: Media flows
|
||||
note right of Rekeying: Media continues while rekeying
|
||||
```
|
||||
|
||||
## Audio Processing Pipeline Detail
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
subgraph "Capture 20ms at 48kHz = 960 samples"
|
||||
MIC[Microphone / AudioWorklet] --> PCM[PCM i16 x 960]
|
||||
PCM --> RNN[RNNoise Denoise<br/>2 x 480 samples]
|
||||
RNN --> VAD{Silent?}
|
||||
VAD -->|Yes over 100ms| CN[ComfortNoise packet<br/>every 200ms]
|
||||
VAD -->|No or Hangover| OPUS[Opus/Codec2 Encode]
|
||||
end
|
||||
|
||||
subgraph "FEC + Crypto"
|
||||
OPUS --> SYMBOL[Pad to 256-byte symbol]
|
||||
CN --> SYMBOL
|
||||
SYMBOL --> BLOCK[Accumulate block<br/>5-10 symbols]
|
||||
BLOCK --> RAPTOR[RaptorQ encode<br/>+ repair symbols]
|
||||
RAPTOR --> INTERLEAVE[Interleave depth=3]
|
||||
INTERLEAVE --> HDR[Add MediaHeader<br/>or MiniHeader]
|
||||
HDR --> ENCRYPT[ChaCha20-Poly1305<br/>header=AAD payload=encrypted]
|
||||
ENCRYPT --> QUIC[QUIC Datagram]
|
||||
end
|
||||
|
||||
style RNN fill:#a29bfe
|
||||
style ENCRYPT fill:#ee5a24
|
||||
style RAPTOR fill:#00b894
|
||||
```
|
||||
|
||||
## Adaptive Jitter Buffer
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
PKT[Incoming Packet] --> SEQ{Sequence Check}
|
||||
SEQ -->|Duplicate| DROP[Drop + AntiReplay]
|
||||
SEQ -->|Valid| BUF[BTreeMap Buffer<br/>ordered by seq]
|
||||
|
||||
BUF --> ADAPT[AdaptivePlayoutDelay<br/>EMA jitter tracking]
|
||||
ADAPT --> TARGET[target_delay =<br/>ceil jitter_ema/20ms + 2]
|
||||
|
||||
BUF --> READY{depth >= target?}
|
||||
READY -->|No| WAIT[Wait / Underrun++]
|
||||
READY -->|Yes| POP[Pop lowest seq]
|
||||
POP --> DECODE[Decode to PCM]
|
||||
DECODE --> PLAY[Playout]
|
||||
|
||||
BUF --> OVERFLOW{depth > max?}
|
||||
OVERFLOW -->|Yes| EVICT[Drop oldest<br/>Overrun++]
|
||||
|
||||
style ADAPT fill:#fdcb6e
|
||||
style DROP fill:#e17055
|
||||
style EVICT fill:#e17055
|
||||
```
|
||||
|
||||
## Deployment Topology
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph "Region A"
|
||||
RA[wzp-relay A<br/>:4433 UDP]
|
||||
WA[wzp-web A<br/>:8080 HTTPS]
|
||||
WA --> RA
|
||||
end
|
||||
|
||||
subgraph "Region B"
|
||||
RB[wzp-relay B<br/>:4433 UDP]
|
||||
WB[wzp-web B<br/>:8080 HTTPS]
|
||||
WB --> RB
|
||||
end
|
||||
|
||||
RA <-->|Probe 1/s| RB
|
||||
|
||||
BA[Browser A] -->|WSS| WA
|
||||
BB[Browser B] -->|WSS| WB
|
||||
CA[CLI Client] -->|QUIC| RA
|
||||
|
||||
PROM[Prometheus] -->|scrape| RA
|
||||
PROM -->|scrape| RB
|
||||
PROM -->|scrape| WA
|
||||
PROM --> GRAF[Grafana]
|
||||
|
||||
FC[featherChat Server] -->|auth validate| RA
|
||||
FC -->|auth validate| RB
|
||||
|
||||
style RA fill:#ff9f43
|
||||
style RB fill:#ff9f43
|
||||
style GRAF fill:#ff6b6b
|
||||
style FC fill:#fd79a8
|
||||
```
|
||||
|
||||
## featherChat Integration Flow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant A as User A WZP Client
|
||||
participant FC as featherChat Server
|
||||
participant R as WZP Relay
|
||||
participant B as User B WZP Client
|
||||
|
||||
Note over A,B: Both users share BIP39 seed = same identity
|
||||
|
||||
A->>FC: WS CallSignal Offer payload=JSON SignalMessage
|
||||
FC->>B: WS CallSignal Offer payload + relay_addr + room
|
||||
|
||||
B->>R: QUIC connect SNI = hashed room
|
||||
B->>R: AuthToken fc_bearer_token
|
||||
R->>FC: POST /v1/auth/validate token
|
||||
FC->>R: valid true fingerprint ...
|
||||
B->>R: CallOffer then CallAnswer handshake
|
||||
|
||||
A->>R: QUIC connect same room
|
||||
A->>R: AuthToken + Handshake
|
||||
|
||||
Note over A,B: Both in same room media flows E2E encrypted
|
||||
A->>R: Encrypted media
|
||||
R->>B: Forward SFU no decryption
|
||||
B->>R: Encrypted media
|
||||
R->>A: Forward
|
||||
```
|
||||
|
||||
## Bandwidth Usage
|
||||
|
||||
| Profile | Audio | FEC Overhead | Total | Use Case |
|
||||
|---------|-------|-------------|-------|----------|
|
||||
| **GOOD** | 24 kbps (Opus) | 20% = 4.8 kbps | **28.8 kbps** | WiFi, LTE, good links |
|
||||
| **DEGRADED** | 6 kbps (Opus) | 50% = 3 kbps | **9.0 kbps** | 3G, congested WiFi |
|
||||
| **CATASTROPHIC** | 1.2 kbps (Codec2) | 100% = 1.2 kbps | **2.4 kbps** | Satellite, extreme loss |
|
||||
|
||||
With silence suppression: ~50% savings in typical conversations.
|
||||
With mini-frames: 8 bytes/packet saved (67% header reduction).
|
||||
With trunking: shared QUIC overhead across multiplexed sessions.
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
warzonePhone/
|
||||
├── Cargo.toml # Workspace root
|
||||
├── crates/
|
||||
│ ├── wzp-proto/ # Protocol types, traits, wire format
|
||||
│ │ └── src/
|
||||
│ │ ├── codec_id.rs # CodecId, QualityProfile
|
||||
│ │ ├── error.rs # Error types
|
||||
│ │ ├── jitter.rs # JitterBuffer, AdaptivePlayoutDelay
|
||||
│ │ ├── packet.rs # MediaHeader, MiniHeader, TrunkFrame, SignalMessage
|
||||
│ │ ├── quality.rs # Tier, AdaptiveQualityController
|
||||
│ │ ├── session.rs # SessionState machine
|
||||
│ │ └── traits.rs # AudioEncoder, FecEncoder, CryptoSession, etc.
|
||||
│ ├── wzp-codec/ # Audio codecs
|
||||
│ │ └── src/
|
||||
│ │ ├── adaptive.rs # AdaptiveEncoder/Decoder (Opus + Codec2)
|
||||
│ │ ├── denoise.rs # NoiseSupressor (RNNoise/nnnoiseless)
|
||||
│ │ └── silence.rs # SilenceDetector, ComfortNoise
|
||||
│ ├── wzp-fec/ # Forward error correction
|
||||
│ │ └── src/
|
||||
│ │ ├── encoder.rs # RaptorQFecEncoder
|
||||
│ │ ├── decoder.rs # RaptorQFecDecoder
|
||||
│ │ └── interleave.rs # Interleaver (burst protection)
|
||||
│ ├── wzp-crypto/ # Cryptography + identity
|
||||
│ │ └── src/
|
||||
│ │ ├── identity.rs # Seed, Fingerprint, hash_room_name
|
||||
│ │ ├── handshake.rs # WarzoneKeyExchange (X25519 + Ed25519)
|
||||
│ │ ├── session.rs # ChaChaSession (ChaCha20-Poly1305)
|
||||
│ │ ├── nonce.rs # Deterministic nonce construction
|
||||
│ │ ├── anti_replay.rs # Sliding window replay protection
|
||||
│ │ └── rekey.rs # Forward secrecy rekeying
|
||||
│ ├── wzp-transport/ # QUIC transport layer
|
||||
│ │ └── src/lib.rs # QuinnTransport, send/recv media/signal/trunk
|
||||
│ ├── wzp-relay/ # Relay daemon
|
||||
│ │ └── src/
|
||||
│ │ ├── main.rs # CLI, connection loop, auth + handshake
|
||||
│ │ ├── room.rs # RoomManager, TrunkedForwarder
|
||||
│ │ ├── pipeline.rs # RelayPipeline (forward mode)
|
||||
│ │ ├── session_mgr.rs # SessionManager (limits, lifecycle)
|
||||
│ │ ├── auth.rs # featherChat token validation
|
||||
│ │ ├── handshake.rs # Relay-side accept_handshake
|
||||
│ │ ├── metrics.rs # Prometheus RelayMetrics + per-session
|
||||
│ │ ├── probe.rs # Inter-relay probes + ProbeMesh
|
||||
│ │ └── trunk.rs # TrunkBatcher
|
||||
│ ├── wzp-client/ # Call engine + CLI
|
||||
│ │ └── src/
|
||||
│ │ ├── cli.rs # CLI arg parsing + main
|
||||
│ │ ├── call.rs # CallEncoder, CallDecoder, QualityAdapter
|
||||
│ │ ├── handshake.rs # Client-side perform_handshake
|
||||
│ │ ├── featherchat.rs # CallSignal bridge
|
||||
│ │ ├── echo_test.rs # Automated echo quality test
|
||||
│ │ ├── drift_test.rs # Clock drift measurement
|
||||
│ │ ├── sweep.rs # Jitter buffer parameter sweep
|
||||
│ │ ├── metrics.rs # JSONL telemetry writer
|
||||
│ │ └── bench.rs # Component benchmarks
|
||||
│ └── wzp-web/ # Browser bridge
|
||||
│ ├── src/
|
||||
│ │ ├── main.rs # Axum server, WS handler, TLS
|
||||
│ │ └── metrics.rs # Prometheus WebMetrics
|
||||
│ └── static/
|
||||
│ ├── index.html # SPA UI (room, PTT, level meter)
|
||||
│ └── audio-processor.js # AudioWorklet (capture + playback)
|
||||
├── deps/featherchat/ # Git submodule
|
||||
├── docs/
|
||||
│ ├── ARCHITECTURE.md # This file
|
||||
│ ├── TELEMETRY.md # Metrics specification
|
||||
│ ├── INTEGRATION_TASKS.md # featherChat task tracker
|
||||
│ ├── WZP-FC-SHARED-CRATES.md # Shared crate strategy
|
||||
│ └── grafana-dashboard.json # Pre-built Grafana dashboard
|
||||
└── scripts/
|
||||
└── build-linux.sh # Hetzner VM build
|
||||
```
|
||||
|
||||
## Test Coverage
|
||||
|
||||
272 tests across all crates, 0 failures. (The table below lists the major per-crate suites; totals include additional doc and integration tests not broken out here.)
|
||||
|
||||
| Crate | Tests | Key Coverage |
|
||||
|-------|-------|-------------|
|
||||
| wzp-proto | 41 | Wire format, jitter buffer, quality tiers, mini-frames, trunking |
|
||||
| wzp-codec | 31 | Opus/Codec2 roundtrip, silence detection, noise suppression |
|
||||
| wzp-fec | 22 | RaptorQ encode/decode, loss recovery, interleaving |
|
||||
| wzp-crypto | 34 + 28 compat | Encrypt/decrypt, handshake, anti-replay, featherChat identity compat |
|
||||
| wzp-transport | 2 | QUIC connection setup |
|
||||
| wzp-relay | 40 + 4 integration | Room ACL, session mgmt, metrics, probes, mesh, trunking |
|
||||
| wzp-client | 30 + 2 integration | Encoder/decoder, quality adapter, silence, drift, sweep |
|
||||
| wzp-web | 2 | Metrics |
|
||||
168
docs/DESIGN.md
Normal file
168
docs/DESIGN.md
Normal file
@@ -0,0 +1,168 @@
|
||||
# WarzonePhone Detailed Design Decisions
|
||||
|
||||
## Why Opus + Codec2 (Not Just One)
|
||||
|
||||
The dual-codec architecture is driven by the extreme range of network conditions WarzonePhone targets:
|
||||
|
||||
**Opus** (24/16/6 kbps) is the clear choice for normal to degraded conditions. It offers excellent quality at moderate bitrates, has built-in inband FEC and DTX (discontinuous transmission), and the `audiopus` crate provides mature Rust bindings to libopus. Opus operates at 48 kHz natively.
|
||||
|
||||
**Codec2** (3200/1200 bps) is a narrowband vocoder designed specifically for HF radio links with extreme bandwidth constraints. At 1200 bps (1.2 kbps), it produces intelligible speech in only 6 bytes per 40ms frame -- roughly 20x lower bitrate than Opus at its minimum. The pure-Rust `codec2` crate means no C dependencies for this codec. Codec2 operates at 8 kHz, so the adaptive layer handles 48 kHz <-> 8 kHz resampling transparently.
|
||||
|
||||
The `AdaptiveEncoder`/`AdaptiveDecoder` in `crates/wzp-codec/src/adaptive.rs` hold both codec instances and switch between them based on the active `QualityProfile`. This avoids codec re-initialization latency during tier transitions.
|
||||
|
||||
**Bandwidth comparison with FEC overhead:**
|
||||
|
||||
| Tier | Codec Bitrate | FEC Ratio | Total Bandwidth |
|
||||
|------|--------------|-----------|----------------|
|
||||
| GOOD | 24 kbps | 20% | ~28.8 kbps |
|
||||
| DEGRADED | 6 kbps | 50% | ~9.0 kbps |
|
||||
| CATASTROPHIC | 1.2 kbps | 100% | ~2.4 kbps |
|
||||
|
||||
At the catastrophic tier, the entire call (audio + FEC + headers) fits within approximately 3 kbps, which is viable even over severely degraded links.
|
||||
|
||||
## Why RaptorQ Over Reed-Solomon
|
||||
|
||||
**Reed-Solomon** is a classical block erasure code. It works well but has fixed-rate overhead: you must decide in advance how many repair symbols to generate, and decoding succeeds only once any K of the K+R transmitted symbols have been received.
|
||||
|
||||
**RaptorQ** (RFC 6330) is a fountain code with several advantages for VoIP:
|
||||
|
||||
1. **Rateless**: You can generate an arbitrary number of repair symbols on the fly. If conditions worsen mid-block, you can generate additional repair without re-encoding.
|
||||
|
||||
2. **Efficient decoding**: RaptorQ decodes from any K received symbols with high probability (in rare cases K + 1 or K + 2 are needed). It matches Reed-Solomon's "any K" recovery property in practice while remaining rateless -- repair symbols can keep flowing without committing to a fixed K+R block up front.
|
||||
|
||||
3. **Lower computational complexity**: O(K) encoding and decoding time, compared to O(K^2) for Reed-Solomon. This matters for real-time audio at 50 frames/second.
|
||||
|
||||
4. **Variable block sizes**: The encoder handles 1-56403 source symbols per block (the WZP implementation uses 5-10, but the flexibility is there).
|
||||
|
||||
The `raptorq` crate (v2) provides a well-tested pure-Rust implementation. The WZP FEC layer adds length-prefixed padding (2-byte LE prefix + zero-pad to 256 bytes) so that variable-length audio frames can be recovered exactly.
|
||||
|
||||
**FEC bandwidth math at different loss rates:**
|
||||
|
||||
With 5 source frames per block:
|
||||
- 20% repair (GOOD): 1 repair symbol. Survives loss of 1 out of 6 packets (16.7% loss).
|
||||
- 50% repair (DEGRADED): 3 repair symbols. Survives loss of 3 out of 8 packets (37.5% loss).
|
||||
- 100% repair (CATASTROPHIC): 5 repair symbols. Survives loss of 5 out of 10 packets (50% loss).
|
||||
|
||||
The benchmark (`wzp-bench --fec --loss 30`) dynamically scales the FEC ratio to survive the requested loss percentage.
|
||||
|
||||
## Why QUIC Over Raw UDP
|
||||
|
||||
Raw UDP would be simpler and lower-latency, but QUIC (via the `quinn` crate) provides:
|
||||
|
||||
1. **DATAGRAM frames**: Unreliable delivery without head-of-line blocking (RFC 9221). Media packets use this path, so they behave like UDP datagrams but benefit from QUIC's connection management.
|
||||
|
||||
2. **Reliable streams**: Signaling messages (CallOffer, CallAnswer, Rekey, Hangup) require reliable delivery. QUIC provides multiplexed streams without needing a separate TCP connection.
|
||||
|
||||
3. **Built-in congestion control**: QUIC's congestion control prevents overwhelming degraded links, which is important when chaining relays.
|
||||
|
||||
4. **Connection migration**: QUIC connections survive IP address changes (e.g., WiFi to cellular handoff), which is valuable for mobile clients.
|
||||
|
||||
5. **TLS 1.3 built-in**: The QUIC handshake provides encryption at the transport level. While WZP has its own end-to-end ChaCha20 layer, the QUIC TLS protects the header and signaling from eavesdroppers.
|
||||
|
||||
6. **NAT keepalive**: QUIC's built-in keep-alive (configured at 5-second intervals) maintains NAT bindings without application-level pings.
|
||||
|
||||
7. **Firewall traversal**: QUIC runs on UDP port 443 by default, which is commonly allowed through firewalls. The `wzp` ALPN protocol identifier distinguishes WZP traffic.
|
||||
|
||||
The tradeoff is approximately 20-40 bytes of additional per-packet overhead compared to raw UDP (QUIC short header + DATAGRAM frame overhead).
|
||||
|
||||
## Why ChaCha20-Poly1305 Over AES-GCM
|
||||
|
||||
1. **Software performance**: ChaCha20-Poly1305 is faster than AES-GCM on hardware without AES-NI instructions. This matters for ARM devices (Android phones, Raspberry Pi relays, embedded systems) where AES hardware acceleration may be absent.
|
||||
|
||||
2. **Constant-time by design**: ChaCha20 uses only add-rotate-XOR operations, making it inherently resistant to timing side-channel attacks. AES-GCM implementations without hardware support often require careful constant-time implementation.
|
||||
|
||||
3. **Warzone messenger compatibility**: The existing Warzone messenger uses ChaCha20-Poly1305 for message encryption. Reusing the same primitive simplifies the security audit and allows key material to be shared across messaging and calling.
|
||||
|
||||
4. **16-byte overhead**: Both ChaCha20-Poly1305 and AES-128-GCM produce a 16-byte authentication tag. There is no size advantage to AES-GCM.
|
||||
|
||||
5. **AEAD with AAD**: The MediaHeader is used as Associated Authenticated Data (AAD), ensuring the header is authenticated but not encrypted. This allows relays to read routing information (block ID, sequence number) without decrypting the payload.
|
||||
|
||||
## Why Star Dependency Graph (Parallel Development)
|
||||
|
||||
The workspace follows a strict star dependency pattern:
|
||||
|
||||
```
|
||||
wzp-proto (hub)
|
||||
/ | \ \
|
||||
wzp-codec wzp-fec wzp-crypto wzp-transport
|
||||
\ | / /
|
||||
wzp-relay
|
||||
wzp-client
|
||||
wzp-web
|
||||
```
|
||||
|
||||
- `wzp-proto` defines all trait interfaces and wire format types
|
||||
- Each "leaf" crate (codec, fec, crypto, transport) depends only on `wzp-proto`
|
||||
- No leaf crate depends on another leaf crate
|
||||
- Integration crates (relay, client, web) depend on all leaves
|
||||
|
||||
This enables:
|
||||
1. **Parallel development**: 5 agents/developers can work on 5 crates simultaneously with zero merge conflicts
|
||||
2. **Independent testing**: Each crate has comprehensive tests that run without requiring other implementations
|
||||
3. **Pluggability**: Any implementation can be swapped (e.g., replace RaptorQ with Reed-Solomon) by implementing the same trait
|
||||
4. **Fast compilation**: Changes to one leaf only recompile that leaf and the integration crates, not other leaves
|
||||
|
||||
## Jitter Buffer Trade-offs
|
||||
|
||||
The jitter buffer must balance two competing goals:
|
||||
|
||||
**Lower latency** (smaller buffer):
|
||||
- Better conversational interactivity
|
||||
- Less memory usage
|
||||
- But more vulnerable to jitter and reordering
|
||||
|
||||
**Higher quality** (larger buffer):
|
||||
- More time to receive out-of-order packets
|
||||
- More time for FEC recovery (repair packets may arrive after source packets)
|
||||
- But adds perceptible delay to the conversation
|
||||
|
||||
The default configuration:
|
||||
- Target: 10 packets (200ms) for the client, 50 packets (1s) for the relay
|
||||
- Minimum: 3 packets (60ms) before playout begins (client), 25 packets (500ms) for relay
|
||||
- Maximum: 250 packets (5s) absolute cap
|
||||
|
||||
The relay uses a deeper buffer because it needs to absorb jitter from the lossy inter-relay link. The client uses a shallower buffer for lower latency since it is on the last hop.
|
||||
|
||||
**Known issue**: The current jitter buffer does not adapt its depth based on observed jitter. It uses sequence-number ordering only, without timestamp-based playout scheduling. This can lead to drift during long calls, as observed in echo tests.
|
||||
|
||||
## Browser Audio: AudioWorklet vs ScriptProcessorNode
|
||||
|
||||
The web bridge (`crates/wzp-web/static/`) uses AudioWorklet as the primary audio I/O mechanism, with ScriptProcessorNode as a fallback.
|
||||
|
||||
**AudioWorklet** (preferred):
|
||||
- Runs on a dedicated audio rendering thread
|
||||
- Lower latency (no main-thread round-trip)
|
||||
- Consistent 128-sample callback timing
|
||||
- Supported in Chrome 66+, Firefox 76+, Safari 14.1+
|
||||
|
||||
**ScriptProcessorNode** (fallback):
|
||||
- Runs on the main thread via `onaudioprocess` callback
|
||||
- Higher latency, potential glitches from main-thread GC pauses
|
||||
- Deprecated by the Web Audio specification
|
||||
- Used when AudioWorklet is not available
|
||||
|
||||
Both paths accumulate Float32 samples into 960-sample (20ms) Int16 frames before sending via WebSocket, matching the WZP codec frame size.
|
||||
|
||||
**Playback** uses an AudioWorklet with a ring buffer capped at 200ms (9600 samples at 48 kHz). When the buffer exceeds this limit, old samples are dropped to prevent unbounded drift. The fallback path uses scheduled `AudioBufferSourceNode` instances.
|
||||
|
||||
## Room Mode: SFU vs MCU Trade-offs
|
||||
|
||||
WarzonePhone implements an **SFU** (Selective Forwarding Unit) architecture:
|
||||
|
||||
**SFU** (implemented):
|
||||
- Relay forwards each participant's packets to all other participants unchanged
|
||||
- No transcoding -- the relay never decodes or re-encodes audio
|
||||
- O(N) relay bandwidth per incoming stream (each received packet is forwarded N-1 times); aggregate relay bandwidth therefore scales as O(N^2) when all N participants speak simultaneously
|
||||
- Each client receives separate streams from each other participant
|
||||
- Client must mix/decode multiple streams locally
|
||||
- Lower relay CPU usage (no transcoding)
|
||||
- End-to-end encryption is preserved (relay never sees plaintext)
|
||||
|
||||
**MCU** (not implemented, for comparison):
|
||||
- Relay would decode all streams, mix them, and re-encode a single combined stream
|
||||
- O(1) bandwidth to each client (receives one mixed stream)
|
||||
- Requires the relay to have codec keys (breaks E2E encryption)
|
||||
- Higher relay CPU (decoding N streams + mixing + re-encoding)
|
||||
- Audio quality loss from re-encoding
|
||||
|
||||
The SFU choice is driven by the E2E encryption requirement: since relays never have access to the audio codec keys, they cannot decode, mix, or re-encode. The current room implementation in `crates/wzp-relay/src/room.rs` forwards received datagrams to all other participants in the room with best-effort delivery -- if one send fails, the relay continues to the next participant.
|
||||
204
docs/EXTENSIBILITY.md
Normal file
204
docs/EXTENSIBILITY.md
Normal file
@@ -0,0 +1,204 @@
|
||||
# WarzonePhone Extension Points & Future Features
|
||||
|
||||
## Trait-Based Architecture
|
||||
|
||||
The protocol is designed around trait interfaces defined in `crates/wzp-proto/src/traits.rs`. Any implementation that satisfies the trait contract can be plugged in without modifying other crates.
|
||||
|
||||
### Adding a New Audio Codec
|
||||
|
||||
Implement `AudioEncoder` and `AudioDecoder` from `wzp_proto::traits`:
|
||||
|
||||
```rust
|
||||
pub trait AudioEncoder: Send + Sync {
|
||||
fn encode(&mut self, pcm: &[i16], out: &mut [u8]) -> Result<usize, CodecError>;
|
||||
fn codec_id(&self) -> CodecId;
|
||||
fn set_profile(&mut self, profile: QualityProfile) -> Result<(), CodecError>;
|
||||
fn max_frame_bytes(&self) -> usize;
|
||||
fn set_inband_fec(&mut self, _enabled: bool) {}
|
||||
fn set_dtx(&mut self, _enabled: bool) {}
|
||||
}
|
||||
|
||||
pub trait AudioDecoder: Send + Sync {
|
||||
fn decode(&mut self, encoded: &[u8], pcm: &mut [i16]) -> Result<usize, CodecError>;
|
||||
fn decode_lost(&mut self, pcm: &mut [i16]) -> Result<usize, CodecError>;
|
||||
fn codec_id(&self) -> CodecId;
|
||||
fn set_profile(&mut self, profile: QualityProfile) -> Result<(), CodecError>;
|
||||
}
|
||||
```
|
||||
|
||||
Steps:
|
||||
1. Add a new variant to `CodecId` in `crates/wzp-proto/src/codec_id.rs` (uses 4-bit wire encoding, currently 5 of 16 values used)
|
||||
2. Implement `AudioEncoder` and `AudioDecoder` for your codec
|
||||
3. Register the codec in `AdaptiveEncoder`/`AdaptiveDecoder` in `crates/wzp-codec/src/adaptive.rs`
|
||||
4. Add a `QualityProfile` constant for the new codec
|
||||
|
||||
### Adding a New FEC Scheme
|
||||
|
||||
Implement `FecEncoder` and `FecDecoder` from `wzp_proto::traits`:
|
||||
|
||||
```rust
|
||||
pub trait FecEncoder: Send + Sync {
|
||||
fn add_source_symbol(&mut self, data: &[u8]) -> Result<(), FecError>;
|
||||
fn generate_repair(&mut self, ratio: f32) -> Result<Vec<(u8, Vec<u8>)>, FecError>;
|
||||
fn finalize_block(&mut self) -> Result<u8, FecError>;
|
||||
fn current_block_id(&self) -> u8;
|
||||
fn current_block_size(&self) -> usize;
|
||||
}
|
||||
|
||||
pub trait FecDecoder: Send + Sync {
|
||||
fn add_symbol(&mut self, block_id: u8, symbol_index: u8, is_repair: bool, data: &[u8]) -> Result<(), FecError>;
|
||||
fn try_decode(&mut self, block_id: u8) -> Result<Option<Vec<Vec<u8>>>, FecError>;
|
||||
fn expire_before(&mut self, block_id: u8);
|
||||
}
|
||||
```
|
||||
|
||||
For example, a Reed-Solomon implementation would maintain the same block/symbol structure but use a different coding algorithm internally. The FEC block ID and symbol index fields in `MediaHeader` support any scheme that fits the block/symbol model.
|
||||
|
||||
### Adding a New Transport
|
||||
|
||||
Implement `MediaTransport` from `wzp_proto::traits`:
|
||||
|
||||
```rust
|
||||
#[async_trait]
|
||||
pub trait MediaTransport: Send + Sync {
|
||||
async fn send_media(&self, packet: &MediaPacket) -> Result<(), TransportError>;
|
||||
async fn recv_media(&self) -> Result<Option<MediaPacket>, TransportError>;
|
||||
async fn send_signal(&self, msg: &SignalMessage) -> Result<(), TransportError>;
|
||||
async fn recv_signal(&self) -> Result<Option<SignalMessage>, TransportError>;
|
||||
fn path_quality(&self) -> PathQuality;
|
||||
async fn close(&self) -> Result<(), TransportError>;
|
||||
}
|
||||
```
|
||||
|
||||
A raw UDP transport, a WebRTC data channel transport, or a TCP tunnel transport could all implement this trait.
|
||||
|
||||
## Obfuscation Layer (Phase 2)
|
||||
|
||||
The `ObfuscationLayer` trait is defined in `crates/wzp-proto/src/traits.rs` but not yet implemented:
|
||||
|
||||
```rust
|
||||
pub trait ObfuscationLayer: Send + Sync {
|
||||
fn obfuscate(&mut self, data: &[u8], out: &mut Vec<u8>) -> Result<(), ObfuscationError>;
|
||||
fn deobfuscate(&mut self, data: &[u8], out: &mut Vec<u8>) -> Result<(), ObfuscationError>;
|
||||
}
|
||||
```
|
||||
|
||||
Planned implementations:
|
||||
- **TLS-in-TLS**: Wrap QUIC traffic inside a TLS connection to port 443, making it look like ordinary HTTPS
|
||||
- **HTTP/2 mimicry**: Frame QUIC packets as HTTP/2 data frames
|
||||
- **Random padding**: Add random-length padding to defeat traffic analysis
|
||||
- **Domain fronting**: Use CDN infrastructure to hide the true destination
|
||||
|
||||
The obfuscation layer sits between the crypto layer and the transport layer in the protocol stack, wrapping encrypted packets before transmission.
|
||||
|
||||
## FeatherChat / Warzone Messenger Integration
|
||||
|
||||
As described in `docs/featherchat.md`, WarzonePhone is designed to integrate with the existing Warzone messenger.
|
||||
|
||||
### Shared Identity Model
|
||||
|
||||
Both WarzonePhone and Warzone use the same identity derivation:
|
||||
- 32-byte seed (BIP39 mnemonic backup)
|
||||
- HKDF with context strings: `"warzone-ed25519-identity"` and `"warzone-x25519-identity"`
|
||||
- Ed25519 for signing, X25519 for encryption
|
||||
- Fingerprint: `SHA-256(Ed25519_pub)[:16]`
|
||||
|
||||
This is implemented in `crates/wzp-crypto/src/handshake.rs` as `WarzoneKeyExchange::from_identity_seed()`.
|
||||
|
||||
### Signaling via Existing WebSocket
|
||||
|
||||
Call initiation flows through the Warzone messenger's existing WebSocket connection:
|
||||
1. Caller looks up callee via `@alias`, federated address, or raw fingerprint
|
||||
2. Caller sends `WireMessage::CallOffer` through the existing message channel
|
||||
3. Callee receives the offer and responds with `WireMessage::CallAnswer`
|
||||
4. Both sides establish a direct QUIC connection to the relay using ephemeral keys from the signaling exchange
|
||||
|
||||
The `SignalMessage::CallOffer` and `SignalMessage::CallAnswer` variants in `crates/wzp-proto/src/packet.rs` carry the same fields needed for this flow.
|
||||
|
||||
### Key Derivation from Existing Shared Secret
|
||||
|
||||
When two Warzone users already have an X3DH shared secret from their messaging session, call keys can be derived from it:
|
||||
- `HKDF(x3dh_shared_secret, "warzone-call-session")` -> 32-byte session key
|
||||
- Or: fresh ephemeral exchange per call (current implementation) for independent forward secrecy
|
||||
|
||||
### Unified Addressing
|
||||
|
||||
The Warzone addressing system resolves user identities across multiple namespaces:
|
||||
|
||||
| Method | Format | Resolution |
|
||||
|--------|--------|------------|
|
||||
| Local alias | `@manwe` | Server resolves to fingerprint |
|
||||
| Federated | `@manwe.b1.example.com` | DNS TXT record -> fingerprint + endpoint |
|
||||
| ENS | `@manwe.eth` | Ethereum address -> fingerprint (planned) |
|
||||
| Raw fingerprint | `xxxx:xxxx:...` | Direct lookup |
|
||||
|
||||
A user calls `@manwe` the same way they message `@manwe`.
|
||||
|
||||
## Authentication: Caller Verification Before Bridging
|
||||
|
||||
Currently, relays forward packets without verifying caller identity. To add authentication:
|
||||
|
||||
1. **Relay-side handshake**: The relay receives the `CallOffer`, verifies the Ed25519 signature, and checks the caller's identity against an allowlist before accepting the connection.
|
||||
|
||||
2. **Implementation point**: `crates/wzp-relay/src/handshake.rs` already implements `accept_handshake()` which performs signature verification. To gate admission, add an authorization check after signature verification.
|
||||
|
||||
3. **Token-based auth**: Add a `token: Vec<u8>` field to `CallOffer` containing a relay-issued authentication token (e.g., signed by the relay operator's key).
|
||||
|
||||
## Multi-Relay Mesh
|
||||
|
||||
The current two-relay chain (`--remote` flag) can be extended to a multi-hop mesh:
|
||||
|
||||
```
|
||||
Client -> Relay A -> Relay B -> Relay C -> Destination
|
||||
```
|
||||
|
||||
Each hop uses the relay pipeline (FEC decode -> jitter buffer -> FEC re-encode) to absorb loss on each link independently. This requires:
|
||||
|
||||
1. Relay discovery and route selection (not yet implemented)
|
||||
2. Per-hop FEC parameters (each link may have different loss characteristics)
|
||||
3. Cumulative latency management (each hop adds jitter buffer delay)
|
||||
|
||||
## Video Support
|
||||
|
||||
The trait architecture supports video by adding:
|
||||
|
||||
1. **Video codec trait**: Similar to `AudioEncoder`/`AudioDecoder` but for video frames
|
||||
2. **Codec choices**: AV1 (best compression, higher CPU), VP9 SVC (scalable, moderate CPU)
|
||||
3. **Separate FEC strategy**: Video frames are larger and more critical (I-frames vs P-frames need different protection levels)
|
||||
4. **SVC (Scalable Video Coding)**: With VP9 SVC, the relay can drop enhancement layers without transcoding, adapting video quality to each receiver's bandwidth
|
||||
|
||||
Video would add new `CodecId` variants and a separate `QualityProfile` for video parameters.
|
||||
|
||||
## Android Native Client
|
||||
|
||||
The workspace is designed with Android in mind (`wzp-client` description mentions "for Android (JNI) and Windows desktop"):
|
||||
|
||||
1. **JNI bindings**: Use `jni` crate or `uniffi` to expose `CallEncoder`, `CallDecoder`, and `MediaTransport` to Kotlin/Java
|
||||
2. **Audio I/O**: Android uses AAudio or OpenSL ES instead of cpal
|
||||
3. **Build**: Cross-compile with `cargo ndk` targeting `aarch64-linux-android` and `armv7-linux-androideabi`
|
||||
4. **Permissions**: `RECORD_AUDIO`, `INTERNET`, `WAKE_LOCK`
|
||||
|
||||
## STUN/TURN NAT Traversal Integration
|
||||
|
||||
The `SignalMessage::IceCandidate` variant is already defined for NAT traversal:
|
||||
|
||||
```rust
|
||||
IceCandidate { candidate: String }
|
||||
```
|
||||
|
||||
Integration would involve:
|
||||
1. STUN server queries to discover the client's public IP/port
|
||||
2. ICE candidate exchange via the signaling channel
|
||||
3. TURN relay fallback when direct UDP is blocked
|
||||
4. Integration with the existing QUIC transport (QUIC can traverse NATs via its connection migration)
|
||||
|
||||
## Bandwidth Estimation and Adaptive Bitrate
|
||||
|
||||
The `PathMonitor` in `crates/wzp-transport/src/path_monitor.rs` already estimates bandwidth from observed packet rates. To close the loop:
|
||||
|
||||
1. Feed `PathMonitor::quality()` into `AdaptiveQualityController::observe()` as `QualityReport` values
|
||||
2. The controller will trigger tier transitions when conditions change
|
||||
3. Propagate the new `QualityProfile` to both encoder (codec switch) and FEC (ratio change)
|
||||
4. Signal the peer via `SignalMessage::QualityUpdate` so both sides switch simultaneously
|
||||
|
||||
The framework is in place; the missing piece is the integration wiring in the client's main loop to periodically generate quality reports from path metrics.
|
||||
1209
docs/FEATHERCHAT_INTEGRATION.md
Normal file
1209
docs/FEATHERCHAT_INTEGRATION.md
Normal file
File diff suppressed because it is too large
Load Diff
93
docs/INTEGRATION_TASKS.md
Normal file
93
docs/INTEGRATION_TASKS.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# WZP Integration Tasks
|
||||
|
||||
Based on the FUTURE_TASKS.md from featherChat commit 65f6390, which lists the WZP integration items tracked below.
|
||||
|
||||
## Status Key
|
||||
- DONE = implemented and tested
|
||||
- PARTIAL = code exists but not wired into live path
|
||||
- TODO = not started
|
||||
|
||||
---
|
||||
|
||||
## WZP-Side Tasks (our responsibility)
|
||||
|
||||
### WZP-S-1. HKDF Salt/Info String Alignment — DONE
|
||||
- Both use `None` salt, info strings `warzone-ed25519` / `warzone-x25519`
|
||||
- 15 cross-project tests verify identical output
|
||||
|
||||
### WZP-S-2. Accept featherChat Bearer Token on Relay — DONE
|
||||
- `--auth-url` flag on relay
|
||||
- Clients send `SignalMessage::AuthToken` as first signal
|
||||
- Relay calls `POST {auth_url}` to validate, rejects if invalid
|
||||
- Commit: `ad16ddb`
|
||||
|
||||
### WZP-S-3. Signaling Bridge Mode — DONE
|
||||
- `featherchat.rs` module: encode/decode WZP SignalMessage into FC CallSignal.payload
|
||||
- `WzpCallPayload` wraps signal + relay_addr + room
|
||||
- Commit: `ad16ddb`
|
||||
|
||||
### WZP-S-4. Room Access Control — DONE
|
||||
- `hash_room_name()` in wzp-crypto: SHA-256("featherchat-group:" + name)[:16] → 32 hex chars
|
||||
- CLI `--room <name>` hashes before using as SNI
|
||||
- Web bridge hashes room name before connecting to relay
|
||||
- RoomManager gains ACL: `with_acl()`, `allow()`, `is_authorized()`
|
||||
- `join()` now returns `Result<ParticipantId, String>`, rejects unauthorized
|
||||
- Relay passes authenticated fingerprint to room join
|
||||
|
||||
### WZP-S-5. Wire Crypto Handshake into Live Path — DONE
|
||||
- CLI: `perform_handshake()` called after connect, before any media mode
|
||||
- Relay: `accept_handshake()` called after auth, before room join
|
||||
- Web bridge: `perform_handshake()` called after auth token, before audio loops
|
||||
- Relay generates ephemeral identity seed at startup, logs fingerprint
|
||||
- Quality profile negotiated during handshake
|
||||
|
||||
### WZP-S-6. Web Bridge + featherChat Web Client — DONE
|
||||
- `--auth-url` flag on web bridge
|
||||
- Browser sends `{ "type": "auth", "token": "..." }` as first WS message
|
||||
- Web bridge validates token against featherChat, then passes to relay
|
||||
- `--cert`/`--key` flags for production TLS certificates
|
||||
|
||||
### WZP-S-7. Publish wzp-proto for featherChat — DONE
|
||||
- `wzp-proto/Cargo.toml` now standalone (no workspace inheritance)
|
||||
- featherChat can use: `wzp-proto = { git = "ssh://...", path = "crates/wzp-proto" }`
|
||||
|
||||
### WZP-S-8. CLI Seed Input — DONE
|
||||
- `--seed <hex>` and `--mnemonic <24 words>` flags
|
||||
- featherChat-compatible identity: same seed → same keys
|
||||
- Commit: `12cdfe6`
|
||||
|
||||
### WZP-S-9. Fix Hardcoded Assumptions — DONE
|
||||
1. No auth on relay — ✅ fixed via S-2 (`--auth-url`)
|
||||
2. Room names from SNI — ✅ fixed via S-4 (hashed room names)
|
||||
3. No signaling before media — ✅ fixed via S-5 (mandatory handshake)
|
||||
4. Self-signed TLS — ✅ fixed via S-6 (`--cert`/`--key` for production)
|
||||
5. No codec negotiation in web bridge — ✅ profile negotiated in handshake
|
||||
6. No connection to FC key registry — ✅ fixed via S-2 (token validation)
|
||||
|
||||
---
|
||||
|
||||
## featherChat-Side Tasks (their responsibility, we support)
|
||||
|
||||
### WZP-FC-1. Add CallSignal WireMessage variant — DONE (v0.0.21, 064a730)
|
||||
### WZP-FC-2. Call state management + sled tree — TODO (1-2d)
|
||||
### WZP-FC-3. WS handler for call signaling — TODO (0.5d)
|
||||
### WZP-FC-4. Auth token validation endpoint — DONE (v0.0.21, 064a730)
|
||||
### WZP-FC-5. Group-to-room mapping — TODO (1d)
|
||||
### WZP-FC-6. Presence/online status API — TODO (0.5-2d)
|
||||
### WZP-FC-7. Missed call notifications — TODO (0.5d)
|
||||
### WZP-FC-8. Cross-project identity verification — DONE (15 tests, 26dc848)
|
||||
### WZP-FC-9. HKDF salt investigation — DONE (no mismatch)
|
||||
### WZP-FC-10. Web bridge shared auth — DONE
|
||||
- FC: GET /v1/wzp/relay-config, CORS layer, service token
|
||||
- WZP: web bridge --auth-url validates browser tokens via FC
|
||||
### FC-CRATE-1. Standalone warzone-protocol — DONE (v0.0.21, 4a4fa9f)
|
||||
|
||||
---
|
||||
|
||||
## All WZP-S Tasks Complete
|
||||
|
||||
The WZP side of integration is finished. featherChat needs:
|
||||
1. **FC-2 + FC-3** — call state management + WS routing (makes real calls possible)
|
||||
2. **FC-5** — group-to-room mapping (uses `hash_room_name` convention)
|
||||
3. **FC-6/7** — presence + missed calls (UX polish)
|
||||
4. **FC-10** — web bridge shared auth (browser token flow)
|
||||
193
docs/PROGRESS.md
Normal file
193
docs/PROGRESS.md
Normal file
@@ -0,0 +1,193 @@
|
||||
# WarzonePhone Development Progress Report
|
||||
|
||||
## Phase 1: Protocol Core
|
||||
|
||||
**Scope**: Define the protocol types, traits, and core logic in `wzp-proto`.
|
||||
|
||||
**What was built**:
|
||||
- Wire format types: `MediaHeader` (12-byte compact binary), `QualityReport` (4 bytes), `MediaPacket`, `SignalMessage` (8 variants)
|
||||
- Trait definitions: `AudioEncoder`, `AudioDecoder`, `FecEncoder`, `FecDecoder`, `CryptoSession`, `KeyExchange`, `MediaTransport`, `ObfuscationLayer`, `QualityController`
|
||||
- `CodecId` enum with 5 variants (Opus24k/16k/6k, Codec2_3200/1200) and 4-bit wire encoding
|
||||
- `QualityProfile` with 3 preset tiers (GOOD, DEGRADED, CATASTROPHIC)
|
||||
- `AdaptiveQualityController` with hysteresis (3-down/10-up thresholds, sliding window of 20 reports)
|
||||
- `JitterBuffer` with BTreeMap-based reordering, wrapping sequence arithmetic, min/max/target depth
|
||||
- `Session` state machine (Idle -> Connecting -> Handshaking -> Active <-> Rekeying -> Closed)
|
||||
- Full error type hierarchy (`CodecError`, `FecError`, `CryptoError`, `TransportError`, `ObfuscationError`)
|
||||
|
||||
**Tests**: 27 tests across packet roundtrip, quality controller, jitter buffer, session state machine, sequence wrapping
|
||||
|
||||
## Phase 2: Implementation Crates (Parallel)
|
||||
|
||||
**Scope**: Implement the 4 leaf crates against the trait interfaces, in parallel.
|
||||
|
||||
### wzp-codec
|
||||
- Opus encoder/decoder via `audiopus` (48 kHz mono, VoIP application mode, inband FEC, DTX)
|
||||
- Codec2 encoder/decoder via pure-Rust `codec2` crate (3200 and 1200 bps modes)
|
||||
- `AdaptiveEncoder`/`AdaptiveDecoder` wrapping both codecs with transparent switching
|
||||
- Linear resampler for 48 kHz <-> 8 kHz conversion (box filter downsampling, linear interpolation upsampling)
|
||||
- All callers work with 48 kHz PCM regardless of active codec
|
||||
|
||||
### wzp-fec
|
||||
- `RaptorQFecEncoder`: accumulates source symbols with 2-byte length prefix + zero padding to 256-byte symbol size
|
||||
- `RaptorQFecDecoder`: multi-block concurrent decoding with HashMap-based block tracking
|
||||
- `Interleaver`: round-robin temporal interleaving across multiple FEC blocks
|
||||
- `BlockManager`: encoder-side (Building/Pending/Sent/Acknowledged) and decoder-side (Assembling/Complete/Expired) lifecycle tracking
|
||||
- `AdaptiveFec`: maps `QualityProfile` to FEC parameters
|
||||
- Factory function `create_fec_pair()` for convenient encoder/decoder creation
|
||||
|
||||
### wzp-crypto
|
||||
- `WarzoneKeyExchange`: identity seed -> HKDF -> Ed25519 + X25519, ephemeral generation, signature, verification, session derivation
|
||||
- `ChaChaSession`: ChaCha20-Poly1305 AEAD with deterministic nonce construction (session_id + seq + direction)
|
||||
- `RekeyManager`: triggers rekey every 2^16 packets, HKDF mixing of old key + new DH, zeroization of old key
|
||||
- `AntiReplayWindow`: 1024-packet sliding window bitmap with u16 wrapping support
|
||||
- Nonce module: 12-byte nonce layout (4-byte session_id + 4-byte seq BE + 1-byte direction + 3-byte padding)
|
||||
|
||||
### wzp-transport
|
||||
- `QuinnTransport`: implements `MediaTransport` trait over quinn QUIC connection
|
||||
- DATAGRAM frames for unreliable media, bidirectional streams for reliable signaling
|
||||
- Length-prefixed JSON framing (4-byte BE length + serde_json payload) for signaling
|
||||
- VoIP-tuned QUIC configuration (30s idle timeout, 5s keepalive, conservative flow control, 300ms initial RTT)
|
||||
- `PathMonitor`: EWMA-smoothed loss, RTT, jitter, bandwidth estimation
|
||||
- Connection lifecycle: `create_endpoint()`, `connect()`, `accept()`
|
||||
- Self-signed certificate generation for testing
|
||||
|
||||
**Tests**: 55+ tests across all 4 crates (codec roundtrip, FEC recovery at 30/50/70% loss, crypto encrypt/decrypt, handshake, anti-replay, transport serialization, path monitoring)
|
||||
|
||||
## Phase 3: Integration (Relay + Client)
|
||||
|
||||
**Scope**: Wire all layers together into working relay and client binaries.
|
||||
|
||||
### wzp-relay
|
||||
- Room mode (SFU): `RoomManager` with named rooms, auto-create/auto-delete, per-participant forwarding
|
||||
- Forward mode: two-pipeline architecture (upstream/downstream) with FEC re-encode and jitter buffering
|
||||
- `RelayPipeline`: ingest -> FEC decode -> jitter buffer -> pop -> FEC re-encode -> send
|
||||
- `SessionManager`: tracks active sessions, max session limit, idle expiration
|
||||
- Relay-side handshake: `accept_handshake()` with signature verification and profile negotiation
|
||||
- `RelayConfig`: configurable listen address, remote relay, max sessions, jitter parameters
|
||||
- Periodic stats logging (upstream/downstream packet counts)
|
||||
|
||||
### wzp-client
|
||||
- `CallEncoder`: PCM -> audio encode -> FEC block management -> source + repair MediaPackets
|
||||
- `CallDecoder`: MediaPacket -> FEC decode -> jitter buffer -> audio decode -> PCM
|
||||
- Client-side handshake: `perform_handshake()` with ephemeral key exchange and signature
|
||||
- CLI modes: silence test, tone generation (440 Hz), file send, file record, echo test, live audio
|
||||
- `AudioCapture`/`AudioPlayback` via cpal (behind `audio` feature flag), supporting both i16 and f32 sample formats
|
||||
- Automated echo test with windowed analysis (loss, SNR, correlation, degradation detection)
|
||||
- Benchmark suite: codec roundtrip (1000 frames), FEC recovery (100 blocks), crypto throughput (30000 packets), full pipeline (50 frames)
|
||||
|
||||
**Tests**: 25+ tests for pipeline creation, packet generation, FEC repair generation, session management
|
||||
|
||||
## Phase 4: Web Bridge, Rooms, PTT, TLS
|
||||
|
||||
**Scope**: Browser support and multi-party calling.
|
||||
|
||||
### wzp-web
|
||||
- Axum-based HTTP/WebSocket server
|
||||
- Browser audio capture via AudioWorklet (primary) with ScriptProcessorNode fallback
|
||||
- Browser audio playback via AudioWorklet with scheduled BufferSource fallback
|
||||
- Room-based routing: `/ws/<room-name>` WebSocket endpoint
|
||||
- Room name passed as QUIC SNI to the relay
|
||||
- Push-to-talk (PTT) support: button, mouse hold, spacebar
|
||||
- Audio level meter in the UI
|
||||
- TLS support via `--tls` flag with self-signed certificate generation
|
||||
- Auto-reconnection on WebSocket disconnect
|
||||
- Static file serving for the web UI
|
||||
|
||||
## Current Status
|
||||
|
||||
### What Works
|
||||
|
||||
- Full encode/decode pipeline: PCM -> Opus/Codec2 -> FEC -> MediaPacket -> FEC decode -> audio decode -> PCM
|
||||
- Adaptive codec switching between Opus and Codec2 (including resampling)
|
||||
- RaptorQ FEC recovery at various loss rates (tested up to 50% loss)
|
||||
- ChaCha20-Poly1305 encryption with deterministic nonces
|
||||
- X25519 key exchange with Ed25519 identity signatures
|
||||
- QUIC transport with DATAGRAM frames for media and reliable streams for signaling
|
||||
- Single relay echo mode (connectivity test)
|
||||
- Multi-party room calls (SFU)
|
||||
- Two-relay forwarding chain
|
||||
- Web browser audio via WebSocket bridge
|
||||
- File-based send/record for testing
|
||||
- Live microphone/speaker mode (with `audio` feature)
|
||||
- Push-to-talk in the web UI
|
||||
- Automated echo quality test with windowed analysis
|
||||
- Performance benchmarks
|
||||
- Cross-compilation CI for amd64, arm64, armv7
|
||||
|
||||
### Known Issues
|
||||
|
||||
- **Jitter buffer drift**: During long echo tests, the jitter buffer depth can drift because there is no adaptive depth adjustment based on observed jitter. The buffer uses sequence-number ordering only, without timestamp-based playout scheduling.
|
||||
|
||||
- **Web audio drift**: The browser AudioWorklet playback buffer caps at 200ms, but clock drift between the WebSocket message arrival rate and the AudioContext output rate can cause occasional underruns or accumulation. The cap prevents unbounded growth but may cause glitches.
|
||||
|
||||
- **No adaptive loop integration**: The `PathMonitor` feeds and `AdaptiveQualityController` are implemented but not wired together in the client's main loop. Quality reports are consumed when present in packets, but the client does not currently generate periodic quality reports from transport metrics.
|
||||
|
||||
- **Relay FEC pass-through**: In room mode, the relay forwards packets opaquely without FEC decode/re-encode. This means FEC protection is end-to-end only, not per-hop. In forward mode, the relay pipeline does perform FEC decode/re-encode.
|
||||
|
||||
- **No certificate verification**: The QUIC client config uses `SkipServerVerification` (accepts any certificate). This is intentional for testing but must be addressed for production deployments.
|
||||
|
||||
## Test Coverage
|
||||
|
||||
123 tests across 7 crates (wzp-web has no Rust tests):
|
||||
|
||||
| Crate | Test Files | Test Count |
|
||||
|-------|-----------|------------|
|
||||
| wzp-proto | 5 | 27 |
|
||||
| wzp-codec | 3 | 24 |
|
||||
| wzp-fec | 5 | 21 |
|
||||
| wzp-crypto | 5 | 21 |
|
||||
| wzp-transport | 3 | 12 |
|
||||
| wzp-relay | 4 | 10 |
|
||||
| wzp-client | 3 | 8 |
|
||||
| **Total** | **28** | **123** |
|
||||
|
||||
Tests cover:
|
||||
- Wire format roundtrip (header, quality report, full packet)
|
||||
- Codec encode/decode for all 5 codec IDs
|
||||
- Adaptive codec switching (Opus <-> Codec2)
|
||||
- FEC recovery at 0%, 30%, 50% loss
|
||||
- Concurrent FEC block decoding
|
||||
- Full key exchange handshake (Alice/Bob derive same session key)
|
||||
- Encrypt/decrypt roundtrip, wrong-key rejection, wrong-AAD rejection
|
||||
- Anti-replay window: sequential, out-of-order, duplicate, wrapping
|
||||
- Rekeying: interval trigger, key derivation, old key zeroization
|
||||
- QUIC datagram serialization roundtrip
|
||||
- Path quality EWMA smoothing
|
||||
- Jitter buffer: ordering, reordering, missing packets, min depth, duplicates
|
||||
- Session state machine: happy path, invalid transitions, connection loss
|
||||
- Pipeline packet generation and FEC repair
|
||||
- Benchmark correctness (codec, FEC, crypto, pipeline)
|
||||
|
||||
## Performance Benchmarks
|
||||
|
||||
Run with `wzp-bench --all`. Representative results (Apple M-series, single core):
|
||||
|
||||
### Codec Roundtrip (Opus 24kbps)
|
||||
- 1000 frames of 440 Hz sine wave (20ms each, 48 kHz mono)
|
||||
- Encode: ~20-40 us/frame average
|
||||
- Decode: ~10-20 us/frame average
|
||||
- Throughput: >10,000 frames/sec (200x real-time)
|
||||
- Compression ratio: ~30x (960 i16 samples = 1920 bytes -> ~60 bytes encoded)
|
||||
|
||||
### FEC Recovery
|
||||
- 100 blocks of 5 frames each
|
||||
- At 20% loss: ~100% recovery rate
|
||||
- At 30% loss with scaled FEC ratio: >95% recovery rate
|
||||
|
||||
### Crypto (ChaCha20-Poly1305)
|
||||
- 30,000 packets (60/120/256 byte payloads)
|
||||
- Throughput: >500,000 packets/sec
|
||||
- Bandwidth: >50 MB/sec
|
||||
- Average latency: <2 us per encrypt+decrypt cycle
|
||||
|
||||
### Full Pipeline (E2E)
|
||||
- 50 frames through CallEncoder -> CallDecoder
|
||||
- Average E2E latency: ~100-200 us/frame (codec + FEC, no network)
|
||||
- Wire overhead ratio: ~0.05-0.10x of raw PCM (high compression from Opus)
|
||||
|
||||
## Deployment Status
|
||||
|
||||
- **Local testing**: All modes tested on localhost (single relay, room mode, forward mode, web bridge)
|
||||
- **Hetzner VPS**: Build script (`scripts/build-linux.sh`) tested for provisioning, building, and downloading Linux binaries
|
||||
- **CI**: Gitea workflow defined for amd64/arm64/armv7 builds
|
||||
- **Production**: Not yet deployed to production networks
|
||||
158
docs/TELEMETRY.md
Normal file
158
docs/TELEMETRY.md
Normal file
@@ -0,0 +1,158 @@
|
||||
# WZP Telemetry & Observability
|
||||
|
||||
## Overview
|
||||
|
||||
WarzonePhone exports Prometheus-compatible metrics from all services (relay, web bridge, client) for Grafana dashboards. Inter-relay health probes provide always-on monitoring with negligible bandwidth overhead via multiplexed test lines.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌──────────┐ probe (1 pkt/s) ┌──────────┐
|
||||
│ Relay A │◄─────────────────────►│ Relay B │
|
||||
│ :4433 │ │ :4433 │
|
||||
│ /metrics │ │ /metrics │
|
||||
└────┬─────┘ └────┬─────┘
|
||||
│ │
|
||||
│ scrape │ scrape
|
||||
▼ ▼
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ Prometheus │
|
||||
└─────────────────┬───────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ Grafana │
|
||||
│ ┌─────────┐ ┌──────────┐ ┌──────────────┐ │
|
||||
│ │ Relay │ │ Per-call │ │ Inter-relay │ │
|
||||
│ │ Health │ │ Quality │ │ Latency Map │ │
|
||||
│ └─────────┘ └──────────┘ └──────────────┘ │
|
||||
└─────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Metrics Exported
|
||||
|
||||
### Relay (`/metrics` on HTTP port, default :9090)
|
||||
|
||||
| Metric | Type | Labels | Description |
|
||||
|--------|------|--------|-------------|
|
||||
| `wzp_relay_active_sessions` | Gauge | — | Current active sessions |
|
||||
| `wzp_relay_active_rooms` | Gauge | — | Current active rooms |
|
||||
| `wzp_relay_packets_forwarded_total` | Counter | `room` | Total packets forwarded |
|
||||
| `wzp_relay_bytes_forwarded_total` | Counter | `room` | Total bytes forwarded |
|
||||
| `wzp_relay_auth_attempts_total` | Counter | `result` (ok/fail) | Auth validation attempts |
|
||||
| `wzp_relay_handshake_duration_seconds` | Histogram | — | Crypto handshake time |
|
||||
| `wzp_relay_session_jitter_buffer_depth` | Gauge | `session_id` | Buffer depth per session |
|
||||
| `wzp_relay_session_loss_pct` | Gauge | `session_id` | Packet loss percentage |
|
||||
| `wzp_relay_session_rtt_ms` | Gauge | `session_id` | Round-trip time |
|
||||
| `wzp_relay_session_underruns_total` | Counter | `session_id` | Jitter buffer underruns |
|
||||
| `wzp_relay_session_overruns_total` | Counter | `session_id` | Jitter buffer overruns |
|
||||
|
||||
### Web Bridge (`/metrics` on same HTTP port)
|
||||
|
||||
| Metric | Type | Labels | Description |
|
||||
|--------|------|--------|-------------|
|
||||
| `wzp_web_active_connections` | Gauge | — | Current WebSocket connections |
|
||||
| `wzp_web_frames_bridged_total` | Counter | `direction` (up/down) | Audio frames bridged |
|
||||
| `wzp_web_auth_failures_total` | Counter | — | Browser auth failures |
|
||||
| `wzp_web_handshake_latency_seconds` | Histogram | — | Relay handshake time |
|
||||
|
||||
### Inter-Relay Probes
|
||||
|
||||
| Metric | Type | Labels | Description |
|
||||
|--------|------|--------|-------------|
|
||||
| `wzp_probe_rtt_ms` | Gauge | `target` | RTT to peer relay |
|
||||
| `wzp_probe_loss_pct` | Gauge | `target` | Loss to peer relay |
|
||||
| `wzp_probe_jitter_ms` | Gauge | `target` | Jitter to peer relay |
|
||||
| `wzp_probe_up` | Gauge | `target` | 1 if reachable, 0 if not |
|
||||
|
||||
### Client (JSONL file)
|
||||
|
||||
When `--metrics-file <path>` is used, the client writes one JSON object per second:
|
||||
|
||||
```json
|
||||
{
|
||||
"ts": "2026-03-28T06:30:00Z",
|
||||
"buffer_depth": 45,
|
||||
"underruns": 0,
|
||||
"overruns": 0,
|
||||
"loss_pct": 1.2,
|
||||
"rtt_ms": 34,
|
||||
"jitter_ms": 8,
|
||||
"frames_sent": 50,
|
||||
"frames_received": 49,
|
||||
"quality_profile": "GOOD"
|
||||
}
|
||||
```
|
||||
|
||||
## Task Breakdown
|
||||
|
||||
### WZP-P2-T5: Telemetry & Observability
|
||||
|
||||
| ID | Task | Dependencies | Effort |
|
||||
|----|------|-------------|--------|
|
||||
| **S1** | Prometheus `/metrics` on relay | None | 2-3h |
|
||||
| **S2** | Per-session metrics (jitter, loss, RTT) | S1 | 2-3h |
|
||||
| **S3** | Prometheus `/metrics` on web bridge | None | 2h |
|
||||
| **S4** | Client `--metrics-file` JSONL export | None | 2h |
|
||||
| **S5** | Inter-relay health probe (`--probe`) | S1 | 4-6h |
|
||||
| **S6** | Probe mesh mode (all relays probe each other) | S5 | 2-3h |
|
||||
| **S7** | Grafana dashboard JSON | S1-S6 | 2h |
|
||||
|
||||
### Parallelization
|
||||
|
||||
- **Group A** (parallel): S1, S3, S4 — three different binaries, no file overlap
|
||||
- **Group B** (sequential): S2 after S1, then S5 → S6
|
||||
- **Last**: S7 after all metrics are defined
|
||||
|
||||
## Inter-Relay Health Probes
|
||||
|
||||
The probe is a multiplexed test line: one QUIC connection per peer relay, one silent media packet per second (~50 bytes/s). This provides:
|
||||
|
||||
- **Continuous RTT measurement**: Ping/Pong signals timed to <1ms precision
|
||||
- **Loss detection**: Sequence gaps tracked over sliding 60s window
|
||||
- **Jitter monitoring**: Variation in inter-packet arrival times
|
||||
- **Outage detection**: `wzp_probe_up` drops to 0 within seconds
|
||||
|
||||
### Why multiplexed?
|
||||
|
||||
WZP already multiplexes media on a single QUIC connection. The probe session shares the same connection pool — no extra ports, no extra TLS handshakes. At 1 pkt/s of silence (~50 bytes after Opus encoding + headers), the overhead is negligible even on metered links.
|
||||
|
||||
### Probe mesh example
|
||||
|
||||
With 3 relays (A, B, C), each probes the other 2:
|
||||
|
||||
```
|
||||
A → B: rtt=12ms loss=0.0% jitter=2ms
|
||||
A → C: rtt=45ms loss=0.1% jitter=5ms
|
||||
B → A: rtt=13ms loss=0.0% jitter=2ms
|
||||
B → C: rtt=38ms loss=0.0% jitter=4ms
|
||||
C → A: rtt=44ms loss=0.2% jitter=6ms
|
||||
C → B: rtt=37ms loss=0.0% jitter=3ms
|
||||
```
|
||||
|
||||
This matrix feeds the Grafana latency heatmap and triggers alerts on degradation.
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Relay with metrics
|
||||
wzp-relay --listen 0.0.0.0:4433 --metrics-port 9090
|
||||
|
||||
# Relay with metrics + probe peer
|
||||
wzp-relay --listen 0.0.0.0:4433 --metrics-port 9090 --probe relay-b:4433
|
||||
|
||||
# Web bridge with metrics
|
||||
wzp-web --port 8080 --relay 127.0.0.1:4433 --metrics-port 9091
|
||||
|
||||
# Client with JSONL telemetry
|
||||
wzp-client --live --metrics-file /tmp/call-metrics.jsonl relay:4433
|
||||
```
|
||||
|
||||
## Grafana Dashboard
|
||||
|
||||
The pre-built dashboard (`docs/grafana-dashboard.json`) includes:
|
||||
|
||||
1. **Relay Health** — active sessions, rooms, packets/s, bytes/s
|
||||
2. **Call Quality** — per-session jitter depth, loss%, RTT, underruns over time
|
||||
3. **Inter-Relay Mesh** — latency heatmap, probe status, loss trends
|
||||
4. **Web Bridge** — active connections, frames bridged, auth failures
|
||||
269
docs/USAGE.md
Normal file
269
docs/USAGE.md
Normal file
@@ -0,0 +1,269 @@
|
||||
# WarzonePhone Usage Guide
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **Rust** 1.85+ (2024 edition)
|
||||
- **System libraries** (Linux): `cmake`, `pkg-config`, `libasound2-dev` (for audio feature)
|
||||
- **System libraries** (macOS): Xcode command line tools (CoreAudio is included)
|
||||
|
||||
## Building from Source
|
||||
|
||||
### All Binaries (Headless)
|
||||
|
||||
```bash
|
||||
cargo build --release --bin wzp-relay --bin wzp-client --bin wzp-bench --bin wzp-web
|
||||
```
|
||||
|
||||
### Client with Live Audio Support
|
||||
|
||||
```bash
|
||||
cargo build --release --bin wzp-client --features audio
|
||||
```
|
||||
|
||||
### Run All Tests
|
||||
|
||||
```bash
|
||||
cargo test --workspace --lib
|
||||
```
|
||||
|
||||
### Building for Linux (Remote Build Script)
|
||||
|
||||
The project includes `scripts/build-linux.sh` which provisions a temporary Hetzner Cloud VPS, builds all binaries, and downloads them:
|
||||
|
||||
```bash
|
||||
# Requires: hcloud CLI authenticated, SSH key "wz" registered
|
||||
./scripts/build-linux.sh
|
||||
# Outputs to: target/linux-x86_64/
|
||||
```
|
||||
|
||||
The build script produces:
|
||||
- `wzp-relay` -- relay daemon
|
||||
- `wzp-client` -- headless client
|
||||
- `wzp-client-audio` -- client with mic/speaker support (needs libasound2)
|
||||
- `wzp-web` -- web bridge server
|
||||
- `wzp-bench` -- performance benchmarks
|
||||
|
||||
### CI Build
|
||||
|
||||
The `.gitea/workflows/build.yml` workflow builds release binaries for:
|
||||
- Linux amd64
|
||||
- Linux arm64 (cross-compiled)
|
||||
- Linux armv7 (cross-compiled)
|
||||
|
||||
Triggered on version tags (`v*`) or manual dispatch.
|
||||
|
||||
---
|
||||
|
||||
## Binaries and CLI Flags
|
||||
|
||||
### wzp-relay
|
||||
|
||||
The relay daemon that forwards media between clients.
|
||||
|
||||
```
|
||||
Usage: wzp-relay [--listen <addr>] [--remote <addr>]
|
||||
|
||||
Options:
|
||||
--listen <addr> Listen address (default: 0.0.0.0:4433)
|
||||
--remote <addr> Remote relay for forwarding (disables room mode)
|
||||
```
|
||||
|
||||
**Room mode** (default): Clients join rooms by name. Packets are forwarded to all other participants in the same room (SFU model). Room name comes from QUIC SNI or defaults to "default".
|
||||
|
||||
**Forward mode** (`--remote`): All traffic is forwarded to a remote relay. Used for chaining relays across lossy/censored links.
|
||||
|
||||
### wzp-client
|
||||
|
||||
The CLI test client for sending and receiving audio.
|
||||
|
||||
```
|
||||
Usage: wzp-client [options] [relay-addr]
|
||||
|
||||
Options:
|
||||
--live Live mic/speaker mode (requires --features audio)
|
||||
--send-tone <secs> Send a 440Hz test tone for N seconds
|
||||
--send-file <file> Send a raw PCM file (48kHz mono s16le)
|
||||
--record <file.raw> Record received audio to raw PCM file
|
||||
--echo-test <secs> Run automated echo quality test
|
||||
```
|
||||
|
||||
Default relay address: `127.0.0.1:4433`
|
||||
|
||||
### wzp-bench
|
||||
|
||||
Performance benchmark tool.
|
||||
|
||||
```
|
||||
Usage: wzp-bench [OPTIONS]
|
||||
|
||||
Options:
|
||||
--codec Run codec roundtrip benchmark (Opus 24kbps, 1000 frames)
|
||||
--fec Run FEC recovery benchmark (100 blocks)
|
||||
--crypto Run encryption benchmark (30000 packets)
|
||||
--pipeline Run full pipeline benchmark (50 frames E2E)
|
||||
--all Run all benchmarks (default if no flag given)
|
||||
--loss <N> FEC loss percentage for --fec (default: 20)
|
||||
```
|
||||
|
||||
### wzp-web
|
||||
|
||||
Web bridge server that connects browser audio via WebSocket to the relay.
|
||||
|
||||
```
|
||||
Usage: wzp-web [--port 8080] [--relay 127.0.0.1:4433] [--tls]
|
||||
|
||||
Options:
|
||||
--port <port> HTTP/WebSocket port (default: 8080)
|
||||
--relay <addr> WZP relay address (default: 127.0.0.1:4433)
|
||||
--tls Enable HTTPS (self-signed cert, required for mic on Android/remote)
|
||||
```
|
||||
|
||||
Room URLs: `http://host:port/<room-name>` or `https://host:port/<room-name>` with `--tls`.
|
||||
|
||||
---
|
||||
|
||||
## Deployment Examples
|
||||
|
||||
### 1. Single Relay Echo Test
|
||||
|
||||
Start a relay, send a tone, and record the echo:
|
||||
|
||||
```bash
|
||||
# Terminal 1: Start relay
|
||||
wzp-relay --listen 0.0.0.0:4433
|
||||
|
||||
# Terminal 2: Send 10s of 440Hz tone and record the response
|
||||
wzp-client --send-tone 10 --record echo.raw 127.0.0.1:4433
|
||||
```
|
||||
|
||||
Play the recording:
|
||||
```bash
|
||||
ffplay -f s16le -ar 48000 -ac 1 echo.raw
|
||||
```
|
||||
|
||||
### 2. Two-Party Call Through Relay
|
||||
|
||||
Two clients connected to the same relay default room:
|
||||
|
||||
```bash
|
||||
# Terminal 1: Relay
|
||||
wzp-relay
|
||||
|
||||
# Terminal 2: Client A — send tone
|
||||
wzp-client --send-tone 30 127.0.0.1:4433
|
||||
|
||||
# Terminal 3: Client B — record
|
||||
wzp-client --record call.raw 127.0.0.1:4433
|
||||
```
|
||||
|
||||
### 3. Multi-Party Room Call
|
||||
|
||||
Multiple clients join the same named room. The relay QUIC SNI determines the room. With the web bridge, room names come from the URL path:
|
||||
|
||||
```bash
|
||||
# Relay
|
||||
wzp-relay
|
||||
|
||||
# Web bridge
|
||||
wzp-web --port 8080 --relay 127.0.0.1:4433
|
||||
|
||||
# Browser clients open:
|
||||
# http://localhost:8080/my-room
|
||||
# All clients on /my-room hear each other.
|
||||
```
|
||||
|
||||
### 4. Two-Relay Chain (Lossy Link)
|
||||
|
||||
Chain two relays for crossing a censored or lossy network boundary:
|
||||
|
||||
```bash
|
||||
# Destination-side relay (receives from the forward relay)
|
||||
wzp-relay --listen 0.0.0.0:4433
|
||||
|
||||
# Client-side relay (forwards to the destination relay)
|
||||
wzp-relay --listen 0.0.0.0:5433 --remote <dest-relay-ip>:4433
|
||||
|
||||
# Client connects to the client-side relay
|
||||
wzp-client --send-tone 10 127.0.0.1:5433
|
||||
```
|
||||
|
||||
### 5. Web Browser Call with TLS
|
||||
|
||||
TLS is required for microphone access on non-localhost origins (Android, remote browsers):
|
||||
|
||||
```bash
|
||||
# Relay
|
||||
wzp-relay
|
||||
|
||||
# Web bridge with TLS (self-signed certificate)
|
||||
wzp-web --port 8443 --relay 127.0.0.1:4433 --tls
|
||||
|
||||
# Open in browser (accept self-signed cert warning):
|
||||
# https://your-server:8443/room-name
|
||||
```
|
||||
|
||||
The web UI supports:
|
||||
- Open mic (default) and push-to-talk modes
|
||||
- PTT via on-screen button, mouse hold, or spacebar
|
||||
- Audio level meter
|
||||
- Auto-reconnection on disconnect
|
||||
|
||||
### 6. Automated Echo Quality Test
|
||||
|
||||
```bash
|
||||
wzp-relay &
|
||||
wzp-client --echo-test 30 127.0.0.1:4433
|
||||
```
|
||||
|
||||
Produces a windowed analysis report showing loss percentage, SNR, correlation, and detects quality degradation trends over time.
|
||||
|
||||
### 7. Live Audio Call (requires `--features audio`)
|
||||
|
||||
```bash
|
||||
wzp-relay &
|
||||
|
||||
# Terminal 2
|
||||
wzp-client --live 127.0.0.1:4433
|
||||
|
||||
# Terminal 3
|
||||
wzp-client --live 127.0.0.1:4433
|
||||
```
|
||||
|
||||
Both clients capture from the default microphone and play received audio through the default speaker. Press Ctrl+C to stop.
|
||||
|
||||
---
|
||||
|
||||
## Audio File Format
|
||||
|
||||
All raw PCM files use:
|
||||
- Sample rate: **48 kHz**
|
||||
- Channels: **1** (mono)
|
||||
- Sample format: **signed 16-bit little-endian** (s16le)
|
||||
|
||||
### ffmpeg Conversion Commands
|
||||
|
||||
```bash
|
||||
# WAV to raw PCM
|
||||
ffmpeg -i input.wav -f s16le -ar 48000 -ac 1 output.raw
|
||||
|
||||
# MP3 to raw PCM
|
||||
ffmpeg -i input.mp3 -f s16le -ar 48000 -ac 1 output.raw
|
||||
|
||||
# Raw PCM to WAV
|
||||
ffmpeg -f s16le -ar 48000 -ac 1 -i input.raw output.wav
|
||||
|
||||
# Play raw PCM directly
|
||||
ffplay -f s16le -ar 48000 -ac 1 file.raw
|
||||
# or with the newer channel layout syntax:
|
||||
ffplay -f s16le -ar 48000 -ch_layout mono file.raw
|
||||
```
|
||||
|
||||
### Sending an Audio File
|
||||
|
||||
```bash
|
||||
# Convert your audio to raw PCM first
|
||||
ffmpeg -i song.mp3 -f s16le -ar 48000 -ac 1 song.raw
|
||||
|
||||
# Send through relay
|
||||
wzp-client --send-file song.raw 127.0.0.1:4433
|
||||
```
|
||||
257
docs/WS_RELAY_SPEC.md
Normal file
257
docs/WS_RELAY_SPEC.md
Normal file
@@ -0,0 +1,257 @@
|
||||
# WS Support in wzp-relay — Implementation Spec
|
||||
|
||||
## Goal
|
||||
|
||||
Add WebSocket listener to `wzp-relay` so browsers connect directly, eliminating `wzp-web` bridge.
|
||||
|
||||
```
|
||||
Before: Browser → WS → wzp-web → QUIC → wzp-relay
|
||||
After: Browser → WS → wzp-relay (handles both WS + QUIC)
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
wzp-relay
|
||||
├── QUIC listener (:4433) — native clients, inter-relay
|
||||
├── WS listener (:8080) — browsers via Caddy
|
||||
│ ├── GET /ws/{room} — WebSocket upgrade
|
||||
│ └── Auth: first msg = {"type":"auth","token":"..."}
|
||||
└── Shared RoomManager — both transports in same rooms
|
||||
```
|
||||
|
||||
## Key Changes
|
||||
|
||||
### 1. Abstract `Participant` over transport type
|
||||
|
||||
**File: `room.rs`**
|
||||
|
||||
Currently:
|
||||
```rust
|
||||
struct Participant {
|
||||
id: ParticipantId,
|
||||
_addr: std::net::SocketAddr,
|
||||
transport: Arc<wzp_transport::QuinnTransport>,
|
||||
}
|
||||
```
|
||||
|
||||
Change to:
|
||||
```rust
|
||||
struct Participant {
|
||||
id: ParticipantId,
|
||||
_addr: std::net::SocketAddr,
|
||||
sender: ParticipantSender,
|
||||
}
|
||||
|
||||
/// How to send a media packet to a participant.
|
||||
enum ParticipantSender {
|
||||
Quic(Arc<wzp_transport::QuinnTransport>),
|
||||
WebSocket(tokio::sync::mpsc::Sender<bytes::Bytes>),
|
||||
}
|
||||
```
|
||||
|
||||
The `others()` method returns `Vec<ParticipantSender>` instead of `Vec<Arc<QuinnTransport>>`.
|
||||
|
||||
`ParticipantSender` implements a `send_pcm(&self, data: &[u8])` method:
|
||||
- **Quic**: wraps in `MediaPacket`, calls `transport.send_media()`
|
||||
- **WebSocket**: sends raw binary frame via the mpsc channel
|
||||
|
||||
### 2. Add `join_ws()` to RoomManager
|
||||
|
||||
```rust
|
||||
pub fn join_ws(
|
||||
&mut self,
|
||||
room_name: &str,
|
||||
addr: std::net::SocketAddr,
|
||||
sender: tokio::sync::mpsc::Sender<bytes::Bytes>,
|
||||
fingerprint: Option<&str>,
|
||||
) -> Result<ParticipantId, String>
|
||||
```
|
||||
|
||||
### 3. Add WS listener in `main.rs`
|
||||
|
||||
New flag: `--ws-port 8080`
|
||||
|
||||
```rust
|
||||
if let Some(ws_port) = config.ws_port {
|
||||
let room_mgr = room_mgr.clone();
|
||||
let auth_url = config.auth_url.clone();
|
||||
let metrics = metrics.clone();
|
||||
tokio::spawn(run_ws_server(ws_port, room_mgr, auth_url, metrics));
|
||||
}
|
||||
```
|
||||
|
||||
### 4. WebSocket handler (`ws.rs` — new file)
|
||||
|
||||
```rust
|
||||
use axum::{
|
||||
extract::{ws::{Message, WebSocket}, Path, WebSocketUpgrade},
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
|
||||
async fn ws_handler(
|
||||
Path(room): Path<String>,
|
||||
ws: WebSocketUpgrade,
|
||||
/* state */
|
||||
) -> impl IntoResponse {
|
||||
ws.on_upgrade(move |socket| handle_ws(socket, room, state))
|
||||
}
|
||||
|
||||
async fn handle_ws(mut socket: WebSocket, room: String, state: WsState) {
|
||||
let addr = /* peer addr */;
|
||||
|
||||
// 1. Auth: first message must be {"type":"auth","token":"..."}
|
||||
let fingerprint = if let Some(ref auth_url) = state.auth_url {
|
||||
match socket.recv().await {
|
||||
Some(Ok(Message::Text(text))) => {
|
||||
let parsed: serde_json::Value = serde_json::from_str(&text)?;
|
||||
if parsed["type"] == "auth" {
|
||||
let token = parsed["token"].as_str().unwrap();
|
||||
let client = auth::validate_token(auth_url, token).await?;
|
||||
Some(client.fingerprint)
|
||||
} else { return; }
|
||||
}
|
||||
_ => return,
|
||||
}
|
||||
} else { None };
|
||||
|
||||
// 2. Create mpsc channel for outbound frames
|
||||
let (tx, mut rx) = tokio::sync::mpsc::channel::<bytes::Bytes>(64);
|
||||
|
||||
// 3. Join room
|
||||
let participant_id = {
|
||||
let mut mgr = state.room_mgr.lock().await;
|
||||
mgr.join_ws(&room, addr, tx, fingerprint.as_deref())?
|
||||
};
|
||||
|
||||
// 4. Run send/recv loops
|
||||
let (mut ws_tx, mut ws_rx) = socket.split();
|
||||
|
||||
// Outbound: mpsc rx → WS send
|
||||
let send_task = tokio::spawn(async move {
|
||||
while let Some(data) = rx.recv().await {
|
||||
if ws_tx.send(Message::Binary(data.to_vec())).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Inbound: WS recv → fan-out to room
|
||||
loop {
|
||||
match ws_rx.next().await {
|
||||
Some(Ok(Message::Binary(data))) => {
|
||||
// Raw PCM Int16 from browser — fan-out to all others
|
||||
let others = {
|
||||
let mgr = state.room_mgr.lock().await;
|
||||
mgr.others(&room, participant_id)
|
||||
};
|
||||
for other in &others {
|
||||
                    other.send_raw(&data).await;
|
||||
}
|
||||
}
|
||||
Some(Ok(Message::Close(_))) | None => break,
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Cleanup
|
||||
send_task.abort();
|
||||
let mut mgr = state.room_mgr.lock().await;
|
||||
mgr.leave(&room, participant_id);
|
||||
}
|
||||
```
|
||||
|
||||
### 5. Cross-transport fan-out
|
||||
|
||||
When a QUIC participant sends audio → WS participants receive raw PCM bytes.
|
||||
When a WS participant sends audio → QUIC participants receive a `MediaPacket`.
|
||||
|
||||
The `ParticipantSender::send_raw()` method:
|
||||
```rust
|
||||
impl ParticipantSender {
|
||||
async fn send_raw(&self, pcm_bytes: &[u8]) {
|
||||
match self {
|
||||
ParticipantSender::WebSocket(tx) => {
|
||||
let _ = tx.try_send(bytes::Bytes::copy_from_slice(pcm_bytes));
|
||||
}
|
||||
ParticipantSender::Quic(transport) => {
|
||||
// Wrap raw PCM in a MediaPacket
|
||||
let pkt = MediaPacket {
|
||||
header: MediaHeader::default_pcm(),
|
||||
payload: bytes::Bytes::copy_from_slice(pcm_bytes),
|
||||
quality_report: None,
|
||||
};
|
||||
let _ = transport.send_media(&pkt).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For QUIC→WS direction, `run_participant` extracts `pkt.payload` bytes and sends to WS channels.
|
||||
|
||||
### 6. Dependencies to add
|
||||
|
||||
```toml
|
||||
# wzp-relay/Cargo.toml
|
||||
axum = { version = "0.8", features = ["ws"] }
|
||||
tokio = { version = "1", features = ["full"] } # already present
|
||||
```
|
||||
|
||||
### 7. Config change
|
||||
|
||||
```rust
|
||||
// config.rs
|
||||
pub struct RelayConfig {
|
||||
// ... existing fields ...
|
||||
pub ws_port: Option<u16>,
|
||||
}
|
||||
```
|
||||
|
||||
### 8. Docker compose change (featherChat side)
|
||||
|
||||
Remove `wzp-web` service entirely. Update Caddy to proxy `/audio/*` to relay's WS port:
|
||||
|
||||
```yaml
|
||||
# Before:
|
||||
wzp-web:
|
||||
entrypoint: ["wzp-web"]
|
||||
command: ["--port", "8080", "--relay", "172.28.0.10:4433"]
|
||||
|
||||
# After: REMOVED. Relay handles WS directly.
|
||||
|
||||
wzp-relay:
|
||||
command:
|
||||
- "--listen"
|
||||
- "0.0.0.0:4433"
|
||||
- "--ws-port"
|
||||
- "8080"
|
||||
- "--auth-url"
|
||||
- "http://warzone-server:7700/v1/auth/validate"
|
||||
```
|
||||
|
||||
## What Stays the Same
|
||||
|
||||
- Browser's `startAudio()` — unchanged, still connects WS to `/audio/ws/ROOM`
|
||||
- Caddy proxies `/audio/*` → relay:8080 (same path, different backend)
|
||||
- Auth flow — same JSON token as first message
|
||||
- PCM format — same Int16 binary frames
|
||||
- QUIC clients — unchanged, still connect to :4433
|
||||
- Room naming, ACL, session management — all unchanged
|
||||
|
||||
## Testing
|
||||
|
||||
1. Start relay with `--ws-port 8080 --listen 0.0.0.0:4433`
|
||||
2. Open browser, initiate call via featherChat
|
||||
3. Verify audio flows (both directions)
|
||||
4. Verify QUIC + WS clients can be in same room (mixed mode)
|
||||
5. Verify auth works
|
||||
6. Verify room cleanup on disconnect
|
||||
|
||||
## Migration Path
|
||||
|
||||
1. Implement WS in relay
|
||||
2. Test with featherChat (no featherChat changes needed)
|
||||
3. Remove wzp-web from Docker stack
|
||||
4. Later: add WebTransport alongside WS
|
||||
230
docs/WZP-FC-SHARED-CRATES.md
Normal file
230
docs/WZP-FC-SHARED-CRATES.md
Normal file
@@ -0,0 +1,230 @@
|
||||
# Shared Crate Strategy: WZP ↔ featherChat
|
||||
|
||||
**Goal:** Both projects import each other's crates directly instead of duplicating code. A change to identity derivation in featherChat automatically applies in WZP, and vice versa for call signaling types.
|
||||
|
||||
---
|
||||
|
||||
## Current Problem
|
||||
|
||||
- `warzone-protocol` uses workspace dependency inheritance (`Cargo.toml` has `ed25519-dalek.workspace = true`). When WZP tries to use it as a path dep, Cargo fails because it can't resolve workspace references from outside the featherChat workspace.
|
||||
- WZP had to mirror featherChat's `identity.rs`, `mnemonic.rs`, and `Fingerprint` type in `wzp-crypto/src/identity.rs` — duplicate code that can drift.
|
||||
- featherChat will need `wzp_proto::SignalMessage` for the `WireMessage::CallSignal` variant — another potential duplication.
|
||||
|
||||
## Solution: Make Key Crates Standalone-Importable
|
||||
|
||||
### What featherChat Needs to Do
|
||||
|
||||
#### FC-CRATE-1: Make `warzone-protocol` standalone-publishable
|
||||
|
||||
**File:** `warzone/crates/warzone-protocol/Cargo.toml`
|
||||
|
||||
Replace all `workspace = true` references with explicit versions:
|
||||
|
||||
```toml
|
||||
# Before:
|
||||
ed25519-dalek.workspace = true
|
||||
x25519-dalek.workspace = true
|
||||
|
||||
# After:
|
||||
ed25519-dalek = { version = "2", features = ["serde", "rand_core"] }
|
||||
x25519-dalek = { version = "2", features = ["serde", "static_secrets"] }
|
||||
chacha20poly1305 = "0.10"
|
||||
hkdf = "0.12"
|
||||
sha2 = "0.10"
|
||||
rand = "0.8"
|
||||
bip39 = "2"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
bincode = "1"
|
||||
thiserror = "2"
|
||||
hex = "0.4"
|
||||
base64 = "0.22"
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
zeroize = { version = "1", features = ["derive"] }
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
k256 = { version = "0.13", features = ["ecdsa", "serde"] }
|
||||
tiny-keccak = { version = "2", features = ["keccak"] }
|
||||
```
|
||||
|
||||
**Keep workspace inheritance working too** by using the `[package]` fallback pattern:
|
||||
```toml
|
||||
[package]
|
||||
name = "warzone-protocol"
|
||||
version = "0.0.20"
|
||||
edition = "2021"
|
||||
# Remove version.workspace and edition.workspace — use explicit values
|
||||
```
|
||||
|
||||
This way the crate still works inside the featherChat workspace AND can be imported by WZP as a path dependency.
|
||||
|
||||
**Test:** From the WZP repo, this should work:
|
||||
```toml
|
||||
# In wzp-crypto/Cargo.toml:
|
||||
warzone-protocol = { path = "../../deps/featherchat/warzone/crates/warzone-protocol" }
|
||||
```
|
||||
|
||||
**Effort:** 30 minutes. Mechanical replacement, then `cargo build` to verify.
|
||||
|
||||
#### FC-CRATE-2: Add `wzp-proto` as a git dependency for `CallSignal`
|
||||
|
||||
**File:** `warzone/crates/warzone-protocol/Cargo.toml`
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
# WarzonePhone signaling types (for CallSignal WireMessage variant)
|
||||
wzp-proto = { git = "ssh://git@git.manko.yoga:222/manawenuz/wz-phone.git", optional = true }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
wzp = ["wzp-proto"]
|
||||
```
|
||||
|
||||
**File:** `warzone/crates/warzone-protocol/src/message.rs`
|
||||
|
||||
```rust
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub enum WireMessage {
|
||||
// ... existing variants ...
|
||||
|
||||
/// Voice/video call signaling (requires "wzp" feature).
|
||||
#[cfg(feature = "wzp")]
|
||||
CallSignal {
|
||||
id: String,
|
||||
sender_fingerprint: String,
|
||||
signal: wzp_proto::SignalMessage, // Typed, not opaque bytes
|
||||
},
|
||||
|
||||
/// Voice/video call signaling (without wzp feature — opaque bytes).
|
||||
#[cfg(not(feature = "wzp"))]
|
||||
CallSignal {
|
||||
id: String,
|
||||
sender_fingerprint: String,
|
||||
signal: Vec<u8>, // Opaque JSON bytes
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
**Alternative (simpler):** Always use `Vec<u8>` for the signal field and let the consumer deserialize. This avoids the feature flag complexity:
|
||||
|
||||
```rust
|
||||
CallSignal {
|
||||
id: String,
|
||||
sender_fingerprint: String,
|
||||
signal_json: String, // JSON-serialized wzp_proto::SignalMessage
|
||||
},
|
||||
```
|
||||
|
||||
featherChat server treats it as opaque. WZP client deserializes it to `SignalMessage`.
|
||||
|
||||
**Effort:** 1-2 hours.
|
||||
|
||||
#### FC-CRATE-3: Extract shared identity types to a micro-crate (optional, long-term)
|
||||
|
||||
Create `warzone-identity` crate containing only:
|
||||
- `Seed` (generation, from_bytes, from_hex, from_mnemonic, to_mnemonic)
|
||||
- `IdentityKeyPair` (derive from seed)
|
||||
- `PublicIdentity` (verifying key, encryption key, fingerprint)
|
||||
- `Fingerprint` (SHA-256 truncated, display format)
|
||||
- `hkdf_derive()` helper
|
||||
|
||||
Both `warzone-protocol` and `wzp-crypto` depend on `warzone-identity` instead of each implementing their own. This is the cleanest long-term solution but requires more refactoring.
|
||||
|
||||
**Crate structure:**
|
||||
```
|
||||
warzone-identity/
|
||||
├── Cargo.toml (standalone, no workspace inheritance)
|
||||
├── src/
|
||||
│ ├── lib.rs
|
||||
│ ├── seed.rs
|
||||
│ ├── identity.rs
|
||||
│ ├── fingerprint.rs
|
||||
│ └── mnemonic.rs
|
||||
```
|
||||
|
||||
**Dependencies:** ed25519-dalek, x25519-dalek, hkdf, sha2, bip39, hex, zeroize
|
||||
|
||||
Both projects import it:
|
||||
```toml
|
||||
# featherChat:
|
||||
warzone-identity = { path = "../warzone-identity" }
|
||||
|
||||
# WZP (via submodule):
|
||||
warzone-identity = { path = "deps/featherchat/warzone-identity" }
|
||||
```
|
||||
|
||||
**Effort:** Half a day. Extract code from warzone-protocol, update imports in both projects.
|
||||
|
||||
---
|
||||
|
||||
### What WZP Needs to Do (after featherChat completes FC-CRATE-1)
|
||||
|
||||
#### WZP-CRATE-1: Replace identity mirror with real dependency
|
||||
|
||||
Once `warzone-protocol` is standalone-importable:
|
||||
|
||||
**File:** `crates/wzp-crypto/Cargo.toml`
|
||||
```toml
|
||||
# Remove bip39 and hex (now comes from warzone-protocol)
|
||||
# Add:
|
||||
warzone-protocol = { path = "../../deps/featherchat/warzone/crates/warzone-protocol" }
|
||||
```
|
||||
|
||||
**File:** `crates/wzp-crypto/src/identity.rs`
|
||||
Replace the entire file with re-exports:
|
||||
```rust
|
||||
//! featherChat identity — re-exported from warzone-protocol.
|
||||
pub use warzone_protocol::identity::{IdentityKeyPair, Seed};
|
||||
pub use warzone_protocol::types::Fingerprint;
|
||||
```
|
||||
|
||||
**File:** `crates/wzp-crypto/src/handshake.rs`
|
||||
Use `warzone_protocol::identity::Seed` internally instead of raw HKDF calls.
|
||||
|
||||
**Effort:** 1 hour (after FC-CRATE-1 is done).
|
||||
|
||||
#### WZP-CRATE-2: Make `wzp-proto` standalone-importable
|
||||
|
||||
`wzp-proto` already has explicit dependency versions (not workspace-inherited for external deps). It should work as a git dependency from featherChat. Verify:
|
||||
|
||||
```bash
|
||||
# From a scratch project:
|
||||
cargo add --git ssh://git@git.manko.yoga:222/manawenuz/wz-phone.git wzp-proto
|
||||
```
|
||||
|
||||
If this fails, replace any remaining workspace references in `wzp-proto/Cargo.toml` with explicit versions.
|
||||
|
||||
**Key types featherChat needs from wzp-proto:**
|
||||
- `SignalMessage` (CallOffer, CallAnswer, IceCandidate, Hangup, etc.)
|
||||
- `QualityProfile` (for codec negotiation)
|
||||
- `HangupReason`
|
||||
|
||||
**Effort:** 30 minutes to verify and fix.
|
||||
|
||||
---
|
||||
|
||||
## Recommended Order
|
||||
|
||||
1. **FC-CRATE-1** — Make warzone-protocol standalone (30 min, unblocks everything)
|
||||
2. **WZP-CRATE-2** — Verify wzp-proto works as git dep (30 min)
|
||||
3. **FC-CRATE-2** — Add CallSignal with opaque signal_json field (1-2 hours)
|
||||
4. **WZP-CRATE-1** — Replace identity mirror with real dep (1 hour)
|
||||
5. **FC-CRATE-3** — Extract warzone-identity micro-crate (optional, half day)
|
||||
|
||||
After steps 1-4, both projects share types directly:
|
||||
- WZP imports `warzone-protocol` for identity/seed/fingerprint
|
||||
- featherChat imports `wzp-proto` (via git) for `SignalMessage` types
|
||||
- No duplicated code, no drift risk
|
||||
|
||||
---
|
||||
|
||||
## Dependency Graph After Integration
|
||||
|
||||
```
|
||||
warzone-identity (shared micro-crate, optional step 5)
|
||||
↑ ↑
|
||||
warzone-protocol wzp-crypto
|
||||
↑ ↑
|
||||
warzone-server wzp-proto ← wzp-codec, wzp-fec, wzp-transport
|
||||
↑ ↑
|
||||
warzone-client wzp-client, wzp-relay, wzp-web
|
||||
```
|
||||
885
docs/grafana-dashboard.json
Normal file
885
docs/grafana-dashboard.json
Normal file
@@ -0,0 +1,885 @@
|
||||
{
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "DS_PROMETHEUS",
|
||||
"label": "Prometheus",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "prometheus",
|
||||
"pluginName": "Prometheus"
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "10.0.0"
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "prometheus",
|
||||
"name": "Prometheus",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "gauge",
|
||||
"name": "Gauge",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "timeseries",
|
||||
"name": "Time series",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "barchart",
|
||||
"name": "Bar chart",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "histogram",
|
||||
"name": "Histogram",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "table",
|
||||
"name": "Table",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "stat",
|
||||
"name": "Stat",
|
||||
"version": ""
|
||||
}
|
||||
],
|
||||
"id": null,
|
||||
"uid": "wzp-relay-v1",
|
||||
"title": "WarzonePhone Relay Dashboard",
|
||||
"description": "Monitoring dashboard for WarzonePhone relay, call quality, inter-relay mesh, and web bridge.",
|
||||
"tags": ["wzp", "voip", "relay"],
|
||||
"style": "dark",
|
||||
"timezone": "browser",
|
||||
"editable": true,
|
||||
"graphTooltip": 1,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"liveNow": false,
|
||||
"refresh": "10s",
|
||||
"schemaVersion": 39,
|
||||
"version": 1,
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": { "type": "grafana", "uid": "-- Grafana --" },
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"panels": [
|
||||
{
|
||||
"type": "row",
|
||||
"title": "Relay Health",
|
||||
"collapsed": false,
|
||||
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 },
|
||||
"id": 1,
|
||||
"panels": []
|
||||
},
|
||||
{
|
||||
"type": "gauge",
|
||||
"title": "Active Sessions",
|
||||
"gridPos": { "h": 8, "w": 4, "x": 0, "y": 1 },
|
||||
"id": 2,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_relay_active_sessions",
|
||||
"legendFormat": "sessions",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "thresholds" },
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 50 },
|
||||
{ "color": "red", "value": 100 }
|
||||
]
|
||||
},
|
||||
"unit": "none",
|
||||
"min": 0
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "gauge",
|
||||
"title": "Active Rooms",
|
||||
"gridPos": { "h": 8, "w": 4, "x": 4, "y": 1 },
|
||||
"id": 3,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_relay_active_rooms",
|
||||
"legendFormat": "rooms",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "thresholds" },
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 25 },
|
||||
{ "color": "red", "value": 50 }
|
||||
]
|
||||
},
|
||||
"unit": "none",
|
||||
"min": 0
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Packets/sec",
|
||||
"gridPos": { "h": 8, "w": 4, "x": 8, "y": 1 },
|
||||
"id": 4,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(wzp_relay_packets_forwarded_total[1m])",
|
||||
"legendFormat": "packets/s",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 20,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto",
|
||||
"gradientMode": "scheme"
|
||||
},
|
||||
"unit": "pps",
|
||||
"min": 0
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "single", "sort": "none" },
|
||||
"legend": { "displayMode": "list", "placement": "bottom" }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Bytes/sec",
|
||||
"gridPos": { "h": 8, "w": 4, "x": 12, "y": 1 },
|
||||
"id": 5,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(wzp_relay_bytes_forwarded_total[1m])",
|
||||
"legendFormat": "bytes/s",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 20,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto",
|
||||
"gradientMode": "scheme"
|
||||
},
|
||||
"unit": "Bps",
|
||||
"min": 0
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "single", "sort": "none" },
|
||||
"legend": { "displayMode": "list", "placement": "bottom" }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "barchart",
|
||||
"title": "Auth Success vs Failure",
|
||||
"gridPos": { "h": 8, "w": 4, "x": 16, "y": 1 },
|
||||
"id": 6,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(wzp_relay_auth_attempts_total[5m])",
|
||||
"legendFormat": "{{result}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"stacking": "normal",
|
||||
"fillOpacity": 80,
|
||||
"lineWidth": 1,
|
||||
"gradientMode": "none",
|
||||
"axisCenteredZero": false
|
||||
},
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": { "id": "byName", "options": "ok" },
|
||||
"properties": [
|
||||
{ "id": "color", "value": { "fixedColor": "green", "mode": "fixed" } }
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": { "id": "byName", "options": "fail" },
|
||||
"properties": [
|
||||
{ "id": "color", "value": { "fixedColor": "red", "mode": "fixed" } }
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "single", "sort": "none" },
|
||||
"legend": { "displayMode": "list", "placement": "bottom" },
|
||||
"orientation": "auto",
|
||||
"barWidth": 0.9,
|
||||
"groupWidth": 0.7,
|
||||
"xTickLabelRotation": 0,
|
||||
"showValue": "auto",
|
||||
"stacking": "normal"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "histogram",
|
||||
"title": "Handshake Duration",
|
||||
"gridPos": { "h": 8, "w": 4, "x": 20, "y": 1 },
|
||||
"id": 7,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_relay_handshake_duration_seconds_bucket",
|
||||
"legendFormat": "{{le}}",
|
||||
"refId": "A",
|
||||
"format": "heatmap"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"fillOpacity": 80,
|
||||
"lineWidth": 1,
|
||||
"gradientMode": "scheme"
|
||||
},
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "single", "sort": "none" },
|
||||
"legend": { "displayMode": "list", "placement": "bottom" },
|
||||
"bucketOffset": 0,
|
||||
"combine": false,
|
||||
"fillOpacity": 80,
|
||||
"gradientMode": "scheme"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "row",
|
||||
"title": "Call Quality (per-session)",
|
||||
"collapsed": false,
|
||||
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 9 },
|
||||
"id": 10,
|
||||
"panels": []
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Buffer Depth",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 0, "y": 10 },
|
||||
"id": 11,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_relay_session_jitter_buffer_depth",
|
||||
"legendFormat": "{{session_id}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 10,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto"
|
||||
},
|
||||
"unit": "none",
|
||||
"min": 0
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "multi", "sort": "desc" },
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "calcs": ["lastNotNull", "mean"] }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Loss %",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 6, "y": 10 },
|
||||
"id": 12,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_relay_session_loss_pct",
|
||||
"legendFormat": "{{session_id}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 10,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto"
|
||||
},
|
||||
"unit": "percent",
|
||||
"min": 0,
|
||||
"max": 100,
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 2 },
|
||||
{ "color": "red", "value": 5 }
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "multi", "sort": "desc" },
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "calcs": ["lastNotNull", "mean", "max"] }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "RTT",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 12, "y": 10 },
|
||||
"id": 13,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_relay_session_rtt_ms",
|
||||
"legendFormat": "{{session_id}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 10,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto"
|
||||
},
|
||||
"unit": "ms",
|
||||
"min": 0,
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 100 },
|
||||
{ "color": "red", "value": 300 }
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "multi", "sort": "desc" },
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "calcs": ["lastNotNull", "mean", "max"] }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Underruns",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 18, "y": 10 },
|
||||
"id": 14,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(wzp_relay_session_underruns_total[1m])",
|
||||
"legendFormat": "{{session_id}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 10,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto"
|
||||
},
|
||||
"unit": "ops",
|
||||
"min": 0
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "multi", "sort": "desc" },
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "calcs": ["lastNotNull", "mean"] }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "row",
|
||||
"title": "Inter-Relay Mesh",
|
||||
"collapsed": false,
|
||||
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 18 },
|
||||
"id": 20,
|
||||
"panels": []
|
||||
},
|
||||
{
|
||||
"type": "table",
|
||||
"title": "RTT Heatmap",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 0, "y": 19 },
|
||||
"id": 21,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_probe_rtt_ms",
|
||||
"legendFormat": "{{target}}",
|
||||
"refId": "A",
|
||||
"instant": true,
|
||||
"format": "table"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "thresholds" },
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 50 },
|
||||
{ "color": "orange", "value": 100 },
|
||||
{ "color": "red", "value": 200 }
|
||||
]
|
||||
},
|
||||
"unit": "ms",
|
||||
"custom": {
|
||||
"displayMode": "color-background",
|
||||
"align": "auto",
|
||||
"inspect": false
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"showHeader": true,
|
||||
"sortBy": [{ "displayName": "Value", "desc": true }],
|
||||
"cellHeight": "sm",
|
||||
"footer": { "show": false }
|
||||
},
|
||||
"transformations": [
|
||||
{
|
||||
"id": "organize",
|
||||
"options": {
|
||||
"excludeByName": { "Time": true, "__name__": true, "instance": true, "job": true },
|
||||
"renameByName": { "target": "Target", "Value": "RTT (ms)" }
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Loss",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 6, "y": 19 },
|
||||
"id": 22,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_probe_loss_pct",
|
||||
"legendFormat": "{{target}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 10,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto"
|
||||
},
|
||||
"unit": "percent",
|
||||
"min": 0,
|
||||
"max": 100,
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 1 },
|
||||
{ "color": "red", "value": 5 }
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "multi", "sort": "desc" },
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "calcs": ["lastNotNull", "mean", "max"] }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Jitter",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 12, "y": 19 },
|
||||
"id": 23,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_probe_jitter_ms",
|
||||
"legendFormat": "{{target}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 10,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto"
|
||||
},
|
||||
"unit": "ms",
|
||||
"min": 0,
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 10 },
|
||||
{ "color": "red", "value": 30 }
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "multi", "sort": "desc" },
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "calcs": ["lastNotNull", "mean", "max"] }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "stat",
|
||||
"title": "Probe Status",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 18, "y": 19 },
|
||||
"id": 24,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_probe_up",
|
||||
"legendFormat": "{{target}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "thresholds" },
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "red", "value": null },
|
||||
{ "color": "green", "value": 1 }
|
||||
]
|
||||
},
|
||||
"mappings": [
|
||||
{ "type": "value", "options": { "0": { "text": "DOWN", "color": "red" }, "1": { "text": "UP", "color": "green" } } }
|
||||
],
|
||||
"unit": "none",
|
||||
"min": 0,
|
||||
"max": 1
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"textMode": "auto",
|
||||
"colorMode": "background",
|
||||
"graphMode": "none",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "row",
|
||||
"title": "Web Bridge",
|
||||
"collapsed": false,
|
||||
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 27 },
|
||||
"id": 30,
|
||||
"panels": []
|
||||
},
|
||||
{
|
||||
"type": "gauge",
|
||||
"title": "Active Connections",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 0, "y": 28 },
|
||||
"id": 31,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_web_active_connections",
|
||||
"legendFormat": "connections",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "thresholds" },
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 50 },
|
||||
{ "color": "red", "value": 100 }
|
||||
]
|
||||
},
|
||||
"unit": "none",
|
||||
"min": 0
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Frames Bridged",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 6, "y": 28 },
|
||||
"id": 32,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(wzp_web_frames_bridged_total[1m])",
|
||||
"legendFormat": "{{direction}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 20,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto",
|
||||
"gradientMode": "scheme"
|
||||
},
|
||||
"unit": "ops",
|
||||
"min": 0
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": { "id": "byName", "options": "up" },
|
||||
"properties": [
|
||||
{ "id": "color", "value": { "fixedColor": "blue", "mode": "fixed" } }
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": { "id": "byName", "options": "down" },
|
||||
"properties": [
|
||||
{ "id": "color", "value": { "fixedColor": "purple", "mode": "fixed" } }
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "multi", "sort": "desc" },
|
||||
"legend": { "displayMode": "list", "placement": "bottom" }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "timeseries",
|
||||
"title": "Auth Failures",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 12, "y": 28 },
|
||||
"id": 33,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(wzp_web_auth_failures_total[5m])",
|
||||
"legendFormat": "auth failures/s",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "fixed", "fixedColor": "red" },
|
||||
"custom": {
|
||||
"drawStyle": "line",
|
||||
"lineInterpolation": "smooth",
|
||||
"fillOpacity": 20,
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": { "mode": "none", "group": "A" },
|
||||
"axisPlacement": "auto"
|
||||
},
|
||||
"unit": "ops",
|
||||
"min": 0
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "single", "sort": "none" },
|
||||
"legend": { "displayMode": "list", "placement": "bottom" }
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "histogram",
|
||||
"title": "Handshake Latency",
|
||||
"gridPos": { "h": 8, "w": 6, "x": 18, "y": 28 },
|
||||
"id": 34,
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"targets": [
|
||||
{
|
||||
"expr": "wzp_web_handshake_latency_seconds_bucket",
|
||||
"legendFormat": "{{le}}",
|
||||
"refId": "A",
|
||||
"format": "heatmap"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"fillOpacity": 80,
|
||||
"lineWidth": 1,
|
||||
"gradientMode": "scheme"
|
||||
},
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"options": {
|
||||
"tooltip": { "mode": "single", "sort": "none" },
|
||||
"legend": { "displayMode": "list", "placement": "bottom" },
|
||||
"bucketOffset": 0,
|
||||
"combine": false,
|
||||
"fillOpacity": 80,
|
||||
"gradientMode": "scheme"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
143
notes
Normal file
143
notes
Normal file
@@ -0,0 +1,143 @@
|
||||
|
||||
|
||||
|
||||
1. Add trunking (biggest win): Multiplex multiple sessions into a single QUIC datagram batch. A TrunkFrame could pack N mini-packets (session_id:2 + payload) into one datagram, sharing the QUIC overhead. This is your multiplexing idea
|
||||
from the telemetry discussion — the probe test lines are already a step toward this.
|
||||
2. Mini-frame format: For consecutive packets from the same session, use a 4-byte mini-header (just timestamp delta + payload length) instead of the full 12-byte MediaHeader. IAX2 does this and it cuts header overhead by 67%.
|
||||
3. Comfort noise / silence suppression: IAX2 supports CN frames — don't send packets during silence, saving ~50% bandwidth in typical conversations. WZP always sends frames even during silence.
|
||||
4. Jitter buffer from IAX2's design: IAX2 uses adaptive playout delay based on observed jitter, not fixed target depth. This is exactly what T1-S4 should implement — your sweep tool (S3) can guide the parameters.
|
||||
5. Call control completeness: IAX2 has HOLD, TRANSFER, QUELCH (mute from server), UNQUELCH. WZP's SignalMessage only has Offer/Answer/Hangup/Ringing. Adding these would help featherChat integration.
|
||||
|
||||
Priority Recommendations
|
||||
|
||||
┌──────────────────────┬────────────────────────┬───────────────────────────────────────────────────┐
|
||||
│ From IAX2 │ Maps to │ Impact │
|
||||
├──────────────────────┼────────────────────────┼───────────────────────────────────────────────────┤
|
||||
│ Trunking │ New P2 task │ High — reduces per-packet overhead at relay scale │
|
||||
├──────────────────────┼────────────────────────┼───────────────────────────────────────────────────┤
|
||||
│ Mini-frames │ Wire format change │ Medium — 67% header reduction │
|
||||
├──────────────────────┼────────────────────────┼───────────────────────────────────────────────────┤
|
||||
│ Silence suppression │ Codec layer │ Medium — 50% BW savings in typical calls │
|
||||
├──────────────────────┼────────────────────────┼───────────────────────────────────────────────────┤
|
||||
│ Adaptive playout │ P2-T1-S4 │ Medium — fixes the jitter/drift issue │
|
||||
├──────────────────────┼────────────────────────┼───────────────────────────────────────────────────┤
|
||||
│ Call control signals │ SignalMessage variants │ Low — UX feature for featherChat │
|
||||
└──────────────────────┴────────────────────────┴───────────────────────────────────────────────────┘
|
||||
|
||||
The trunking gap is the most significant architectural difference. Everything else WZP is either on par or ahead (crypto, FEC, identity).
|
||||
|
||||
|
||||
/btw answer my question — imagine we are connected to Mesh one, our id is 1 (some eth wallet address), and I'm trying to call 2 (some other eth address) which is not connected to our relay; i.e. I'm connected to relay 1, relay one is
|
||||
connected to relay 3, and relay 3 to relay 2, how do we achieve a connection ?
|
||||
|
||||
|
||||
Great question — this is relay-to-relay routing, and we don't have it yet. Here's what exists vs what's needed:
|
||||
|
||||
What We Have
|
||||
|
||||
- Client → Relay (direct QUIC)
|
||||
- Relay → Relay probing (health metrics only, no media forwarding)
|
||||
- Room-based forwarding within a single relay (SFU)
|
||||
- Forward mode (relay → one remote relay, hardcoded pair)
|
||||
|
||||
What's Missing: Multi-Hop Call Routing
|
||||
|
||||
You (wallet 0x1) Peer (wallet 0x2)
|
||||
│ │
|
||||
▼ ▼
|
||||
Relay 1 ──── Relay 3 ──── Relay 2
|
||||
|
||||
This requires:
|
||||
|
||||
1. Relay Discovery / Routing Table
|
||||
|
||||
Each relay needs to know which other relays exist and which users are connected where. Options:
|
||||
- Gossip protocol: relays share their connected fingerprints with neighbors
|
||||
- Central registry: featherChat server knows "user 0x2 is on Relay 2"
|
||||
- DHT: fingerprint → relay mapping distributed across the mesh (like Kademlia)
|
||||
|
||||
2. Signaling Path Resolution
|
||||
|
||||
When you call 0x2:
|
||||
1. Your client sends CallOffer through featherChat WS (already works)
|
||||
2. featherChat knows 0x2's relay (from FC-5 group-to-room mapping or a presence API)
|
||||
3. CallAnswer comes back with the relay chain: [relay1, relay3, relay2]
|
||||
|
||||
3. Media Path Setup
|
||||
|
||||
Two approaches:
|
||||
|
||||
A. Relay chaining (simpler):
|
||||
You → Relay1 → Relay3 → Relay2 → Peer
|
||||
Each relay forwards packets to the next hop. We already have forward mode (--remote) for a single pair — extend it to be per-session with a routing table. Latency adds up but it works.
|
||||
|
||||
B. QUIC relay-assisted NAT traversal (better):
|
||||
You → Relay1 → Relay3 → Relay2 → Peer (signaling)
|
||||
You ←──────── direct QUIC ──────────→ Peer (media, if NAT allows)
|
||||
Use relays for signaling + ICE candidate exchange, then attempt a direct connection. Fall back to relay chain if direct fails.
|
||||
|
||||
4. Suggested Task Breakdown
|
||||
|
||||
┌───────────┬──────────────────────────┬─────────────────────────────────────────────────────────────────────────┐
|
||||
│ ID │ Task │ Description │
|
||||
├───────────┼──────────────────────────┼─────────────────────────────────────────────────────────────────────────┤
|
||||
│ WZP-P3-T4 │ Relay peer registry │ Each relay announces connected fingerprints to neighbors │
|
||||
├───────────┼──────────────────────────┼─────────────────────────────────────────────────────────────────────────┤
|
||||
│ WZP-P3-T5 │ Route resolution │ Given a target fingerprint, find the relay chain │
|
||||
├───────────┼──────────────────────────┼─────────────────────────────────────────────────────────────────────────┤
|
||||
│ WZP-P3-T6 │ Per-session forwarding │ Relay forwards media for a specific session to next-hop relay │
|
||||
├───────────┼──────────────────────────┼─────────────────────────────────────────────────────────────────────────┤
|
||||
│ WZP-P3-T7 │ Call setup over mesh │ Client → FC signaling → resolve route → setup relay chain → media flows │
|
||||
├───────────┼──────────────────────────┼─────────────────────────────────────────────────────────────────────────┤
|
||||
│ WZP-P3-T8 │ Direct path optimization │ After relay chain works, attempt ICE/STUN direct connection │
|
||||
└───────────┴──────────────────────────┴─────────────────────────────────────────────────────────────────────────┘
|
||||
|
||||
5. The Practical Shortcut
|
||||
|
||||
Since featherChat already has presence (FC-6) and group membership (FC-5), the easiest first pass:
|
||||
|
||||
1. featherChat server knows which relay each user registered with (add relay_addr to the auth/connect flow)
|
||||
2. CallOffer payload already carries relay_addr — the callee's client reads it and connects directly to Relay 1
|
||||
3. Both clients connect to the same relay (caller's), joining the same hashed room
|
||||
|
||||
This avoids multi-hop entirely — the callee just connects to the caller's relay. Only fails if the callee can't reach that relay (firewall, geo), which is when you need the full mesh routing.
|
||||
|
||||
Want me to add these as P3 tasks and start with the shortcut approach?
|
||||
|
||||
|
||||
|
||||
│ Silence suppression VAD/DTX in Opus vs our Yes (comfort noise) vs iax (native)
|
||||
2. Algorithms: Signal pioneered adaptive jitter buffers, dynamic bitrate switching, and bandwidth estimation tuned across millions of calls. WZP's QualityAdapter is a first pass — their algorithms use ML-based predictors.
|
||||
|
||||
3. Echo cancellation / noise suppression: Signal uses RNNoise (ML-based), WhatsApp has proprietary DSP. WZP doesn't have any — it relies on the platform's built-in AEC.

4. Group calls: WebRTC SFUs (used by Signal, WhatsApp) handle 32+ participants with simulcast (multiple quality layers). WZP's room mode is a basic SFU without simulcast or quality layering.
|
||||
|
||||
|
||||
Yes, partially. Here's what you can realistically use:
|
||||
|
||||
RNNoise (echo/noise suppression) — This is the easiest win. RNNoise is BSD-licensed C code, and there's a Rust crate (nnnoiseless — a pure Rust port). You could add it as a pre-processing step in CallEncoder before Opus encoding. No
|
||||
legal issues, no dependency on Signal.
|
||||
|
||||
Jitter buffer algorithms — Signal's adaptive jitter buffer code is in their open-source ringrtc repo (GPLv3). You can study the algorithm (NetEq-style adaptive playout) but can't copy code directly into WZP (MIT) due to GPL. However,
|
||||
the concepts (ML-based delay prediction, talkspurt detection) are well-documented in academic papers and WebRTC's NetEq source (BSD-licensed). You could:
|
||||
- Port ideas from WebRTC's NetEq (BSD) — this is what Signal built on anyway
|
||||
- Use your T5 telemetry + sweep tool to train a simple predictor specific to WZP
|
||||
|
||||
Bandwidth estimation — WebRTC's GCC (Google Congestion Control) is BSD-licensed and well-documented. You could implement a simplified version using your existing QualityReport data. Signal's version is derivative of this.
|
||||
|
||||
Simulcast / group quality layers — The concept is straightforward (encode at 2-3 quality levels, relay picks per-receiver). You already have QualityProfile with GOOD/DEGRADED/CATASTROPHIC — extend the encoder to produce multiple
|
||||
streams and the relay to select per-participant. No need to copy Signal code for this.
|
||||
|
||||
What you CAN'T directly use:
|
||||
- Signal's proprietary ML models (not published)
|
||||
- WhatsApp's DSP (closed source)
|
||||
- GPL code in MIT project without relicensing
|
||||
|
||||
Practical priority:
|
||||
1. nnnoiseless crate for noise suppression (drop-in, huge quality win)
|
||||
2. WebRTC NetEq-style adaptive playout for T1-S4 (BSD, replaces your fixed jitter buffer)
|
||||
3. GCC-style bandwidth estimation for P3-T3
|
||||
4. Simulcast for group calls (design from scratch, concept is simple)
|
||||
|
||||
The biggest gap is noise suppression — adding nnnoiseless would be maybe 2-3 hours of work and immediately noticeable.
|
||||
|
||||
243
scripts/build-linux.sh
Executable file
243
scripts/build-linux.sh
Executable file
@@ -0,0 +1,243 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Build WarzonePhone Linux x86_64 release binaries using a Hetzner Cloud VPS.
# Prerequisites: hcloud CLI authenticated, SSH key "wz" registered.
#
# Usage:
#   ./scripts/build-linux.sh --prepare    Create VM, install deps, upload source
#   ./scripts/build-linux.sh --build      Build release binaries on the VM
#   ./scripts/build-linux.sh --transfer   Download binaries from VM to local
#   ./scripts/build-linux.sh --destroy    Delete the VM
#   ./scripts/build-linux.sh --all        Run prepare + build + transfer (no destroy)
#
# The VM persists between steps so you can iterate on build errors.

# --- Configuration ---------------------------------------------------------
# Name of the SSH key already registered with Hetzner (see: hcloud ssh-key list).
SSH_KEY_NAME="wz"
# Local private key passed to ssh/scp via -i.
# NOTE(review): this looks like a directory path, not a key file — confirm.
SSH_KEY_PATH="/Users/manwe/CascadeProjects/wzp"
# Hetzner server type and base image for the throwaway builder VM.
SERVER_TYPE="cx33"
IMAGE="debian-12"
REMOTE_USER="root"
# Local directory where downloaded release binaries land (--transfer).
OUTPUT_DIR="target/linux-x86_64"
# Local checkout that gets tarred up and uploaded to the VM.
PROJECT_DIR="/Users/manwe/CascadeProjects/warzonePhone"

# Host-key checks disabled on purpose: builder VMs are ephemeral and get a
# fresh host key on every --prepare.
SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=10"

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
||||
#######################################
# Look up the public IPv4 of the wzp-builder VM.
# Outputs: the IP on stdout.
# Returns: exits 1 if no builder VM exists.
#######################################
get_vm_ip() {
  local ip
  # Filter by server name so an unrelated VM in the same hcloud project is
  # never picked up (the previous `tail -1` grabbed whatever listed last).
  ip=$(hcloud server list -o columns=name,ipv4 -o noheader 2>/dev/null \
    | awk '$1 == "wzp-builder" { print $2; exit }')
  if [ -z "$ip" ]; then
    echo "ERROR: No Hetzner VM found. Run --prepare first." >&2
    exit 1
  fi
  echo "$ip"
}
|
||||
|
||||
#######################################
# Run a command on the builder VM over SSH.
# Arguments: the remote command line, passed straight through to ssh.
#######################################
ssh_cmd() {
  local host
  host=$(get_vm_ip)
  # SSH_OPTS is intentionally unquoted: it carries multiple -o options.
  ssh $SSH_OPTS -i "$SSH_KEY_PATH" "$REMOTE_USER@$host" "$@"
}
|
||||
|
||||
#######################################
# scp wrapper with the shared SSH options and key.
# Arguments: passed straight through to scp (caller supplies host:path).
#######################################
scp_cmd() {
  # The previous version looked up the VM IP via get_vm_ip but never used
  # it — a wasted hcloud API call that could also abort the script
  # (exit 1) when no VM exists, even though the caller supplies the host.
  scp $SSH_OPTS -i "$SSH_KEY_PATH" "$@"
}
|
||||
|
||||
#######################################
# Print the builder VM's name ("wzp-builder") if it exists, else nothing.
# Always returns 0 so callers can test the output for emptiness.
#######################################
get_vm_name() {
  # Match by name instead of taking the last listed server, so unrelated
  # VMs in the same hcloud project are never returned (and later deleted).
  hcloud server list -o columns=name -o noheader 2>/dev/null \
    | awk '$1 == "wzp-builder" { print $1; exit }'
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# --prepare: Create VM, install deps, upload source
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#######################################
# Create the builder VM (or reuse an existing one), install the Rust
# toolchain and build dependencies, and upload the project source.
# Globals: SERVER_TYPE, IMAGE, SSH_KEY_NAME, SSH_KEY_PATH, SSH_OPTS,
#          REMOTE_USER (read)
#######################################
do_prepare() {
  local server_name="wzp-builder"

  # Reuse an existing builder if present — just refresh the source.
  local existing
  existing=$(hcloud server list -o columns=name -o noheader 2>/dev/null | grep wzp-builder || true)
  if [ -n "$existing" ]; then
    echo "VM already exists: $existing"
    echo "Reusing it. Uploading fresh source..."
    do_upload
    return
  fi

  echo "[1/5] Creating Hetzner VM..."
  hcloud server create \
    --name "$server_name" \
    --type "$SERVER_TYPE" \
    --image "$IMAGE" \
    --ssh-key "$SSH_KEY_NAME" \
    --location fsn1 \
    --quiet

  local ip
  ip=$(get_vm_ip)
  echo "  VM: $server_name @ $ip"

  # Wait for SSH to come up (up to ~60s). Fail loudly on timeout instead of
  # letting the next ssh_cmd produce a confusing error (the old loop fell
  # through silently and continued regardless).
  echo "[2/5] Waiting for SSH..."
  local reachable=0
  for _ in $(seq 1 30); do
    if ssh $SSH_OPTS -i "$SSH_KEY_PATH" "$REMOTE_USER@$ip" "echo ok" &>/dev/null; then
      reachable=1
      break
    fi
    sleep 2
  done
  if [ "$reachable" -ne 1 ]; then
    echo "ERROR: VM $ip did not become reachable over SSH." >&2
    exit 1
  fi

  # Install build dependencies
  echo "[3/5] Installing build dependencies..."
  ssh_cmd "apt-get update -qq && apt-get install -y -qq build-essential cmake pkg-config libasound2-dev libssl-dev curl git libstdc++-12-dev > /dev/null 2>&1"

  # Install Rust
  echo "[4/5] Installing Rust..."
  ssh_cmd "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable > /dev/null 2>&1"

  # Upload source
  echo "[5/5] Uploading source code..."
  do_upload

  echo ""
  echo "=== VM Ready ==="
  echo "IP:  $ip"
  echo "SSH: ssh -i $SSH_KEY_PATH root@$ip"
  echo ""
  echo "Next: ./scripts/build-linux.sh --build"
}
|
||||
|
||||
#######################################
# Tar up the project (excluding build artifacts, VCS data and notes) and
# unpack it into /root/warzonePhone on the builder VM.
#######################################
do_upload() {
  local tarball="/tmp/wzp-src.tar.gz"

  echo "  Creating source tarball..."
  tar czf "$tarball" \
    --exclude='target' \
    --exclude='.git' \
    --exclude='.claude' \
    --exclude='notes' \
    -C "$PROJECT_DIR" . 2>/dev/null

  local host
  host=$(get_vm_ip)
  echo "  Uploading to VM..."
  scp $SSH_OPTS -i "$SSH_KEY_PATH" "$tarball" "$REMOTE_USER@$host:/root/wzp-src.tar.gz" 2>/dev/null
  ssh_cmd "rm -rf /root/warzonePhone && mkdir -p /root/warzonePhone && tar xzf /root/wzp-src.tar.gz -C /root/warzonePhone" 2>/dev/null
  rm -f "$tarball"
  echo "  Source uploaded."
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# --build: Build release binaries on the VM
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#######################################
# Compile release binaries on the VM: the default binary set first, then
# an audio-enabled client (preserved as wzp-client-audio), then the plain
# client again so the default artifact is feature-free.
#######################################
do_build() {
  local host
  host=$(get_vm_ip)
  echo "=== Building on $host ==="

  echo "[1/3] Building relay + client + web..."
  ssh_cmd "source ~/.cargo/env && cd /root/warzonePhone && cargo build --release --bin wzp-relay --bin wzp-client --bin wzp-bench --bin wzp-web 2>&1"

  echo ""
  echo "[2/3] Building audio-enabled client..."
  # The --features audio build overwrites wzp-client, so stash a copy under
  # a distinct name and rebuild the plain client afterwards.
  ssh_cmd "source ~/.cargo/env && cd /root/warzonePhone && cargo build --release --bin wzp-client --features audio 2>&1" | tail -5
  ssh_cmd "cp /root/warzonePhone/target/release/wzp-client /root/warzonePhone/target/release/wzp-client-audio"
  ssh_cmd "source ~/.cargo/env && cd /root/warzonePhone && cargo build --release --bin wzp-client 2>&1" | tail -3

  echo ""
  echo "[3/3] Verifying binaries..."
  ssh_cmd "ls -lh /root/warzonePhone/target/release/wzp-relay /root/warzonePhone/target/release/wzp-client /root/warzonePhone/target/release/wzp-web /root/warzonePhone/target/release/wzp-bench /root/warzonePhone/target/release/wzp-client-audio"

  echo ""
  echo "=== Build Complete ==="
  echo "Next: ./scripts/build-linux.sh --transfer"
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# --transfer: Download binaries from VM to local
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#######################################
# Download the built binaries and web static assets from the VM into
# OUTPUT_DIR on the local machine.
#######################################
do_transfer() {
  local host
  host=$(get_vm_ip)
  echo "=== Downloading binaries from $host ==="

  mkdir -p "$OUTPUT_DIR/static"

  local bin
  for bin in wzp-relay wzp-client wzp-client-audio wzp-bench wzp-web; do
    echo "  $bin..."
    scp $SSH_OPTS -i "$SSH_KEY_PATH" "$REMOTE_USER@$host:/root/warzonePhone/target/release/$bin" "$OUTPUT_DIR/$bin" 2>/dev/null
  done

  # Static files served by the web bridge.
  local asset
  for asset in index.html audio-processor.js; do
    scp $SSH_OPTS -i "$SSH_KEY_PATH" "$REMOTE_USER@$host:/root/warzonePhone/crates/wzp-web/static/$asset" "$OUTPUT_DIR/static/$asset" 2>/dev/null
  done

  echo ""
  echo "=== Transfer Complete ==="
  ls -lh "$OUTPUT_DIR"/wzp-*
  echo ""
  echo "Deploy with:"
  echo "  scp $OUTPUT_DIR/wzp-relay $OUTPUT_DIR/wzp-client user@server:~/wzp/"
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# --destroy: Delete the VM
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#######################################
# Delete the builder VM, if one exists.
#######################################
do_destroy() {
  local vm
  vm=$(get_vm_name)
  if [ -z "$vm" ]; then
    echo "No VM to destroy."
    return
  fi
  echo "Deleting VM: $vm"
  hcloud server delete "$vm"
  echo "Done."
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Dispatch on the single step flag; unknown/missing flags print usage.
case "${1:-}" in
  --prepare)  do_prepare ;;
  --build)    do_build ;;
  --transfer) do_transfer ;;
  --destroy)  do_destroy ;;
  --all)
    # Full pipeline; the VM is intentionally left running for iteration.
    do_prepare
    do_build
    do_transfer
    echo ""
    echo "VM is still running. Destroy with: ./scripts/build-linux.sh --destroy"
    ;;
  --upload)   do_upload ;;
  *)
    echo "Usage: $0 {--prepare|--build|--transfer|--destroy|--all|--upload}"
    echo ""
    echo "Steps:"
    echo "  --prepare   Create VM, install deps, upload source"
    echo "  --build     Build release binaries (shows full output)"
    echo "  --transfer  Download binaries to target/linux-x86_64/"
    echo "  --destroy   Delete the VM"
    echo "  --all       prepare + build + transfer (VM persists)"
    echo "  --upload    Re-upload source to existing VM"
    exit 1
    ;;
esac
|
||||
11
scripts/cleanup-builder.sh
Executable file
11
scripts/cleanup-builder.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Clean up any wzp-builder servers left running.
echo "Looking for wzp-builder servers..."
# awk (unlike grep) exits 0 when nothing matches, so an empty server list
# does not trip `set -o pipefail` + `set -e` and kill the script before
# "Done." (the old `... | grep wzp-builder | while ...` did exactly that).
hcloud server list -o noheader | awk '/wzp-builder/' | while read -r id name _; do
  echo "  Deleting $name (id=$id)..."
  hcloud server delete "$id"
done
echo "Done."
|
||||
Reference in New Issue
Block a user