Compare commits
30 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3afbfb42cf | ||
|
|
bba9b0512c | ||
|
|
205030ce33 | ||
|
|
b3c12b7f8b | ||
|
|
a655d3bbe8 | ||
|
|
82ea10f2d5 | ||
|
|
e6cecc7bd8 | ||
|
|
da76c76c93 | ||
|
|
27c69d8982 | ||
|
|
2cb8519c95 | ||
|
|
9ca124cb76 | ||
|
|
c06a4d0c9a | ||
|
|
817535a0ad | ||
|
|
ba02ed36b5 | ||
|
|
4cdcc4e6c4 | ||
|
|
7dd4820d2c | ||
|
|
2087e5a75f | ||
|
|
9e3cd6d6d4 | ||
|
|
4403eae4b9 | ||
|
|
c08bcffaff | ||
|
|
d61fdb1b94 | ||
|
|
89391e1781 | ||
|
|
d2fdc9c6ae | ||
|
|
8c853c3605 | ||
|
|
fe28c04c19 | ||
|
|
66be99bef0 | ||
|
|
94b122ac25 | ||
|
|
a07158ed22 | ||
|
|
1cd552d2dc | ||
|
|
3af40cb275 |
@@ -14,7 +14,7 @@ jobs:
|
|||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
apt-get update && apt-get install -y --no-install-recommends \
|
apt-get update && apt-get install -y --no-install-recommends \
|
||||||
git curl jq ca-certificates zip \
|
git curl jq ca-certificates zip unzip \
|
||||||
musl-tools \
|
musl-tools \
|
||||||
gcc-aarch64-linux-gnu \
|
gcc-aarch64-linux-gnu \
|
||||||
gcc-arm-linux-gnueabihf \
|
gcc-arm-linux-gnueabihf \
|
||||||
@@ -23,7 +23,14 @@ jobs:
|
|||||||
x86_64-unknown-linux-musl \
|
x86_64-unknown-linux-musl \
|
||||||
aarch64-unknown-linux-musl \
|
aarch64-unknown-linux-musl \
|
||||||
armv7-unknown-linux-musleabihf \
|
armv7-unknown-linux-musleabihf \
|
||||||
x86_64-pc-windows-gnu
|
x86_64-pc-windows-gnu \
|
||||||
|
aarch64-linux-android \
|
||||||
|
armv7-linux-androideabi
|
||||||
|
# Install Android NDK for cross-compilation
|
||||||
|
NDK_VER=r27c
|
||||||
|
curl -sL https://dl.google.com/android/repository/android-ndk-${NDK_VER}-linux.zip -o /tmp/ndk.zip
|
||||||
|
unzip -q /tmp/ndk.zip -d /opt && rm /tmp/ndk.zip
|
||||||
|
export ANDROID_NDK_HOME=/opt/android-ndk-${NDK_VER}
|
||||||
|
|
||||||
- name: Ensure code is present
|
- name: Ensure code is present
|
||||||
run: |
|
run: |
|
||||||
@@ -47,6 +54,12 @@ jobs:
|
|||||||
|
|
||||||
[target.x86_64-pc-windows-gnu]
|
[target.x86_64-pc-windows-gnu]
|
||||||
linker = "x86_64-w64-mingw32-gcc"
|
linker = "x86_64-w64-mingw32-gcc"
|
||||||
|
|
||||||
|
[target.aarch64-linux-android]
|
||||||
|
linker = "/opt/android-ndk-r27c/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android35-clang"
|
||||||
|
|
||||||
|
[target.armv7-linux-androideabi]
|
||||||
|
linker = "/opt/android-ndk-r27c/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi35-clang"
|
||||||
TOML
|
TOML
|
||||||
|
|
||||||
- name: Build Linux x86_64
|
- name: Build Linux x86_64
|
||||||
@@ -61,6 +74,12 @@ jobs:
|
|||||||
- name: Build Windows x86_64
|
- name: Build Windows x86_64
|
||||||
run: cargo build --release --target x86_64-pc-windows-gnu
|
run: cargo build --release --target x86_64-pc-windows-gnu
|
||||||
|
|
||||||
|
- name: Build Android aarch64 (ARMv8)
|
||||||
|
run: cargo build --release --target aarch64-linux-android
|
||||||
|
|
||||||
|
- name: Build Android armv7 (ARMv7)
|
||||||
|
run: cargo build --release --target armv7-linux-androideabi
|
||||||
|
|
||||||
- name: Package all
|
- name: Package all
|
||||||
run: |
|
run: |
|
||||||
mkdir -p /artifacts
|
mkdir -p /artifacts
|
||||||
@@ -81,6 +100,14 @@ jobs:
|
|||||||
zip /artifacts/btest-windows-x86_64.zip btest.exe
|
zip /artifacts/btest-windows-x86_64.zip btest.exe
|
||||||
cd -
|
cd -
|
||||||
|
|
||||||
|
cd target/aarch64-linux-android/release
|
||||||
|
tar czf /artifacts/btest-android-aarch64.tar.gz btest
|
||||||
|
cd -
|
||||||
|
|
||||||
|
cd target/armv7-linux-androideabi/release
|
||||||
|
tar czf /artifacts/btest-android-armv7.tar.gz btest
|
||||||
|
cd -
|
||||||
|
|
||||||
cd /artifacts
|
cd /artifacts
|
||||||
sha256sum * > checksums-sha256.txt
|
sha256sum * > checksums-sha256.txt
|
||||||
cat checksums-sha256.txt
|
cat checksums-sha256.txt
|
||||||
@@ -103,6 +130,8 @@ jobs:
|
|||||||
| Linux | aarch64 (RPi 64-bit) | btest-linux-aarch64.tar.gz |
|
| Linux | aarch64 (RPi 64-bit) | btest-linux-aarch64.tar.gz |
|
||||||
| Linux | armv7 (RPi 32-bit) | btest-linux-armv7.tar.gz |
|
| Linux | armv7 (RPi 32-bit) | btest-linux-armv7.tar.gz |
|
||||||
| Windows | x86_64 | btest-windows-x86_64.zip |
|
| Windows | x86_64 | btest-windows-x86_64.zip |
|
||||||
|
| Android | aarch64 (ARMv8, Termux) | btest-android-aarch64.tar.gz |
|
||||||
|
| Android | armv7 (ARMv7, Termux) | btest-android-armv7.tar.gz |
|
||||||
| macOS | aarch64 / x86_64 | Run \`scripts/build-macos-release.sh --upload ${TAG}\` |
|
| macOS | aarch64 / x86_64 | Run \`scripts/build-macos-release.sh --upload ${TAG}\` |
|
||||||
| Docker | x86_64 | \`docker pull ${REGISTRY}/manawenuz/btest-rs:${TAG}\` |
|
| Docker | x86_64 | \`docker pull ${REGISTRY}/manawenuz/btest-rs:${TAG}\` |
|
||||||
|
|
||||||
|
|||||||
54
BENCHMARKS.md
Normal file
54
BENCHMARKS.md
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
# Benchmarks
|
||||||
|
|
||||||
|
This project uses [Criterion.rs](https://bheisler.github.io/criterion.rs/book/) for performance benchmarking and regression detection.
|
||||||
|
|
||||||
|
## Running Benchmarks
|
||||||
|
|
||||||
|
Run all benchmarks:
|
||||||
|
```bash
|
||||||
|
cargo bench
|
||||||
|
```
|
||||||
|
|
||||||
|
Run a specific benchmark suite:
|
||||||
|
```bash
|
||||||
|
cargo bench --bench protocol
|
||||||
|
cargo bench --bench bandwidth
|
||||||
|
cargo bench --bench tcp_rx_scan
|
||||||
|
cargo bench --bench ecsrp5
|
||||||
|
```
|
||||||
|
|
||||||
|
Run in "quick" mode (fewer iterations, useful for development):
|
||||||
|
```bash
|
||||||
|
cargo bench --bench tcp_rx_scan -- --quick
|
||||||
|
```
|
||||||
|
|
||||||
|
## Benchmark Suites
|
||||||
|
|
||||||
|
### `protocol` — Protocol Serialization
|
||||||
|
Measures the zero-allocation serialization/deserialization of `Command` (16 bytes) and `StatusMessage` (12 bytes) structs.
|
||||||
|
|
||||||
|
### `bandwidth` — Bandwidth State Atomics
|
||||||
|
Measures `BandwidthState` hot-path operations: `fetch_add`, `spend_budget`, `calc_send_interval`, `advance_next_send`, and `summary`.
|
||||||
|
|
||||||
|
### `tcp_rx_scan` — TCP RX Status Message Scan
|
||||||
|
Compares the optimized `memchr`-based scan against the old naive O(n) byte-by-byte loop on 256KB buffers. Key scenarios:
|
||||||
|
- **All zeros** (common case — data packets contain no status)
|
||||||
|
- **Status at start**
|
||||||
|
- **Status at end** (worst case for naive scan)
|
||||||
|
- **Split messages** (status spans two TCP reads)
|
||||||
|
|
||||||
|
### `ecsrp5` — EC-SRP5 Curve Construction
|
||||||
|
Compares `WCurve::new()` (heavy `BigUint` modular arithmetic) against the cached `&*WCURVE` access to demonstrate the Sprint 1 optimization.
|
||||||
|
|
||||||
|
## Interpreting Results
|
||||||
|
|
||||||
|
Criterion generates HTML reports in `target/criterion/`. Open `target/criterion/report/index.html` after running benchmarks to view interactive charts.
|
||||||
|
|
||||||
|
Example results (Apple M3 Pro, release profile):
|
||||||
|
|
||||||
|
| Benchmark | Naive/Uncached | Optimized/Cached | Speedup |
|
||||||
|
|-----------|---------------|------------------|---------|
|
||||||
|
| TCP RX scan 256KB (status at end) | 251 µs | 4.5 µs | **~55×** |
|
||||||
|
| WCurve construction | 126 µs | 1.0 ns | **~126,000×** |
|
||||||
|
| Command serialize | — | 7.7 ns | — |
|
||||||
|
| Bandwidth `fetch_add` | — | ~1 ns | — |
|
||||||
2155
Cargo.lock
generated
2155
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
56
Cargo.toml
56
Cargo.toml
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "btest-rs"
|
name = "btest-rs"
|
||||||
version = "0.6.0"
|
version = "0.6.3"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
description = "MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth — a Rust reimplementation"
|
description = "MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth — a Rust reimplementation"
|
||||||
license = "MIT AND Apache-2.0"
|
license = "MIT AND Apache-2.0"
|
||||||
@@ -16,6 +16,23 @@ path = "src/lib.rs"
|
|||||||
name = "btest"
|
name = "btest"
|
||||||
path = "src/main.rs"
|
path = "src/main.rs"
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "btest-client"
|
||||||
|
path = "src/bin/client_only.rs"
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "btest-server"
|
||||||
|
path = "src/bin/server_only.rs"
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "btest-server-pro"
|
||||||
|
path = "src/server_pro/main.rs"
|
||||||
|
required-features = ["pro"]
|
||||||
|
|
||||||
|
[features]
|
||||||
|
default = []
|
||||||
|
pro = ["dep:rusqlite", "dep:ldap3", "dep:axum", "dep:tower-http", "dep:serde", "dep:serde_json", "dep:askama"]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
tokio = { version = "1", features = ["full"] }
|
tokio = { version = "1", features = ["full"] }
|
||||||
clap = { version = "4", features = ["derive"] }
|
clap = { version = "4", features = ["derive"] }
|
||||||
@@ -32,9 +49,46 @@ num-traits = "0.2.19"
|
|||||||
num-integer = "0.1.46"
|
num-integer = "0.1.46"
|
||||||
sha2 = "0.11.0"
|
sha2 = "0.11.0"
|
||||||
hostname = "0.4.2"
|
hostname = "0.4.2"
|
||||||
|
chrono = "0.4"
|
||||||
|
memchr = "2"
|
||||||
|
rusqlite = { version = "0.39.0", features = ["bundled"], optional = true }
|
||||||
|
ldap3 = { version = "0.12.1", optional = true }
|
||||||
|
axum = { version = "0.8.8", features = ["tokio"], optional = true }
|
||||||
|
tower-http = { version = "0.6.8", features = ["fs", "cors"], optional = true }
|
||||||
|
serde = { version = "1.0.228", features = ["derive"], optional = true }
|
||||||
|
serde_json = { version = "1.0.149", optional = true }
|
||||||
|
askama = { version = "0.15.6", optional = true }
|
||||||
|
|
||||||
[profile.release]
|
[profile.release]
|
||||||
opt-level = 3
|
opt-level = 3
|
||||||
lto = true
|
lto = true
|
||||||
strip = true
|
strip = true
|
||||||
codegen-units = 1
|
codegen-units = 1
|
||||||
|
|
||||||
|
# Minimal size profile for embedded/OpenWrt targets
|
||||||
|
[profile.release-small]
|
||||||
|
inherits = "release"
|
||||||
|
opt-level = "z"
|
||||||
|
panic = "abort"
|
||||||
|
|
||||||
|
# --- Benchmarks ---
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
criterion = { version = "0.5", features = ["html_reports"] }
|
||||||
|
pprof = { version = "0.14", features = ["criterion", "flamegraph"] }
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "protocol"
|
||||||
|
harness = false
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "bandwidth"
|
||||||
|
harness = false
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "tcp_rx_scan"
|
||||||
|
harness = false
|
||||||
|
|
||||||
|
[[bench]]
|
||||||
|
name = "ecsrp5"
|
||||||
|
harness = false
|
||||||
|
|||||||
302
PERFORMANCE_AUDIT.md
Normal file
302
PERFORMANCE_AUDIT.md
Normal file
@@ -0,0 +1,302 @@
|
|||||||
|
# btest-rs Performance Audit
|
||||||
|
|
||||||
|
**Date:** 2026-04-30
|
||||||
|
**Scope:** Full codebase (`src/`, `tests/`, `Cargo.toml`)
|
||||||
|
**Methodology:** Static code analysis, hot-path tracing, lock/contention review, algorithmic complexity analysis
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Executive Summary
|
||||||
|
|
||||||
|
The codebase is generally well-structured for a network I/O tool, with good use of atomics in the per-packet hot path and zero-allocation protocol serialization. However, **three critical bottlenecks** significantly limit throughput and scalability:
|
||||||
|
|
||||||
|
1. **O(n) buffer scan on every TCP read** in the client RX loop (up to 256KB scanned per `read()` call)
|
||||||
|
2. **Expensive EC curve reconstruction on every authentication** (heavy `BigUint` modular arithmetic)
|
||||||
|
3. **Single SQLite connection mutex** serializing all DB operations in `server_pro`
|
||||||
|
|
||||||
|
Additionally, there is **no benchmark or profiling infrastructure** in the project, making it impossible to measure improvements or catch regressions.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Severity Legend
|
||||||
|
|
||||||
|
| Icon | Severity | Impact |
|
||||||
|
|------|----------|--------|
|
||||||
|
| 🔴 | **Critical** | Direct throughput/latency hit in hot path; fix immediately |
|
||||||
|
| 🟠 | **High** | Significant overhead under load; fix in next sprint |
|
||||||
|
| 🟡 | **Medium** | Noticeable at scale or under specific conditions |
|
||||||
|
| 🟢 | **Low** | Cosmetic / easy wins; batch with other work |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🔴 Critical Bottlenecks
|
||||||
|
|
||||||
|
### 1. O(n) Linear Buffer Scan in `tcp_client_rx_loop` (`src/client.rs:210-216`)
|
||||||
|
|
||||||
|
**Problem:** On every TCP `read()` call (up to 256KB), the client performs a byte-by-byte scan looking for interleaved 12-byte status messages:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
for i in 0..=(n - STATUS_MSG_SIZE) {
|
||||||
|
if buf[i] == STATUS_MSG_TYPE && buf[i + 1] >= 0x80 {
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Since data packets are all zeros and status bytes are extremely rare, this **almost always scans the entire 256KB buffer** uselessly. At high bandwidth (many reads/sec), this wastes massive CPU cycles and pollutes cache lines.
|
||||||
|
|
||||||
|
**Impact:** CPU-bound slowdown on the client RX side during bidirectional TCP tests. The compiler *may* auto-vectorize the simple loop, but it still processes ~256K bytes per read.
|
||||||
|
|
||||||
|
**Fix Options (pick one):**
|
||||||
|
- **Best:** Use `memchr` (crate or `std::slice::memchr` on nightly) to find `0x07` bytes. On all-zero buffers this exits after a few SIMD-width checks.
|
||||||
|
- **Alternative:** Since status messages are injected at `write_all` boundaries and data is all zeros, maintain a small 12-byte sliding ring buffer across reads. Process the stream with a tiny state machine instead of scanning the whole buffer.
|
||||||
|
- **Alternative:** Track read bytes modulo expected packet size. Status messages are injected between full packets, so they will appear at predictable offsets *if* the client knows the server's `effective_size`. This requires protocol coordination.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. `WCurve::new()` Recomputes Generator on Every Auth (`src/ecsrp5.rs:363,499`)
|
||||||
|
|
||||||
|
**Problem:** Every EC-SRP5 authentication (client and server) calls `WCurve::new()`, which performs `lift_x(9)` → `prime_mod_sqrt()` — heavy `BigUint` modular arithmetic to derive the curve generator point.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub async fn client_authenticate<S: ...>(stream: &mut S, username: &str, password: &str) -> Result<()> {
|
||||||
|
let w = WCurve::new(); // <-- expensive, same result every time
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The curve constants (`P`, `CURVE_ORDER`, `WEIERSTRASS_A`) are already cached as `LazyLock` statics, but the generator point is not.
|
||||||
|
|
||||||
|
**Impact:** Auth latency spikes, especially on the server under many concurrent connections. Each auth does redundant `BigUint` allocations and modular square roots.
|
||||||
|
|
||||||
|
**Fix:** Cache `WCurve` (or at least the generator point) in a global `LazyLock`:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
static WCURVE: std::sync::LazyLock<WCurve> = std::sync::LazyLock::new(WCurve::new);
|
||||||
|
```
|
||||||
|
|
||||||
|
Then use `&*WCURVE` in both `client_authenticate` and `server_authenticate`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. Single SQLite Mutex Serializes All DB Operations (`src/server_pro/user_db.rs:15-18`)
|
||||||
|
|
||||||
|
**Problem:** The entire `server_pro` database layer uses a single shared `Connection` behind a `std::sync::Mutex`:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct UserDb {
|
||||||
|
conn: Arc<Mutex<Connection>>,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
While SQLite WAL mode is enabled (allowing readers to proceed during writes), **the Rust mutex still serializes all access to the connection object**. Under concurrent load with many tests starting/finishing, this becomes the primary bottleneck.
|
||||||
|
|
||||||
|
**Critical sub-issue:** `QuotaManager::remaining_budget()` (`src/server_pro/quota.rs:387`) performs **up to 15 separate SQLite queries** in sequence, locking the mutex 15+ times per pre-test check.
|
||||||
|
|
||||||
|
**Impact:** Connection setup/teardown latency increases linearly with concurrency. Quota checks and usage recording block each other.
|
||||||
|
|
||||||
|
**Fix Options:**
|
||||||
|
- **Connection pooling:** Use `r2d2_sqlite` or `deadpool-sqlite` to maintain a small pool of connections (SQLite handles this well in WAL mode).
|
||||||
|
- **Separate read/write paths:** Open a read-only connection for quota checks (`remaining_budget`) and a dedicated write connection for usage recording. SQLite WAL allows this.
|
||||||
|
- **Batch quota checks:** Cache quota results for a few seconds per user/IP to avoid redundant queries.
|
||||||
|
- **Channel-based writer:** Use a single dedicated DB writer task with an `mpsc` channel so only one task ever touches the connection, eliminating lock contention entirely.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🟠 High Severity Issues
|
||||||
|
|
||||||
|
### 4. 100ms Busy-Poll Wait in Multi-Connection TCP (`src/server.rs:313-332`)
|
||||||
|
|
||||||
|
**Problem:** When waiting for secondary TCP connections to join a multi-connection session, the primary connection busy-polls the session map every 100ms:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
loop {
|
||||||
|
let count = { let map = sessions.lock().await; ... };
|
||||||
|
if count + 1 >= conn_count as usize { break; }
|
||||||
|
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This adds up to **100ms of unnecessary latency** to every multi-connection test startup. It also hammers the async mutex needlessly.
|
||||||
|
|
||||||
|
**Fix:** Replace with `tokio::sync::Notify`. When a secondary connection registers itself, it calls `notify_one()`. The primary waits on `notified().await` with a timeout, waking instantly when ready.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 5. FreeBSD CPU Sampling Spawns Process Every Second (`src/cpu.rs:142`)
|
||||||
|
|
||||||
|
**Problem:** On FreeBSD, `get_cpu_times()` spawns `sysctl -n kern.cp_time` via `std::process::Command` every second:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
fn get_cpu_times() -> (u64, u64) {
|
||||||
|
if let Ok(output) = std::process::Command::new("sysctl")
|
||||||
|
.arg("-n").arg("kern.cp_time").output()
|
||||||
|
{ ... }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
`fork()` + `exec()` is extremely expensive relative to the work being done (reading 5 integers).
|
||||||
|
|
||||||
|
**Fix:** Use `libc::sysctl()` via FFI, matching the macOS implementation style. Cache the `mib` array and call the syscall directly.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 6. Per-Call Timer Registration in UDP RX Loops (`src/client.rs:393`, `src/server.rs:925`)
|
||||||
|
|
||||||
|
**Problem:** Both UDP RX loops create a new `tokio::time::timeout` timer on **every single `recv`/`recv_from` call**:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
match tokio::time::timeout(Duration::from_secs(5), socket.recv(&mut buf)).await
|
||||||
|
```
|
||||||
|
|
||||||
|
At high packet rates (e.g., 100K pps), registering and canceling timers on the Tokio timer wheel adds measurable overhead.
|
||||||
|
|
||||||
|
**Fix:** Use `tokio::select!` with a long-lived `tokio::time::sleep` future that is reset, or use the socket's built-in SO_RCVTIMEO if available via `socket2`. Alternatively, since UDP is connectionless, consider whether a 5-second timeout is needed on every call or if the outer test duration timer is sufficient.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🟡 Medium Severity Issues
|
||||||
|
|
||||||
|
### 7. String Error Matching with Allocation (`src/server_pro/enforcer.rs:157-161`)
|
||||||
|
|
||||||
|
```rust
|
||||||
|
match format!("{}", e).as_str() {
|
||||||
|
s if s.contains("daily") => ...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
`format!("{}", e)` allocates a `String` from the error just to do substring matching. Note that `e.to_string()` allocates identically, so it is not a fix — match on typed error variants directly where possible, reserving string matching only for opaque third-party errors.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 8. `ip.to_string()` Called Repeatedly in Quota Checks (`src/server_pro/quota.rs:389`)
|
||||||
|
|
||||||
|
```rust
|
||||||
|
let ip_str = ip.to_string();
|
||||||
|
// ... used in 6+ DB calls
|
||||||
|
```
|
||||||
|
|
||||||
|
This allocates a `String` on every quota check. Accept `&str` or `IpAddr` directly in DB methods, or cache the string.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 9. `chrono_date_today()` Recomputes Calendar from Epoch (`src/server_pro/user_db.rs:617-638`)
|
||||||
|
|
||||||
|
A hand-rolled date calculation loops through years from 1970 and months every time it's called (which is before almost every DB write). The `chrono` crate is already used indirectly by `rusqlite`; add it as a direct dependency and replace with `chrono::Local::now().format("%Y-%m-%d")`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🟢 Low Severity / Easy Wins
|
||||||
|
|
||||||
|
### 10. CSV File Reopened on Every Write (`src/csv_output.rs:77`)
|
||||||
|
|
||||||
|
```rust
|
||||||
|
if let Ok(mut f) = OpenOptions::new().append(true).open(path) {
|
||||||
|
let _ = writeln!(f, "{}", row);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Called once per test, not per-packet, but still suboptimal. Consider keeping a lazily-initialized `Mutex<Option<File>>` or using `std::fs::OpenOptions` once at init and storing the handle.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 11. Global Syslog Mutex Held During I/O (`src/syslog_logger.rs`)
|
||||||
|
|
||||||
|
```rust
|
||||||
|
static SYSLOG: Mutex<Option<SyslogSender>> = Mutex::new(None);
|
||||||
|
```
|
||||||
|
|
||||||
|
The global `std::sync::Mutex` is held while formatting the timestamp (expensive manual calendar math) and sending UDP. Switch to `parking_lot::Mutex` (faster) or `tokio::sync::Mutex` if async, and format the message outside the lock. Better yet, use `tracing`'s built-in syslog integration or a structured appender.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 12. `hash_password()` Uses `format!` + `format!` in Hex Loop (`src/server_pro/user_db.rs:612-614`)
|
||||||
|
|
||||||
|
```rust
|
||||||
|
hasher.update(format!("{}:{}", username, password).as_bytes());
|
||||||
|
result.iter().map(|b| format!("{:02x}", b)).collect() // N allocs for N bytes
|
||||||
|
```
|
||||||
|
|
||||||
|
The hex encoding allocates one `String` per byte. Use a small fixed buffer or `hex` crate (already used elsewhere in `ecsrp5.rs`).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 13. Redundant `Instant::now()` Calls in TX Loop (`src/server.rs:593,606`)
|
||||||
|
|
||||||
|
```rust
|
||||||
|
if send_status && Instant::now() >= next_status {
|
||||||
|
// ...
|
||||||
|
next_status = Instant::now() + Duration::from_secs(1);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Two monotonic clock reads per loop iteration. Cache `let now = Instant::now();` at the top of the loop.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Architecture Observations
|
||||||
|
|
||||||
|
### What the Code Does Well
|
||||||
|
- **Zero-allocation protocol layer:** `serialize()` returns fixed-size stack arrays (`[u8; 12]`, `[u8; 16]`). Excellent.
|
||||||
|
- **Atomic bandwidth tracking:** `BandwidthState` uses `AtomicU64` with `Relaxed` ordering in the per-packet path. No locks in the data plane.
|
||||||
|
- **Buffer reuse:** TX/RX loops allocate `vec![0u8; ...]` once before the loop. Good.
|
||||||
|
- **Aggressive release profile:** `lto = true`, `codegen-units = 1`, `opt-level = 3`.
|
||||||
|
|
||||||
|
### Async Runtime Usage
|
||||||
|
- `tokio` with `full` features is used. For a primarily I/O-bound tool, this is appropriate.
|
||||||
|
- `tokio::task::yield_now().await` is used in unlimited-rate mode to prevent starving the runtime. This is correct but consider whether `tokio::task::spawn_blocking` or dedicated CPU pinning is needed for the EC-SRP5 math (which is CPU-bound and currently runs on the async runtime during auth).
|
||||||
|
|
||||||
|
### Memory Safety
|
||||||
|
- Several `unwrap()`/`expect()` calls in setup paths (socket binding, address parsing). These are acceptable for config errors but should use `?` propagation where possible to allow graceful degradation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Missing Performance Infrastructure
|
||||||
|
|
||||||
|
| Infrastructure | Status | Recommendation |
|
||||||
|
|----------------|--------|----------------|
|
||||||
|
| **Benchmarks** | ❌ None | Add `criterion` + `benches/` for `BandwidthState`, protocol ser/de, and EC-SRP5 auth |
|
||||||
|
| **Profiling hooks** | ❌ None | Add optional `pprof` or `dhat` dev-deps for heap profiling |
|
||||||
|
| **Throughput regression tests** | ⚠️ Partial | Integration tests assert `tx > 0` and `rx > 0` but don't measure sustained throughput |
|
||||||
|
| **Load tests** | ❌ None | Add a `benches/load_test.rs` that spawns 100+ concurrent tests against a local server |
|
||||||
|
| **CI performance gates** | ❌ None | Consider a benchmark action that fails on >5% regression |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Priority Action Plan
|
||||||
|
|
||||||
|
### Phase 1: Hot-Path Fixes (1-2 days)
|
||||||
|
1. Replace buffer scan with `memchr` or ring-buffer approach in `tcp_client_rx_loop`
|
||||||
|
2. Cache `WCurve` in a global `LazyLock`
|
||||||
|
3. Replace 100ms poll with `tokio::sync::Notify` in multi-conn wait
|
||||||
|
|
||||||
|
### Phase 2: Scalability (2-3 days)
|
||||||
|
4. Add SQLite connection pooling or channel-based writer in `server_pro`
|
||||||
|
5. Cache `remaining_budget()` results for 5-10 seconds
|
||||||
|
6. Fix FreeBSD CPU sampling to use `libc::sysctl` FFI
|
||||||
|
|
||||||
|
### Phase 3: Polish & Tooling (1-2 days)
|
||||||
|
7. Replace manual date arithmetic with `chrono`
|
||||||
|
8. Add `criterion` benchmarks for auth and bandwidth state
|
||||||
|
9. Fix low-severity allocation issues (CSV, syslog, hex encoding)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Appendix: File-by-File Quick Reference
|
||||||
|
|
||||||
|
| File | Lines | Hot Path? | Key Concern |
|
||||||
|
|------|-------|-----------|-------------|
|
||||||
|
| `src/client.rs` | 531 | ✅ Yes | O(n) 256KB scan per TCP read |
|
||||||
|
| `src/server.rs` | 1094 | ✅ Yes | 100ms poll wait, status injection timing |
|
||||||
|
| `src/ecsrp5.rs` | 660 | ✅ Yes (auth) | `WCurve::new()` recomputed per auth |
|
||||||
|
| `src/bandwidth.rs` | 263 | ✅ Yes (atomics) | Well-designed; no issues |
|
||||||
|
| `src/protocol.rs` | 214 | ✅ Yes (ser/de) | Zero-allocation; excellent |
|
||||||
|
| `src/cpu.rs` | 215 | ⚠️ Periodic | FreeBSD `fork+exec` every second |
|
||||||
|
| `src/server_pro/quota.rs` | 470 | ⚠️ Periodic | 15 DB queries per budget check |
|
||||||
|
| `src/server_pro/user_db.rs` | 641 | ⚠️ All DB ops | Single mutex serializes everything |
|
||||||
|
| `src/server_pro/server_loop.rs` | 449 | ✅ Yes | DB auth locks during connection setup |
|
||||||
|
| `src/server_pro/enforcer.rs` | 411 | ⚠️ Periodic | String error matching allocates |
|
||||||
|
| `src/csv_output.rs` | 86 | ❌ No | File reopen per write |
|
||||||
|
| `src/syslog_logger.rs` | 154 | ❌ No | Global mutex + manual calendar math |
|
||||||
|
| `src/auth.rs` | 164 | ⚠️ Auth only | Minor; double MD5 per auth |
|
||||||
|
| `src/main.rs` | 243 | ❌ No | Entry point only |
|
||||||
634
PERFORMANCE_PRDS.md
Normal file
634
PERFORMANCE_PRDS.md
Normal file
@@ -0,0 +1,634 @@
|
|||||||
|
# Performance Improvement PRDs
|
||||||
|
|
||||||
|
**Project:** btest-rs
|
||||||
|
**Constraint:** 100% MikroTik BTest protocol compatibility — no wire-format or behavioral changes visible to MikroTik devices
|
||||||
|
**Date:** 2026-04-30
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## How to Read This Document
|
||||||
|
|
||||||
|
Each PRD is sorted by **recommended execution order**, which balances:
|
||||||
|
- **Effort** (development + review + test time)
|
||||||
|
- **Risk** (probability of regression or compatibility break)
|
||||||
|
- **Performance Effect** (measured or estimated throughput/latency improvement)
|
||||||
|
- **MikroTik Compatibility Risk** (whether the change could affect interoperability)
|
||||||
|
|
||||||
|
**Sorting rationale:** Execute *quick wins* first to build velocity and reduce risk surface, then tackle *high-impact* items with full attention.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Summary Matrix
|
||||||
|
|
||||||
|
| # | PRD | Effort | Risk | Perf Impact | MikroTik Risk | Tier |
|
||||||
|
|---|-----|--------|------|-------------|---------------|------|
|
||||||
|
| 1 | WCurve Global Cache | 30 min | None | Medium | None | Quick Win |
|
||||||
|
| 2 | Redundant `Instant::now()` | 15 min | None | Low | None | Quick Win |
|
||||||
|
| 3 | `hash_password` Hex Fix | 30 min | None | Low | None | Quick Win |
|
||||||
|
| 4 | CSV File Handle Cache | 30 min | None | Low | None | Quick Win |
|
||||||
|
| 5 | Error String Matching | 30 min | None | Low | None | Quick Win |
|
||||||
|
| 6 | `chrono_date_today` Replace | 1 hr | Low | Low | None | Quick Win |
|
||||||
|
| 7 | Syslog Mutex + Timestamp | 1 hr | Low | Low | None | Quick Win |
|
||||||
|
| 8 | `ip.to_string()` Cache | 1 hr | Low | Low | None | Quick Win |
|
||||||
|
| 9 | FreeBSD CPU FFI | 3 hrs | Medium | Medium | None | Platform Fix |
|
||||||
|
| 10 | Multi-Conn Notify Wake | 2 hrs | Medium | Medium | None | Latency Fix |
|
||||||
|
| 11 | UDP Timer Reuse | 2 hrs | Medium | Medium | None | Throughput Fix |
|
||||||
|
| 12 | TCP RX Scan Optimization | 4 hrs | Medium | **High** | Low | Hot Path Fix |
|
||||||
|
| 13 | SQLite Connection Pool | 1–2 days | High | **High** | None | Scalability Fix |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Tier 1: Quick Wins (Do These First)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-001: Cache `WCurve` in Global `LazyLock`
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
`WCurve::new()` is called on every EC-SRP5 authentication (client and server). It recomputes the Weierstrass curve generator point via `lift_x(9)` → `prime_mod_sqrt()`, which performs heavy `BigUint` modular arithmetic. The result is deterministic and immutable.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** This is pure internal mathematics. The wire bytes, auth handshake order, and hash outputs are identical. No protocol-visible change.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Eliminate redundant `BigUint` modular square root computation per authentication.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
```rust
|
||||||
|
// src/ecsrp5.rs
|
||||||
|
static WCURVE: std::sync::LazyLock<WCurve> = std::sync::LazyLock::new(WCurve::new);
|
||||||
|
```
|
||||||
|
|
||||||
|
Replace all call sites:
|
||||||
|
- `src/ecsrp5.rs:363` (`client_authenticate`)
|
||||||
|
- `src/ecsrp5.rs:499` (`server_authenticate`)
|
||||||
|
|
||||||
|
Change `let w = WCurve::new();` to `let w = &*WCURVE;`. Update any `WCurve` methods that take `self` to take `&self` if they don't already.
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] `ecsrp5_test.rs` passes unchanged.
|
||||||
|
- [ ] `full_integration_test.rs` EC-SRP5 tests pass unchanged.
|
||||||
|
- [ ] `WCurve::new()` is called exactly once per process lifetime.
|
||||||
|
- [ ] No change to serialized auth bytes on the wire.
|
||||||
|
|
||||||
|
**Effort:** 30 min
|
||||||
|
**Risk:** None — stateless deterministic cache
|
||||||
|
**Performance Impact:** Medium — reduces per-auth CPU time by ~30-50% (estimated), especially noticeable under concurrent logins.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-002: Deduplicate `Instant::now()` in `tcp_tx_loop_inner`
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
The TCP TX loop calls `Instant::now()` twice per iteration (status check and interval scheduling). Monotonic clock reads are cheap but not free, and occur in the hottest loop in the system.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** Timing granularity remains identical.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Reduce redundant monotonic clock reads in the per-packet hot path (on mainstream platforms `Instant::now()` is a vDSO clock read rather than a full syscall, but it is still measurable at high iteration rates).
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
```rust
|
||||||
|
let now = Instant::now();
|
||||||
|
if send_status && now >= next_status { ... next_status = now + Duration::from_secs(1); }
|
||||||
|
// ... reuse `now` for interval math
|
||||||
|
```
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] TCP send/receive/both integration tests pass.
|
||||||
|
- [ ] No behavioral change in status injection timing.
|
||||||
|
|
||||||
|
**Effort:** 15 min
|
||||||
|
**Risk:** None
|
||||||
|
**Performance Impact:** Low — micro-optimization, but trivial.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-003: Fix `hash_password()` Hex Encoding Allocations
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
`user_db.rs:614` allocates one `String` per byte when hex-encoding a 32-byte SHA256 hash:
|
||||||
|
```rust
|
||||||
|
result.iter().map(|b| format!("{:02x}", b)).collect()
|
||||||
|
```
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** Output string is identical.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Replace N-allocation hex encoding with a single-allocation approach.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
Use the `hex` crate (already in the dependency tree via `ecsrp5.rs` debug logging), or build the string manually with `write!` into a `String::with_capacity(64)` — each byte contributes two hex digits, so 64 characters for a 32-byte hash.
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] Same hex string output for all inputs.
|
||||||
|
- [ ] `pro` feature tests pass.
|
||||||
|
|
||||||
|
**Effort:** 30 min
|
||||||
|
**Risk:** None
|
||||||
|
**Performance Impact:** Low — removes 32 allocations per password hash.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-004: Cache CSV File Handle
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
`csv_output::write_result()` re-opens the file via `OpenOptions::new().append(true).open(path)` on every call (once per test). Safe but wasteful.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** No protocol involvement.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Hold the file handle open for the process lifetime.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
Change `static CSV_FILE: Mutex<Option<String>>` to `Mutex<Option<(String, std::fs::File)>>`, or open once during `init()` and store `Mutex<Option<File>>`.
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] CSV tests in `full_integration_test.rs` pass.
|
||||||
|
- [ ] File is created with headers on `init()`.
|
||||||
|
- [ ] Multiple `write_result` calls append correctly.
|
||||||
|
|
||||||
|
**Effort:** 30 min
|
||||||
|
**Risk:** None
|
||||||
|
**Performance Impact:** Low — removes one `open()` syscall per test.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-005: Remove Allocating Error String Matching
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
`src/server_pro/enforcer.rs:157-161` does:
|
||||||
|
```rust
|
||||||
|
match format!("{}", e).as_str() {
|
||||||
|
s if s.contains("daily") => ...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
This allocates a `String` from the error just for substring matching.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** Server-pro internal logic only.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Match without allocation.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
Use `e.to_string().contains("daily")` (still allocates but clearer) or, better, downcast the `rusqlite::Error` or match on structured error variants. If the error is `anyhow::Error`, use `.downcast_ref::<rusqlite::Error>()`.
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] Quota enforcement behavior unchanged.
|
||||||
|
- [ ] Enforcer tests pass.
|
||||||
|
|
||||||
|
**Effort:** 30 min
|
||||||
|
**Risk:** None
|
||||||
|
**Performance Impact:** Low — removes one allocation per enforcer tick.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-006: Replace `chrono_date_today()` with `chrono` Crate
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
`user_db.rs:617-638` contains a hand-rolled Gregorian calendar converter that loops from 1970 to compute today's date. Called before almost every DB write. The `chrono` crate may already be in the dependency tree via `rusqlite` (it is an *optional* `rusqlite` feature, not a default one — verify with `cargo tree` before assuming a free dependency).
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** No protocol involvement.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Replace 30 lines of error-prone manual date math with one `chrono` call.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
Add `chrono = { version = "0.4", optional = true }` gated behind `pro` feature (or use the transitive dep directly). Replace `chrono_date_today()` with:
|
||||||
|
```rust
|
||||||
|
chrono::Local::now().format("%Y-%m-%d").to_string()
|
||||||
|
```
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] `pro` feature compiles.
|
||||||
|
- [ ] Date strings match format `YYYY-MM-DD`.
|
||||||
|
- [ ] DB write tests pass.
|
||||||
|
|
||||||
|
**Effort:** 1 hr
|
||||||
|
**Risk:** Low — adds explicit dep that already exists transitively
|
||||||
|
**Performance Impact:** Low — eliminates loop overhead, but called infrequently.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-007: Optimize Syslog Mutex and Timestamp Formatting
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
`syslog_logger.rs` holds a global `std::sync::Mutex` while formatting a timestamp (manual calendar math) and sending UDP. `std::sync::Mutex` is relatively slow, and the timestamp logic duplicates `chrono_date_today()` issues.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** No protocol involvement.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Reduce lock contention and allocation in logging path.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
1. Use `parking_lot::Mutex` (faster, no poisoning) OR keep `std::sync::Mutex` but clone the `SyslogSender` config outside the lock so the lock is not held during formatting and the UDP send.
|
||||||
|
2. Replace `bsd_timestamp()` with `chrono::Local::now().format("%b %e %H:%M:%S")`.
|
||||||
|
3. Pre-allocate the `String` with `with_capacity(256)`.
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] Syslog output format remains RFC 3164 compliant.
|
||||||
|
- [ ] `test_syslog_events` in `full_integration_test.rs` passes.
|
||||||
|
|
||||||
|
**Effort:** 1 hr
|
||||||
|
**Risk:** Low
|
||||||
|
**Performance Impact:** Low — logging is not a hot path, but reduces global lock hold time.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-008: Cache `ip.to_string()` in Quota Checks
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
`quota.rs:389` calls `ip.to_string()` and then passes `&ip_str` to multiple DB methods, allocating a new `String` on every `remaining_budget()` call.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** Server-pro internal logic.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Eliminate redundant IP stringification.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
Change DB methods to accept `&std::net::IpAddr` directly and stringify inside only when needed for SQL parameter binding (which `rusqlite` may already handle via `ToSql`). Alternatively, pass `ip_str: &str` from a single `to_string()` call and avoid re-stringifying in sub-calls.
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] Quota checks return identical results.
|
||||||
|
- [ ] `pro` feature tests pass.
|
||||||
|
|
||||||
|
**Effort:** 1 hr
|
||||||
|
**Risk:** Low
|
||||||
|
**Performance Impact:** Low — one allocation removed per quota check.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Tier 2: Moderate Fixes (Platform & Latency)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-009: FreeBSD CPU Sampling via `libc::sysctl` FFI
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
On FreeBSD, `cpu.rs` spawns `sysctl -n kern.cp_time` as a child process every second. `fork()` + `exec()` is orders of magnitude slower than a direct syscall.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** No protocol involvement. Platform-specific internal code.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Replace subprocess with direct `sysctl(3)` syscall.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
```rust
|
||||||
|
#[cfg(target_os = "freebsd")]
|
||||||
|
fn get_cpu_times() -> (u64, u64) {
|
||||||
|
let mut mib = [libc::CTL_KERN, libc::KERN_CP_TIME]; // NOTE(review): `KERN_CP_TIME` is a NetBSD MIB constant; FreeBSD exposes `kern.cp_time` via `sysctlbyname(3)` — confirm the constant exists for the FreeBSD libc target or use `libc::sysctlbyname` instead.
|
||||||
|
let mut cp_time: [libc::c_ulong; 5] = [0; 5];
|
||||||
|
let mut len = std::mem::size_of_val(&cp_time);
|
||||||
|
unsafe {
|
||||||
|
if libc::sysctl(
|
||||||
|
mib.as_mut_ptr(),
|
||||||
|
mib.len() as u32,
|
||||||
|
&mut cp_time as *mut _ as *mut libc::c_void,
|
||||||
|
&mut len,
|
||||||
|
std::ptr::null_mut(),
|
||||||
|
0,
|
||||||
|
) == 0 {
|
||||||
|
let total = cp_time[0] + cp_time[1] + cp_time[2] + cp_time[3] + cp_time[4];
|
||||||
|
return (total as u64, cp_time[4] as u64);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
(0, 0)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] Compiles on FreeBSD.
|
||||||
|
- [ ] Returns same values as previous `sysctl` command approach.
|
||||||
|
- [ ] No child process spawned (verify with `ktrace` or `ps`).
|
||||||
|
|
||||||
|
**Effort:** 3 hrs
|
||||||
|
**Risk:** Medium — requires FreeBSD test environment; FFI is unsafe
|
||||||
|
**Performance Impact:** Medium — eliminates 1 fork/exec per second on FreeBSD.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-010: Replace 100ms Poll with `tokio::sync::Notify`
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
In `server.rs:313-332`, the primary connection of a multi-connection TCP test busy-polls the session map every 100ms waiting for secondary connections to join.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** This is internal server-side coordination. The wire behavior (waiting for connections, then starting the test) is unchanged. MikroTik clients will not observe a difference except potentially faster test startup.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Eliminate polling latency and unnecessary mutex acquisitions.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
1. Add a `tokio::sync::Notify` to `TcpSession`:
|
||||||
|
```rust
|
||||||
|
struct TcpSession {
|
||||||
|
peer_ip: IpAddr,
|
||||||
|
streams: Vec<OwnedTcpStream>,
|
||||||
|
expected: u8,
|
||||||
|
notify: tokio::sync::Notify,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
2. In the secondary connection handler, after pushing to `streams`, call `session.notify.notify_one()`.
|
||||||
|
3. In the primary wait loop, replace the sleep loop with:
|
||||||
|
```rust
|
||||||
|
let count = { /* lock, get count, drop lock */ };
|
||||||
|
if count + 1 >= conn_count { break; }
|
||||||
|
|
||||||
|
// Wait for notification or 10s deadline
|
||||||
|
let timeout = tokio::time::sleep(Duration::from_secs(10));
|
||||||
|
tokio::pin!(timeout);
|
||||||
|
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
_ = session.notify.notified() => {
|
||||||
|
let count = { /* lock, get count */ };
|
||||||
|
if count + 1 >= conn_count { break; }
|
||||||
|
}
|
||||||
|
_ = &mut timeout => { break; }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] Multi-connection TCP tests pass.
|
||||||
|
- [ ] Test startup latency is ≤ 1ms after last connection joins (was up to 100ms).
|
||||||
|
- [ ] No deadlock under concurrent multi-connection tests.
|
||||||
|
|
||||||
|
**Effort:** 2 hrs
|
||||||
|
**Risk:** Medium — concurrency change; must carefully manage lock/notify ordering to avoid races
|
||||||
|
**Performance Impact:** Medium — improves multi-conn test startup latency by up to 100ms per test.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-011: Reuse UDP RX Timer Instead of Per-Call Timeout
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
Both client and server UDP RX loops create a new `tokio::time::timeout` on every `recv`/`recv_from` call:
|
||||||
|
```rust
|
||||||
|
tokio::time::timeout(Duration::from_secs(5), socket.recv(&mut buf)).await
|
||||||
|
```
|
||||||
|
At high packet rates, this registers and cancels timers on Tokio's timer wheel constantly.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** Internal async timing only. UDP packet processing is unchanged.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Reduce timer wheel churn in high-rate UDP RX loops.
|
||||||
|
|
||||||
|
**Design:**
|
||||||
|
Option A — `tokio::select!` with a pinned sleep future:
|
||||||
|
```rust
|
||||||
|
let mut timeout = tokio::time::sleep(Duration::from_secs(5));
|
||||||
|
tokio::pin!(timeout);
|
||||||
|
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
biased; // prioritize recv
|
||||||
|
res = socket.recv(&mut buf) => { /* handle */ timeout.as_mut().reset(Instant::now() + Duration::from_secs(5)); } // NOTE: `Sleep::reset` takes `tokio::time::Instant`, not `std::time::Instant`
|
||||||
|
_ = &mut timeout => { tracing::debug!("UDP RX timeout"); }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Option B — Use `socket2` to set `SO_RCVTIMEO` on the underlying socket, then use blocking/async recv without Tokio timeouts. This moves timeout handling into the kernel, which is even cheaper.
|
||||||
|
|
||||||
|
**Recommendation:** Start with Option A (pure Tokio, no platform risk). Option B can be a follow-up.
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] UDP send/receive/both tests pass.
|
||||||
|
- [ ] UDP RX still times out correctly when no packets arrive.
|
||||||
|
- [ ] No change to packet parsing or sequence tracking.
|
||||||
|
|
||||||
|
**Effort:** 2 hrs
|
||||||
|
**Risk:** Medium — changes timeout behavior; must ensure test abortion still works correctly
|
||||||
|
**Performance Impact:** Medium — reduces timer wheel registration overhead, noticeable at >50K pps.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Tier 3: High Impact (Do These With Full Focus)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-012: Optimize TCP Client RX Status Message Scan
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
`tcp_client_rx_loop` (`client.rs:210-216`) scans up to 256KB byte-by-byte on every `read()` call looking for a 12-byte status marker (`0x07` + `0x80|cpu`). Since data is all zeros, this is almost always a full scan.
|
||||||
|
|
||||||
|
**MikroTik Compatibility Consideration:**
|
||||||
|
- **High confidence of safety.** The protocol is: MikroTik injects 12-byte status messages into the TCP stream. Our client must detect them. Changing *how* we detect them (faster scan) does not change:
|
||||||
|
- What bytes are sent on the wire
|
||||||
|
- What bytes we expect
|
||||||
|
- How we respond to status messages
|
||||||
|
- **One edge case to handle:** TCP is a stream. A status message may be split across two `read()` calls. The current code does **not** handle this correctly (it scans each buffer independently). The optimized version *should* handle split messages to be strictly more correct than the current implementation.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Replace O(n) byte-by-byte scan with SIMD-accelerated or state-machine-based detection, while correctly handling split messages.
|
||||||
|
|
||||||
|
**Design — Recommended: Ring Buffer Approach**
|
||||||
|
|
||||||
|
Since status messages are 12 bytes and all other bytes are zeros, maintain a 12-byte ring buffer across reads:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
const STATUS_MSG_SIZE: usize = 12;
|
||||||
|
|
||||||
|
async fn tcp_client_rx_loop(mut reader: OwnedReadHalf, state: Arc<BandwidthState>) {
|
||||||
|
let mut buf = vec![0u8; 256 * 1024];
|
||||||
|
let mut carry = [0u8; STATUS_MSG_SIZE - 1]; // up to 11 bytes from previous read
|
||||||
|
let mut carry_len = 0usize;
|
||||||
|
|
||||||
|
while state.running.load(Ordering::Relaxed) {
|
||||||
|
match reader.read(&mut buf).await {
|
||||||
|
Ok(0) | Err(_) => break,
|
||||||
|
Ok(n) => {
|
||||||
|
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||||
|
|
||||||
|
// Check if a status message spans the carry + start of buf
|
||||||
|
// NOTE(review): if `n < needed`, the partial carry is silently dropped by the
// carry update at the end of the loop — a status message split across three
// very small reads would still be missed. Acceptable only if reads are in
// practice always ≥ 12 bytes; otherwise extend the carry instead of replacing it.
if carry_len > 0 {
|
||||||
|
let needed = STATUS_MSG_SIZE - carry_len;
|
||||||
|
if n >= needed {
|
||||||
|
let mut candidate = [0u8; STATUS_MSG_SIZE];
|
||||||
|
candidate[..carry_len].copy_from_slice(&carry[..carry_len]);
|
||||||
|
candidate[carry_len..].copy_from_slice(&buf[..needed]);
|
||||||
|
if candidate[0] == STATUS_MSG_TYPE && candidate[1] >= 0x80 {
|
||||||
|
state.remote_cpu.store(candidate[1] & 0x7F, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan within buf for status messages
|
||||||
|
// Since data is zeros, use memchr to find 0x07 candidates
|
||||||
|
if n >= STATUS_MSG_SIZE {
|
||||||
|
let search_end = n - STATUS_MSG_SIZE + 1;
|
||||||
|
let mut offset = 0;
|
||||||
|
while let Some(pos) = memchr::memchr(STATUS_MSG_TYPE, &buf[offset..search_end]) {
|
||||||
|
let i = offset + pos;
|
||||||
|
if buf[i + 1] >= 0x80 {
|
||||||
|
state.remote_cpu.store(buf[i + 1] & 0x7F, Ordering::Relaxed);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
offset = i + 1;
|
||||||
|
if offset >= search_end { break; }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save trailing bytes for next read
|
||||||
|
carry_len = n.min(STATUS_MSG_SIZE - 1); // always ≤ n, so the copy below is in-bounds
|
||||||
|
if n >= carry_len {
|
||||||
|
carry[..carry_len].copy_from_slice(&buf[n - carry_len..n]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Alternative: `memchr` crate only**
|
||||||
|
If we determine split messages are extremely rare and the current behavior is "good enough," simply replace the `for` loop with:
|
||||||
|
```rust
|
||||||
|
if let Some(pos) = memchr::memchr(STATUS_MSG_TYPE, &buf[..n.saturating_sub(STATUS_MSG_SIZE - 1)]) { // saturating: `n - STATUS_MSG_SIZE + 1` would underflow (panic) when n < 12
|
||||||
|
if buf[pos + 1] >= 0x80 { /* ... */ }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
This is a 5-line change with massive speedup (SIMD scan). However, the ring buffer approach is strictly more correct and not much more complex.
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] TCP bidirectional tests pass.
|
||||||
|
- [ ] Remote CPU reporting still works.
|
||||||
|
- [ ] Status messages split across reads are correctly detected (unit test for this).
|
||||||
|
- [ ] `memchr` crate added to deps (very lightweight).
|
||||||
|
- [ ] No change to wire bytes or server behavior.
|
||||||
|
|
||||||
|
**Effort:** 4 hrs
|
||||||
|
**Risk:** Medium — hot path change; must be carefully reviewed and tested
|
||||||
|
**Performance Impact:** **High** — eliminates 256KB byte scan per read. At 10K reads/sec, saves ~2.5GB of memory scanning per second.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### PRD-013: SQLite Connection Pool / Channel-Based Writer
|
||||||
|
|
||||||
|
**Background:**
|
||||||
|
`server_pro` uses a single `Arc<Mutex<Connection>>`. All quota checks, usage recordings, and auth lookups serialize through one lock. `remaining_budget()` issues 15 queries, locking 15+ times. This is the primary scalability bottleneck for the pro server.
|
||||||
|
|
||||||
|
**MikroTik Compatibility:**
|
||||||
|
- **100% safe.** Server-side infrastructure only. No protocol change.
|
||||||
|
|
||||||
|
**Objective:**
|
||||||
|
Enable concurrent quota checks and usage recording without mutex contention.
|
||||||
|
|
||||||
|
**Design — Option A: Connection Pool (Recommended for reads)**
|
||||||
|
Use `r2d2_sqlite` or `deadpool-sqlite`:
|
||||||
|
1. Open a pool of ~4-8 connections to the same SQLite file (WAL mode supports this).
|
||||||
|
2. Read-only operations (`remaining_budget`, `get_user`, `check_user`) borrow a connection from the pool.
|
||||||
|
3. Write operations (`record_usage`, `record_session`) also borrow from the pool (WAL allows concurrent readers + one writer).
|
||||||
|
|
||||||
|
**Design — Option B: Channel-Based Writer (Recommended for writes)**
|
||||||
|
1. Keep one dedicated `Connection` owned by a single Tokio task.
|
||||||
|
2. Expose an `mpsc::channel` where other tasks send write requests (`RecordUsage { user, tx, rx }`).
|
||||||
|
3. The writer task batches or sequentially executes writes without any mutex.
|
||||||
|
4. Reads use a separate read-only connection or pool.
|
||||||
|
|
||||||
|
**Hybrid Recommendation:**
|
||||||
|
- **Reads:** Small connection pool (4 connections) for quota checks and auth lookups.
|
||||||
|
- **Writes:** Single dedicated async task with an `mpsc::unbounded_channel` for usage recording.
|
||||||
|
- **Cache:** Add a 5-second TTL cache for `remaining_budget()` results per user+IP to avoid redundant DB hits during test setup.
|
||||||
|
|
||||||
|
**Acceptance Criteria:**
|
||||||
|
- [ ] `pro` feature compiles and all tests pass.
|
||||||
|
- [ ] Concurrent test launches scale linearly up to at least 50 concurrent sessions.
|
||||||
|
- [ ] Quota enforcement remains correct (no over-quota usage).
|
||||||
|
- [ ] Session logging and interval recording remain accurate.
|
||||||
|
- [ ] No SQLite "database is locked" errors under load.
|
||||||
|
|
||||||
|
**Effort:** 1–2 days
|
||||||
|
**Risk:** High — touches every DB interaction in `server_pro`; potential for data races, quota leaks, or connection exhaustion
|
||||||
|
**Performance Impact:** **High** — enables horizontal scaling of concurrent tests; removes the primary pro server bottleneck.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Roadmap
|
||||||
|
|
||||||
|
### Sprint 1: Quick Wins + Foundation (1 day)
|
||||||
|
- [ ] PRD-001: WCurve cache
|
||||||
|
- [ ] PRD-002: `Instant::now()` dedup
|
||||||
|
- [ ] PRD-003: `hash_password` hex fix
|
||||||
|
- [ ] PRD-004: CSV file handle cache
|
||||||
|
- [ ] PRD-005: Error string matching
|
||||||
|
- [ ] PRD-006: `chrono` date replacement
|
||||||
|
- [ ] PRD-007: Syslog optimization
|
||||||
|
- [ ] PRD-008: `ip.to_string()` cache
|
||||||
|
|
||||||
|
**Deliverable:** Low-risk PR with 8 clean commits. Run full integration tests.
|
||||||
|
|
||||||
|
### Sprint 2: Platform & Async Fixes (1 day)
|
||||||
|
- [ ] PRD-009: FreeBSD CPU FFI
|
||||||
|
- [ ] PRD-010: Multi-conn Notify wake
|
||||||
|
- [ ] PRD-011: UDP timer reuse
|
||||||
|
|
||||||
|
**Deliverable:** PR with platform + latency improvements.
|
||||||
|
|
||||||
|
### Sprint 3: Hot Path Optimization (1–2 days)
|
||||||
|
- [ ] PRD-012: TCP RX scan optimization
|
||||||
|
- [ ] Add unit test for split status messages
|
||||||
|
- [ ] Benchmark before/after with `criterion` (or manual throughput test)
|
||||||
|
|
||||||
|
**Deliverable:** PR with benchmark numbers proving improvement.
|
||||||
|
|
||||||
|
### Sprint 4: Scalability (2–3 days)
|
||||||
|
- [ ] PRD-013: SQLite connection pool / channel writer
|
||||||
|
- [ ] Load test: 50 concurrent tests, verify no DB lock contention
|
||||||
|
- [ ] Add `remaining_budget` cache
|
||||||
|
|
||||||
|
**Deliverable:** PR with load test results.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Testing Requirements for All PRDs
|
||||||
|
|
||||||
|
Since **no wire protocol changes** are made, the existing integration test suite is the primary validation tool. However, for PRD-012 and PRD-013, additional tests are required:
|
||||||
|
|
||||||
|
### New Tests to Add
|
||||||
|
|
||||||
|
1. **Split Status Message Unit Test (for PRD-012)**
|
||||||
|
```rust
|
||||||
|
#[test]
|
||||||
|
fn test_status_message_split_across_reads() {
|
||||||
|
// Feed first 5 bytes, then remaining 7 bytes
|
||||||
|
// Assert CPU value is extracted correctly
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Concurrent Quota Load Test (for PRD-013)**
|
||||||
|
```rust
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_concurrent_quota_checks() {
|
||||||
|
// Spawn 50 tasks doing remaining_budget() + record_usage()
|
||||||
|
// Assert no panics, no SQLite locked errors
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **FreeBSD CPU Parity Test (for PRD-009)**
|
||||||
|
Manual verification on FreeBSD that FFI `sysctl` returns same values as command.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Appendix: MikroTik Compatibility Checklist
|
||||||
|
|
||||||
|
For every PRD, verify:
|
||||||
|
- [ ] No change to `Command` or `StatusMessage` struct layouts or serialization
|
||||||
|
- [ ] No change to MD5 challenge-response handshake order
|
||||||
|
- [ ] No change to EC-SRP5 handshake order or byte values
|
||||||
|
- [ ] No change to TCP packet sizes or UDP payload format
|
||||||
|
- [ ] No change to status injection timing (1-second interval)
|
||||||
|
- [ ] No change to NAT probe behavior
|
||||||
|
- [ ] Client can still authenticate against stock RouterOS `btest` server
|
||||||
|
- [ ] Server can still accept connections from stock RouterOS `btest` client
|
||||||
|
|
||||||
|
All PRDs in this document satisfy the above checklist by construction.
|
||||||
99
README.md
99
README.md
@@ -2,6 +2,25 @@
|
|||||||
|
|
||||||
A Rust reimplementation of the [MikroTik Bandwidth Test (btest)](https://wiki.mikrotik.com/wiki/Manual:Tools/Bandwidth_Test) protocol. Both server and client modes, fully compatible with MikroTik RouterOS devices.
|
A Rust reimplementation of the [MikroTik Bandwidth Test (btest)](https://wiki.mikrotik.com/wiki/Manual:Tools/Bandwidth_Test) protocol. Both server and client modes, fully compatible with MikroTik RouterOS devices.
|
||||||
|
|
||||||
|
## Free Public Servers
|
||||||
|
|
||||||
|
Test your MikroTik link speed right now — no setup, no registration:
|
||||||
|
|
||||||
|
| Server | Location | Dashboard |
|
||||||
|
|--------|----------|-----------|
|
||||||
|
| `104.225.217.60` | US | [btest.home.kg](https://btest.home.kg) |
|
||||||
|
| `188.245.59.196` | EU | [btest.mikata.ru](https://btest.mikata.ru) |
|
||||||
|
|
||||||
|
```
|
||||||
|
/tool bandwidth-test address=104.225.217.60 user=btest password=btest protocol=tcp direction=both
|
||||||
|
```
|
||||||
|
|
||||||
|
After the test, visit `https://btest.home.kg/dashboard/YOUR_IP` to see your results, throughput history, and quota usage. Per-IP limits: 2 GB daily / 8 GB weekly / 24 GB monthly.
|
||||||
|
|
||||||
|
> **Note:** TCP is recommended for remote testing. UDP bidirectional through NAT will only show one direction — this is a btest protocol limitation, not specific to btest-rs. See [KNOWN_ISSUES.md](KNOWN_ISSUES.md) for details.
|
||||||
|
|
||||||
|
Want to run your own public server? Build with `cargo build --release --features pro` — see [Server Pro](#server-pro) below.
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
- **Full protocol support** -- TCP and UDP data transfer, IPv4 and IPv6
|
- **Full protocol support** -- TCP and UDP data transfer, IPv4 and IPv6
|
||||||
@@ -16,7 +35,7 @@ A Rust reimplementation of the [MikroTik Bandwidth Test (btest)](https://wiki.mi
|
|||||||
- **Quiet mode** -- suppress terminal output for scripted/automated use
|
- **Quiet mode** -- suppress terminal output for scripted/automated use
|
||||||
- **NAT traversal** -- probe packet to open firewall holes for UDP receive
|
- **NAT traversal** -- probe packet to open firewall holes for UDP receive
|
||||||
- **Single static binary** -- ~2 MB, zero runtime dependencies (musl build)
|
- **Single static binary** -- ~2 MB, zero runtime dependencies (musl build)
|
||||||
- **Cross-platform** -- macOS, Linux (x86_64, ARM64), Docker
|
- **Cross-platform** -- macOS, Linux (x86_64, ARM64, ARMv7), Windows, Android (Termux), Docker
|
||||||
- **Async I/O** -- tokio-based, handles many concurrent connections efficiently
|
- **Async I/O** -- tokio-based, handles many concurrent connections efficiently
|
||||||
|
|
||||||
## Performance
|
## Performance
|
||||||
@@ -42,14 +61,55 @@ On wired gigabit links, expect line-rate performance in both TCP and UDP modes.
|
|||||||
cargo install --path .
|
cargo install --path .
|
||||||
```
|
```
|
||||||
|
|
||||||
### Pre-built binary (Linux x86_64)
|
### Pre-built binaries
|
||||||
|
|
||||||
|
Download from [releases](https://git.manko.yoga/manawenuz/btest-rs/releases) or [GitHub releases](https://github.com/manawenuz/btest-rs/releases):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Cross-compile from macOS (requires Docker)
|
# Linux x86_64
|
||||||
scripts/build-linux.sh
|
curl -L <release-url>/btest-linux-x86_64.tar.gz | tar xz
|
||||||
|
sudo mv btest /usr/local/bin/
|
||||||
|
|
||||||
# Copy to server
|
# Raspberry Pi 4/5 (64-bit OS)
|
||||||
scp dist/btest root@yourserver:/usr/local/bin/btest
|
curl -L <release-url>/btest-linux-aarch64.tar.gz | tar xz
|
||||||
|
sudo mv btest /usr/local/bin/
|
||||||
|
|
||||||
|
# Raspberry Pi 3/Zero 2 (32-bit OS)
|
||||||
|
curl -L <release-url>/btest-linux-armv7.tar.gz | tar xz
|
||||||
|
sudo mv btest /usr/local/bin/
|
||||||
|
|
||||||
|
# Windows
|
||||||
|
# Download btest-windows-x86_64.zip from releases
|
||||||
|
|
||||||
|
# Android (Termux, no root needed)
|
||||||
|
curl -L <release-url>/btest-android-aarch64.tar.gz | tar xz
|
||||||
|
mv btest $PREFIX/bin/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Raspberry Pi
|
||||||
|
|
||||||
|
The static musl binaries run on any Raspberry Pi without dependencies:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On the Pi — detect architecture and install
|
||||||
|
ARCH=$(uname -m)
|
||||||
|
case $ARCH in
|
||||||
|
aarch64) FILE=btest-linux-aarch64.tar.gz ;;
|
||||||
|
armv7l) FILE=btest-linux-armv7.tar.gz ;;
|
||||||
|
*) echo "Unsupported: $ARCH"; exit 1 ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
curl -LO "https://github.com/manawenuz/btest-rs/releases/latest/download/$FILE"
|
||||||
|
tar xzf "$FILE"
|
||||||
|
sudo mv btest /usr/local/bin/
|
||||||
|
rm "$FILE"
|
||||||
|
|
||||||
|
# Run as server
|
||||||
|
btest -s -a admin -p password --ecsrp5
|
||||||
|
|
||||||
|
# Or install as systemd service
|
||||||
|
curl -LO https://raw.githubusercontent.com/manawenuz/btest-rs/main/scripts/install-service.sh
|
||||||
|
sudo bash install-service.sh --auth-user admin --auth-pass password
|
||||||
```
|
```
|
||||||
|
|
||||||
### Docker
|
### Docker
|
||||||
@@ -208,7 +268,9 @@ See [KNOWN_ISSUES.md](KNOWN_ISSUES.md) for the full list including:
|
|||||||
- **Windows binaries** — cross-compiled but untested
|
- **Windows binaries** — cross-compiled but untested
|
||||||
- **IPv6 UDP on Linux** — untested, likely works fine
|
- **IPv6 UDP on Linux** — untested, likely works fine
|
||||||
|
|
||||||
Contributions and bug reports welcome: https://git.manko.yoga/manawenuz/btest-rs/issues
|
Contributions and bug reports welcome:
|
||||||
|
- https://github.com/manawenuz/btest-rs/issues
|
||||||
|
- https://git.manko.yoga/manawenuz/btest-rs/issues
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
@@ -228,6 +290,29 @@ scripts/test-mikrotik.sh <ip> # Test against MikroTik device
|
|||||||
scripts/test-docker.sh # Docker container test
|
scripts/test-docker.sh # Docker container test
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Server Pro
|
||||||
|
|
||||||
|
An optional superset of the standard server with multi-user support, quotas, and a web dashboard. Build with `--features pro`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo build --release --features pro --bin btest-server-pro
|
||||||
|
```
|
||||||
|
|
||||||
|
Features:
|
||||||
|
- **SQLite user database** — add/remove users, per-user quotas
|
||||||
|
- **Per-IP bandwidth quotas** — daily, weekly, monthly limits with inline byte budget enforcement
|
||||||
|
- **Web dashboard** — session history, throughput stats, quota progress bars, JSON export
|
||||||
|
- **TCP multi-connection** — handles MikroTik's default 20-connection mode
|
||||||
|
- **MD5 auth against DB** — proper challenge-response verification
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create a user and start the server
|
||||||
|
btest-server-pro --users-db users.db useradd btest btest
|
||||||
|
btest-server-pro --users-db users.db --ip-daily 2147483648 --ip-weekly 8589934592 --web-port 8080
|
||||||
|
```
|
||||||
|
|
||||||
|
The pro features are completely optional and don't affect the standard `btest` binary.
|
||||||
|
|
||||||
## Credits
|
## Credits
|
||||||
|
|
||||||
- **[btest-opensource](https://github.com/samm-git/btest-opensource)** by [Alex Samorukov](https://github.com/samm-git) -- original C implementation and protocol reverse-engineering. Licensed under **MIT**.
|
- **[btest-opensource](https://github.com/samm-git/btest-opensource)** by [Alex Samorukov](https://github.com/samm-git) -- original C implementation and protocol reverse-engineering. Licensed under **MIT**.
|
||||||
|
|||||||
79
benches/bandwidth.rs
Normal file
79
benches/bandwidth.rs
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||||
|
use btest_rs::bandwidth::{BandwidthState, calc_send_interval, advance_next_send};
|
||||||
|
use std::sync::atomic::Ordering;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
fn bench_atomic_fetch_add(c: &mut Criterion) {
|
||||||
|
let state = BandwidthState::new();
|
||||||
|
c.bench_function("bandwidth_rx_bytes_fetch_add", |b| {
|
||||||
|
b.iter(|| {
|
||||||
|
black_box(state.rx_bytes.fetch_add(1500, Ordering::Relaxed));
|
||||||
|
})
|
||||||
|
});
|
||||||
|
c.bench_function("bandwidth_tx_bytes_fetch_add", |b| {
|
||||||
|
b.iter(|| {
|
||||||
|
black_box(state.tx_bytes.fetch_add(32768, Ordering::Relaxed));
|
||||||
|
})
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_spend_budget(c: &mut Criterion) {
|
||||||
|
// Unlimited budget (fast path)
|
||||||
|
let unlimited = BandwidthState::new();
|
||||||
|
c.bench_function("spend_budget_unlimited", |b| {
|
||||||
|
b.iter(|| black_box(unlimited.spend_budget(black_box(1500))))
|
||||||
|
});
|
||||||
|
|
||||||
|
// Limited budget
|
||||||
|
let limited = BandwidthState::new();
|
||||||
|
limited.byte_budget.store(1_000_000_000, Ordering::SeqCst);
|
||||||
|
c.bench_function("spend_budget_limited", |b| {
|
||||||
|
b.iter(|| black_box(limited.spend_budget(black_box(1500))))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_calc_send_interval(c: &mut Criterion) {
|
||||||
|
c.bench_function("calc_interval_100mbps_1500b", |b| {
|
||||||
|
b.iter(|| black_box(calc_send_interval(black_box(100_000_000), black_box(1500))))
|
||||||
|
});
|
||||||
|
c.bench_function("calc_interval_1gbps_32768b", |b| {
|
||||||
|
b.iter(|| black_box(calc_send_interval(black_box(1_000_000_000), black_box(32768))))
|
||||||
|
});
|
||||||
|
c.bench_function("calc_interval_unlimited", |b| {
|
||||||
|
b.iter(|| black_box(calc_send_interval(black_box(0), black_box(1500))))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_advance_next_send(c: &mut Criterion) {
|
||||||
|
let iv = Duration::from_micros(120);
|
||||||
|
let now = Instant::now();
|
||||||
|
let mut next = now;
|
||||||
|
c.bench_function("advance_next_send", |b| {
|
||||||
|
b.iter(|| {
|
||||||
|
let r = advance_next_send(&mut next, iv, now);
|
||||||
|
black_box(r);
|
||||||
|
});
|
||||||
|
next = now;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_summary(c: &mut Criterion) {
|
||||||
|
let state = BandwidthState::new();
|
||||||
|
// Pre-populate some values so loads are real
|
||||||
|
state.total_tx_bytes.store(1_000_000_000, Ordering::Relaxed);
|
||||||
|
state.total_rx_bytes.store(2_000_000_000, Ordering::Relaxed);
|
||||||
|
state.intervals.store(100, Ordering::Relaxed);
|
||||||
|
c.bench_function("bandwidth_summary", |b| {
|
||||||
|
b.iter(|| black_box(state.summary()))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
criterion_group!(
|
||||||
|
bandwidth_benches,
|
||||||
|
bench_atomic_fetch_add,
|
||||||
|
bench_spend_budget,
|
||||||
|
bench_calc_send_interval,
|
||||||
|
bench_advance_next_send,
|
||||||
|
bench_summary
|
||||||
|
);
|
||||||
|
criterion_main!(bandwidth_benches);
|
||||||
19
benches/ecsrp5.rs
Normal file
19
benches/ecsrp5.rs
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||||
|
use btest_rs::ecsrp5::{WCurve, WCURVE};
|
||||||
|
|
||||||
|
fn bench_wcurve_new(c: &mut Criterion) {
|
||||||
|
c.bench_function("wcurve_new_uncached", |b| {
|
||||||
|
b.iter(|| black_box(WCurve::new()))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_wcurve_cached(c: &mut Criterion) {
|
||||||
|
// Force initialization before benchmarking
|
||||||
|
let _ = &*WCURVE;
|
||||||
|
c.bench_function("wcurve_cached_access", |b| {
|
||||||
|
b.iter(|| black_box(&*WCURVE))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
criterion_group!(ecsrp5_benches, bench_wcurve_new, bench_wcurve_cached);
|
||||||
|
criterion_main!(ecsrp5_benches);
|
||||||
65
benches/protocol.rs
Normal file
65
benches/protocol.rs
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||||
|
use btest_rs::protocol::{Command, StatusMessage, CMD_PROTO_TCP, CMD_DIR_BOTH};
|
||||||
|
|
||||||
|
fn bench_command_serialize(c: &mut Criterion) {
|
||||||
|
let cmd = Command::new(CMD_PROTO_TCP, CMD_DIR_BOTH);
|
||||||
|
c.bench_function("command_serialize", |b| {
|
||||||
|
b.iter(|| black_box(cmd.serialize()))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_command_deserialize(c: &mut Criterion) {
|
||||||
|
let bytes = [0x01, 0x03, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
|
||||||
|
c.bench_function("command_deserialize", |b| {
|
||||||
|
b.iter(|| black_box(Command::deserialize(black_box(&bytes))))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_status_message_serialize(c: &mut Criterion) {
|
||||||
|
let msg = StatusMessage {
|
||||||
|
seq: 42,
|
||||||
|
bytes_received: 1_000_000,
|
||||||
|
cpu_load: 50,
|
||||||
|
};
|
||||||
|
c.bench_function("status_message_serialize", |b| {
|
||||||
|
b.iter(|| black_box(msg.serialize()))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_status_message_deserialize(c: &mut Criterion) {
|
||||||
|
let bytes = [0x07, 0xB2, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x40, 0x42, 0x0F, 0x00];
|
||||||
|
c.bench_function("status_message_deserialize", |b| {
|
||||||
|
b.iter(|| black_box(StatusMessage::deserialize(black_box(&bytes))))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_roundtrip(c: &mut Criterion) {
|
||||||
|
let cmd = Command::new(CMD_PROTO_TCP, CMD_DIR_BOTH);
|
||||||
|
let msg = StatusMessage {
|
||||||
|
seq: 99,
|
||||||
|
bytes_received: 50_000,
|
||||||
|
cpu_load: 75,
|
||||||
|
};
|
||||||
|
c.bench_function("command_roundtrip", |b| {
|
||||||
|
b.iter(|| {
|
||||||
|
let s = black_box(cmd.serialize());
|
||||||
|
black_box(Command::deserialize(&s))
|
||||||
|
})
|
||||||
|
});
|
||||||
|
c.bench_function("status_message_roundtrip", |b| {
|
||||||
|
b.iter(|| {
|
||||||
|
let s = black_box(msg.serialize());
|
||||||
|
black_box(StatusMessage::deserialize(&s))
|
||||||
|
})
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
criterion_group!(
|
||||||
|
protocol_benches,
|
||||||
|
bench_command_serialize,
|
||||||
|
bench_command_deserialize,
|
||||||
|
bench_status_message_serialize,
|
||||||
|
bench_status_message_deserialize,
|
||||||
|
bench_roundtrip
|
||||||
|
);
|
||||||
|
criterion_main!(protocol_benches);
|
||||||
100
benches/tcp_rx_scan.rs
Normal file
100
benches/tcp_rx_scan.rs
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
|
||||||
|
use btest_rs::client::scan_status_message;
|
||||||
|
use btest_rs::protocol::STATUS_MSG_TYPE;
|
||||||
|
|
||||||
|
/// Naive O(n) byte-by-byte scan — the old implementation.
|
||||||
|
fn naive_scan(buf: &[u8]) -> Option<u8> {
|
||||||
|
const STATUS_MSG_SIZE: usize = 12;
|
||||||
|
if buf.len() < STATUS_MSG_SIZE {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
for i in 0..=(buf.len() - STATUS_MSG_SIZE) {
|
||||||
|
if buf[i] == STATUS_MSG_TYPE && buf[i + 1] >= 0x80 {
|
||||||
|
return Some((buf[i + 1] & 0x7F).min(100));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_buffer(size: usize, status_at: Option<usize>) -> Vec<u8> {
|
||||||
|
let mut buf = vec![0u8; size];
|
||||||
|
if let Some(pos) = status_at {
|
||||||
|
buf[pos] = STATUS_MSG_TYPE;
|
||||||
|
buf[pos + 1] = 0x80 | 50; // CPU = 50%
|
||||||
|
}
|
||||||
|
buf
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_scan_all_zeros(c: &mut Criterion) {
|
||||||
|
let mut group = c.benchmark_group("tcp_rx_scan_all_zeros");
|
||||||
|
for size in [4096, 65536, 262144] {
|
||||||
|
let buf = make_buffer(size, None);
|
||||||
|
group.throughput(Throughput::Bytes(size as u64));
|
||||||
|
group.bench_with_input(BenchmarkId::new("naive", size), &buf, |b, buf| {
|
||||||
|
b.iter(|| black_box(naive_scan(black_box(buf))))
|
||||||
|
});
|
||||||
|
group.bench_with_input(BenchmarkId::new("memchr", size), &buf, |b, buf| {
|
||||||
|
b.iter(|| black_box(scan_status_message(black_box(&[]), black_box(buf))))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
group.finish();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_scan_status_at_start(c: &mut Criterion) {
|
||||||
|
let mut group = c.benchmark_group("tcp_rx_scan_status_at_start");
|
||||||
|
for size in [4096, 65536, 262144] {
|
||||||
|
let buf = make_buffer(size, Some(0));
|
||||||
|
group.throughput(Throughput::Bytes(size as u64));
|
||||||
|
group.bench_with_input(BenchmarkId::new("naive", size), &buf, |b, buf| {
|
||||||
|
b.iter(|| black_box(naive_scan(black_box(buf))))
|
||||||
|
});
|
||||||
|
group.bench_with_input(BenchmarkId::new("memchr", size), &buf, |b, buf| {
|
||||||
|
b.iter(|| black_box(scan_status_message(black_box(&[]), black_box(buf))))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
group.finish();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_scan_status_at_end(c: &mut Criterion) {
|
||||||
|
let mut group = c.benchmark_group("tcp_rx_scan_status_at_end");
|
||||||
|
for size in [4096, 65536, 262144] {
|
||||||
|
let buf = make_buffer(size, Some(size - 12));
|
||||||
|
group.throughput(Throughput::Bytes(size as u64));
|
||||||
|
group.bench_with_input(BenchmarkId::new("naive", size), &buf, |b, buf| {
|
||||||
|
b.iter(|| black_box(naive_scan(black_box(buf))))
|
||||||
|
});
|
||||||
|
group.bench_with_input(BenchmarkId::new("memchr", size), &buf, |b, buf| {
|
||||||
|
b.iter(|| black_box(scan_status_message(black_box(&[]), black_box(buf))))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
group.finish();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bench_scan_split_message(c: &mut Criterion) {
|
||||||
|
// Simulate a status message split across two reads:
|
||||||
|
// carry has first 5 bytes, buf has remaining 7 bytes
|
||||||
|
let mut carry = vec![0u8; 5];
|
||||||
|
carry[0] = STATUS_MSG_TYPE;
|
||||||
|
carry[1] = 0x80 | 75;
|
||||||
|
let buf = vec![0u8; 7];
|
||||||
|
|
||||||
|
c.bench_function("scan_split_5_7", |b| {
|
||||||
|
b.iter(|| black_box(scan_status_message(black_box(&carry), black_box(&buf))))
|
||||||
|
});
|
||||||
|
|
||||||
|
// Split with 2 bytes in carry (status type + cpu byte), 10 in buf
|
||||||
|
let carry_2 = vec![STATUS_MSG_TYPE, 0x80 | 33];
|
||||||
|
let buf_10 = vec![0u8; 10];
|
||||||
|
c.bench_function("scan_split_2_10", |b| {
|
||||||
|
b.iter(|| black_box(scan_status_message(black_box(&carry_2), black_box(&buf_10))))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
criterion_group!(
|
||||||
|
tcp_rx_scan_benches,
|
||||||
|
bench_scan_all_zeros,
|
||||||
|
bench_scan_status_at_start,
|
||||||
|
bench_scan_status_at_end,
|
||||||
|
bench_scan_split_message
|
||||||
|
);
|
||||||
|
criterion_main!(tcp_rx_scan_benches);
|
||||||
52
deploy/alpine/APKBUILD
Normal file
52
deploy/alpine/APKBUILD
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# Maintainer: Siavash Sameni <manwe at manko dot yoga>
|
||||||
|
pkgname=btest-rs
|
||||||
|
pkgver=0.6.0
|
||||||
|
pkgrel=0
|
||||||
|
pkgdesc="MikroTik Bandwidth Test server and client with EC-SRP5 auth"
|
||||||
|
url="https://github.com/manawenuz/btest-rs"
|
||||||
|
license="MIT AND Apache-2.0"
|
||||||
|
arch="x86_64 aarch64 armv7"
|
||||||
|
makedepends="cargo rust"
|
||||||
|
install="$pkgname.pre-install"
|
||||||
|
source="$pkgname-$pkgver.tar.gz::https://github.com/manawenuz/btest-rs/archive/refs/tags/v$pkgver.tar.gz
|
||||||
|
btest.initd
|
||||||
|
"
|
||||||
|
sha256sums="SKIP
|
||||||
|
SKIP
|
||||||
|
"
|
||||||
|
|
||||||
|
prepare() {
|
||||||
|
default_prepare
|
||||||
|
cd "$builddir"
|
||||||
|
cargo fetch --locked --target "$(rustc -vV | sed -n 's/host: //p')"
|
||||||
|
}
|
||||||
|
|
||||||
|
build() {
|
||||||
|
cd "$builddir"
|
||||||
|
export CARGO_TARGET_DIR=target
|
||||||
|
cargo build --frozen --release
|
||||||
|
}
|
||||||
|
|
||||||
|
check() {
|
||||||
|
cd "$builddir"
|
||||||
|
cargo test --frozen --release
|
||||||
|
}
|
||||||
|
|
||||||
|
package() {
|
||||||
|
cd "$builddir"
|
||||||
|
|
||||||
|
# binary
|
||||||
|
install -Dm755 "target/release/btest" "$pkgdir/usr/bin/btest"
|
||||||
|
|
||||||
|
# man page
|
||||||
|
install -Dm644 "docs/man/btest.1" "$pkgdir/usr/share/man/man1/btest.1"
|
||||||
|
|
||||||
|
# license
|
||||||
|
install -Dm644 "LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
|
||||||
|
|
||||||
|
# documentation
|
||||||
|
install -Dm644 "README.md" "$pkgdir/usr/share/doc/$pkgname/README.md"
|
||||||
|
|
||||||
|
# OpenRC init script
|
||||||
|
install -Dm755 "$srcdir/btest.initd" "$pkgdir/etc/init.d/btest"
|
||||||
|
}
|
||||||
37
deploy/alpine/btest.initd
Executable file
37
deploy/alpine/btest.initd
Executable file
@@ -0,0 +1,37 @@
|
|||||||
|
#!/sbin/openrc-run
|
||||||
|
# OpenRC init script for btest-rs
|
||||||
|
# MikroTik Bandwidth Test server
|
||||||
|
|
||||||
|
name="btest"
|
||||||
|
description="MikroTik Bandwidth Test Server (btest-rs)"
|
||||||
|
command="/usr/bin/btest"
|
||||||
|
command_args="-s"
|
||||||
|
command_background=true
|
||||||
|
pidfile="/run/$name.pid"
|
||||||
|
|
||||||
|
# Run as dedicated user if it exists, otherwise root
|
||||||
|
command_user="btest:btest"
|
||||||
|
|
||||||
|
# Logging
|
||||||
|
output_log="/var/log/$name/$name.log"
|
||||||
|
error_log="/var/log/$name/$name.err"
|
||||||
|
|
||||||
|
depend() {
|
||||||
|
need net
|
||||||
|
after firewall
|
||||||
|
use dns logger
|
||||||
|
}
|
||||||
|
|
||||||
|
start_pre() {
|
||||||
|
# Create log directory
|
||||||
|
checkpath -d -m 0755 -o "$command_user" /var/log/$name
|
||||||
|
|
||||||
|
# Create runtime directory
|
||||||
|
checkpath -d -m 0755 -o "$command_user" /run
|
||||||
|
}
|
||||||
|
|
||||||
|
stop() {
|
||||||
|
ebegin "Stopping $name"
|
||||||
|
start-stop-daemon --stop --pidfile "$pidfile" --retry TERM/5/KILL/3
|
||||||
|
eend $?
|
||||||
|
}
|
||||||
118
deploy/alpine/test-alpine.sh
Executable file
118
deploy/alpine/test-alpine.sh
Executable file
@@ -0,0 +1,118 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# Test Alpine Linux packaging for btest-rs
|
||||||
|
# Runs inside an Alpine Docker container to build and verify the APK.
|
||||||
|
#
|
||||||
|
# Usage (from repository root):
|
||||||
|
# docker run --rm -v "$PWD":/src alpine:latest /src/deploy/alpine/test-alpine.sh
|
||||||
|
#
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
ALPINE_DIR="/src/deploy/alpine"
|
||||||
|
|
||||||
|
echo "=== Alpine APK packaging test ==="
|
||||||
|
echo "Alpine version: $(cat /etc/alpine-release)"
|
||||||
|
|
||||||
|
# ── Install build dependencies ──────────────────────────────────────
|
||||||
|
echo "--- Installing build dependencies ---"
|
||||||
|
apk update
|
||||||
|
apk add --no-cache \
|
||||||
|
alpine-sdk \
|
||||||
|
rust \
|
||||||
|
cargo \
|
||||||
|
sudo
|
||||||
|
|
||||||
|
# ── Create a non-root build user (abuild refuses to run as root) ──
|
||||||
|
echo "--- Setting up build user ---"
|
||||||
|
adduser -D builder
|
||||||
|
addgroup builder abuild
|
||||||
|
echo "builder ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
||||||
|
|
||||||
|
# ── Prepare build tree ──────────────────────────────────────────────
|
||||||
|
echo "--- Preparing build tree ---"
|
||||||
|
BUILD_DIR="/home/builder/btest-rs"
|
||||||
|
mkdir -p "$BUILD_DIR"
|
||||||
|
cp "$ALPINE_DIR/APKBUILD" "$BUILD_DIR/"
|
||||||
|
cp "$ALPINE_DIR/btest.initd" "$BUILD_DIR/"
|
||||||
|
|
||||||
|
# Generate signing key (required by abuild)
|
||||||
|
su builder -c "abuild-keygen -a -n -q"
|
||||||
|
sudo cp /home/builder/.abuild/*.rsa.pub /etc/apk/keys/
|
||||||
|
|
||||||
|
# ── Build the package ──────────────────────────────────────────────
|
||||||
|
echo "--- Building APK ---"
|
||||||
|
cd "$BUILD_DIR"
|
||||||
|
chown -R builder:builder "$BUILD_DIR"
|
||||||
|
su builder -c "abuild -r"
|
||||||
|
|
||||||
|
echo "--- Build succeeded ---"
|
||||||
|
|
||||||
|
# ── Locate and install the package ──────────────────────────────────
|
||||||
|
echo "--- Installing built APK ---"
|
||||||
|
APK_FILE=$(find /home/builder/packages -name "btest-rs-*.apk" -not -name "*doc*" | head -1)
|
||||||
|
if [ -z "$APK_FILE" ]; then
|
||||||
|
echo "FAIL: APK file not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "Found APK: $APK_FILE"
|
||||||
|
apk add --allow-untrusted "$APK_FILE"
|
||||||
|
|
||||||
|
# ── Verify installation ────────────────────────────────────────────
|
||||||
|
echo "--- Verifying installation ---"
|
||||||
|
FAIL=0
|
||||||
|
|
||||||
|
# Binary exists and is executable
|
||||||
|
if command -v btest >/dev/null 2>&1; then
|
||||||
|
echo "PASS: btest binary installed"
|
||||||
|
else
|
||||||
|
echo "FAIL: btest binary not found in PATH"
|
||||||
|
FAIL=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Binary runs (show version / help)
|
||||||
|
if btest --help >/dev/null 2>&1; then
|
||||||
|
echo "PASS: btest --help exits successfully"
|
||||||
|
else
|
||||||
|
echo "FAIL: btest --help failed"
|
||||||
|
FAIL=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Man page installed
|
||||||
|
if [ -f /usr/share/man/man1/btest.1 ]; then
|
||||||
|
echo "PASS: man page installed"
|
||||||
|
else
|
||||||
|
echo "FAIL: man page not found"
|
||||||
|
FAIL=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# License installed
|
||||||
|
if [ -f /usr/share/licenses/btest-rs/LICENSE ]; then
|
||||||
|
echo "PASS: LICENSE installed"
|
||||||
|
else
|
||||||
|
echo "FAIL: LICENSE not found"
|
||||||
|
FAIL=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# OpenRC init script installed
|
||||||
|
if [ -f /etc/init.d/btest ]; then
|
||||||
|
echo "PASS: OpenRC init script installed"
|
||||||
|
else
|
||||||
|
echo "FAIL: OpenRC init script not found"
|
||||||
|
FAIL=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Init script is executable
|
||||||
|
if [ -x /etc/init.d/btest ]; then
|
||||||
|
echo "PASS: init script is executable"
|
||||||
|
else
|
||||||
|
echo "FAIL: init script is not executable"
|
||||||
|
FAIL=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Summary ─────────────────────────────────────────────────────────
|
||||||
|
echo ""
|
||||||
|
if [ "$FAIL" -eq 0 ]; then
|
||||||
|
echo "=== All Alpine packaging tests PASSED ==="
|
||||||
|
else
|
||||||
|
echo "=== Some Alpine packaging tests FAILED ==="
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
15
deploy/aur/.SRCINFO
Normal file
15
deploy/aur/.SRCINFO
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
pkgbase = btest-rs
|
||||||
|
pkgdesc = MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth
|
||||||
|
pkgver = 0.6.0
|
||||||
|
pkgrel = 1
|
||||||
|
url = https://github.com/manawenuz/btest-rs
|
||||||
|
arch = x86_64
|
||||||
|
arch = aarch64
|
||||||
|
arch = armv7h
|
||||||
|
license = MIT
|
||||||
|
license = Apache-2.0
|
||||||
|
makedepends = cargo
|
||||||
|
source = btest-rs-0.6.0.tar.gz::https://github.com/manawenuz/btest-rs/archive/refs/tags/v0.6.0.tar.gz
|
||||||
|
sha256sums = SKIP
|
||||||
|
|
||||||
|
pkgname = btest-rs
|
||||||
58
deploy/aur/PKGBUILD
Normal file
58
deploy/aur/PKGBUILD
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
# Maintainer: Siavash Sameni <manwe at manko dot yoga>
|
||||||
|
pkgname=btest-rs
|
||||||
|
pkgver=0.6.0
|
||||||
|
pkgrel=1
|
||||||
|
pkgdesc="MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth"
|
||||||
|
arch=('x86_64' 'aarch64' 'armv7h')
|
||||||
|
url="https://github.com/manawenuz/btest-rs"
|
||||||
|
license=('MIT' 'Apache-2.0')
|
||||||
|
depends=()
|
||||||
|
makedepends=('cargo')
|
||||||
|
source=("$pkgname-$pkgver.tar.gz::https://github.com/manawenuz/btest-rs/archive/refs/tags/v$pkgver.tar.gz")
|
||||||
|
sha256sums=('SKIP')
|
||||||
|
|
||||||
|
prepare() {
|
||||||
|
cd "$pkgname-$pkgver"
|
||||||
|
export RUSTUP_TOOLCHAIN=stable
|
||||||
|
cargo fetch --locked --target "$(rustc -vV | sed -n 's/host: //p')"
|
||||||
|
}
|
||||||
|
|
||||||
|
build() {
|
||||||
|
cd "$pkgname-$pkgver"
|
||||||
|
export RUSTUP_TOOLCHAIN=stable
|
||||||
|
export CARGO_TARGET_DIR=target
|
||||||
|
cargo build --frozen --release
|
||||||
|
}
|
||||||
|
|
||||||
|
package() {
|
||||||
|
cd "$pkgname-$pkgver"
|
||||||
|
install -Dm755 "target/release/btest" "$pkgdir/usr/bin/btest"
|
||||||
|
install -Dm644 "LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
|
||||||
|
install -Dm644 "docs/man/btest.1" "$pkgdir/usr/share/man/man1/btest.1"
|
||||||
|
install -Dm644 "README.md" "$pkgdir/usr/share/doc/$pkgname/README.md"
|
||||||
|
|
||||||
|
# systemd service
|
||||||
|
install -Dm644 /dev/stdin "$pkgdir/usr/lib/systemd/system/btest.service" <<EOF
|
||||||
|
[Unit]
|
||||||
|
Description=MikroTik Bandwidth Test Server (btest-rs)
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
ExecStart=/usr/bin/btest -s
|
||||||
|
Restart=always
|
||||||
|
RestartSec=5
|
||||||
|
DynamicUser=yes
|
||||||
|
NoNewPrivileges=yes
|
||||||
|
ProtectSystem=strict
|
||||||
|
ProtectHome=yes
|
||||||
|
PrivateTmp=yes
|
||||||
|
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||||
|
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||||
|
LimitNOFILE=65535
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
EOF
|
||||||
|
}
|
||||||
40
deploy/aur/test-aur.sh
Executable file
40
deploy/aur/test-aur.sh
Executable file
@@ -0,0 +1,40 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# Test the PKGBUILD in a Docker Arch Linux container.
|
||||||
|
# Usage: ./deploy/aur/test-aur.sh
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
cd "$(dirname "$0")/../.."
|
||||||
|
|
||||||
|
echo "=== Testing AUR PKGBUILD in Arch Linux container ==="
|
||||||
|
|
||||||
|
docker run --rm -v "$(pwd):/src:ro" archlinux:latest bash -c '
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Install base-devel and rust
|
||||||
|
pacman -Syu --noconfirm base-devel rustup git
|
||||||
|
rustup default stable
|
||||||
|
|
||||||
|
# Create build user (makepkg refuses to run as root)
|
||||||
|
useradd -m builder
|
||||||
|
echo "builder ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
||||||
|
|
||||||
|
# Copy source and PKGBUILD
|
||||||
|
su builder -c "
|
||||||
|
mkdir -p /tmp/build && cd /tmp/build
|
||||||
|
cp /src/deploy/aur/PKGBUILD .
|
||||||
|
|
||||||
|
# Build the package
|
||||||
|
makepkg -si --noconfirm
|
||||||
|
|
||||||
|
# Verify
|
||||||
|
echo ''
|
||||||
|
echo '=== Installed ==='
|
||||||
|
btest --version
|
||||||
|
btest --help | head -5
|
||||||
|
echo ''
|
||||||
|
echo '=== Files ==='
|
||||||
|
pacman -Ql btest-rs
|
||||||
|
echo ''
|
||||||
|
echo '=== SUCCESS ==='
|
||||||
|
"
|
||||||
|
'
|
||||||
208
deploy/deb/build-deb.sh
Executable file
208
deploy/deb/build-deb.sh
Executable file
@@ -0,0 +1,208 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# build-deb.sh -- Build a Debian/Ubuntu .deb package for btest-rs
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# ./deploy/deb/build-deb.sh # uses dist/btest or target/release/btest
|
||||||
|
# BTEST_BIN=path/to/btest ./deploy/deb/build-deb.sh
|
||||||
|
#
|
||||||
|
# Requirements: dpkg-deb, gzip (standard on Debian/Ubuntu build hosts)
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# Package metadata
|
||||||
|
###############################################################################
|
||||||
|
PKG_NAME="btest-rs"
|
||||||
|
PKG_VERSION="0.6.0"
|
||||||
|
PKG_ARCH="amd64"
|
||||||
|
PKG_MAINTAINER="Siavash Sameni <manwe@manko.yoga>"
|
||||||
|
PKG_DESCRIPTION="MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth"
|
||||||
|
PKG_HOMEPAGE="https://github.com/manawenuz/btest-rs"
|
||||||
|
PKG_LICENSE="MIT AND Apache-2.0"
|
||||||
|
PKG_SECTION="net"
|
||||||
|
PKG_PRIORITY="optional"
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# Paths
|
||||||
|
###############################################################################
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||||
|
|
||||||
|
# Locate the pre-built binary
|
||||||
|
if [[ -n "${BTEST_BIN:-}" ]]; then
|
||||||
|
: # caller provided an explicit path
|
||||||
|
elif [[ -f "$REPO_ROOT/dist/btest" ]]; then
|
||||||
|
BTEST_BIN="$REPO_ROOT/dist/btest"
|
||||||
|
elif [[ -f "$REPO_ROOT/target/release/btest" ]]; then
|
||||||
|
BTEST_BIN="$REPO_ROOT/target/release/btest"
|
||||||
|
else
|
||||||
|
echo "Error: cannot find btest binary."
|
||||||
|
echo " Build first (cargo build --release) or set BTEST_BIN=path/to/btest"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Verify the binary exists and is executable
|
||||||
|
if [[ ! -f "$BTEST_BIN" ]]; then
|
||||||
|
echo "Error: $BTEST_BIN does not exist."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "==> Using binary: $BTEST_BIN"
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# Prepare staging tree
|
||||||
|
###############################################################################
|
||||||
|
DEB_FILE="${PKG_NAME}_${PKG_VERSION}_${PKG_ARCH}.deb"
|
||||||
|
STAGE="$(mktemp -d)"
|
||||||
|
trap 'rm -rf "$STAGE"' EXIT
|
||||||
|
|
||||||
|
echo "==> Staging in $STAGE"
|
||||||
|
|
||||||
|
# Binary
|
||||||
|
install -Dm755 "$BTEST_BIN" "$STAGE/usr/bin/btest"
|
||||||
|
|
||||||
|
# Man page
|
||||||
|
if [[ -f "$REPO_ROOT/docs/man/btest.1" ]]; then
|
||||||
|
install -Dm644 "$REPO_ROOT/docs/man/btest.1" "$STAGE/usr/share/man/man1/btest.1"
|
||||||
|
gzip -9n "$STAGE/usr/share/man/man1/btest.1"
|
||||||
|
else
|
||||||
|
echo "Warning: docs/man/btest.1 not found -- skipping man page"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# systemd service unit
|
||||||
|
install -d "$STAGE/usr/lib/systemd/system"
|
||||||
|
cat > "$STAGE/usr/lib/systemd/system/btest.service" <<'UNIT'
|
||||||
|
[Unit]
|
||||||
|
Description=MikroTik Bandwidth Test Server (btest-rs)
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
ExecStart=/usr/bin/btest -s
|
||||||
|
Restart=always
|
||||||
|
RestartSec=5
|
||||||
|
DynamicUser=yes
|
||||||
|
NoNewPrivileges=yes
|
||||||
|
ProtectSystem=strict
|
||||||
|
ProtectHome=yes
|
||||||
|
PrivateTmp=yes
|
||||||
|
ProtectKernelTunables=yes
|
||||||
|
ProtectControlGroups=yes
|
||||||
|
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||||
|
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||||
|
LimitNOFILE=65535
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
UNIT
|
||||||
|
|
||||||
|
# Documentation
|
||||||
|
install -Dm644 "$REPO_ROOT/README.md" "$STAGE/usr/share/doc/$PKG_NAME/README.md"
|
||||||
|
|
||||||
|
# License
|
||||||
|
install -Dm644 "$REPO_ROOT/LICENSE" "$STAGE/usr/share/licenses/$PKG_NAME/LICENSE"
|
||||||
|
|
||||||
|
# Debian copyright file (policy-compliant copy in /usr/share/doc)
|
||||||
|
install -d "$STAGE/usr/share/doc/$PKG_NAME"
|
||||||
|
cat > "$STAGE/usr/share/doc/$PKG_NAME/copyright" <<COPY
|
||||||
|
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||||
|
Upstream-Name: $PKG_NAME
|
||||||
|
Upstream-Contact: $PKG_MAINTAINER
|
||||||
|
Source: $PKG_HOMEPAGE
|
||||||
|
|
||||||
|
Files: *
|
||||||
|
Copyright: 2024-2026 Siavash Sameni
|
||||||
|
License: MIT AND Apache-2.0
|
||||||
|
COPY
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# Calculate installed size (in KiB, as Debian policy requires)
|
||||||
|
###############################################################################
|
||||||
|
INSTALLED_SIZE=$(du -sk "$STAGE" | cut -f1)
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# DEBIAN/control
|
||||||
|
###############################################################################
|
||||||
|
install -d "$STAGE/DEBIAN"
|
||||||
|
cat > "$STAGE/DEBIAN/control" <<CTRL
|
||||||
|
Package: $PKG_NAME
|
||||||
|
Version: $PKG_VERSION
|
||||||
|
Architecture: $PKG_ARCH
|
||||||
|
Maintainer: $PKG_MAINTAINER
|
||||||
|
Installed-Size: $INSTALLED_SIZE
|
||||||
|
Section: $PKG_SECTION
|
||||||
|
Priority: $PKG_PRIORITY
|
||||||
|
Homepage: $PKG_HOMEPAGE
|
||||||
|
Description: $PKG_DESCRIPTION
|
||||||
|
A high-performance Rust implementation of the MikroTik Bandwidth Test
|
||||||
|
protocol, supporting both server and client modes with EC-SRP5
|
||||||
|
authentication. Supports TCP/UDP throughput testing and is fully
|
||||||
|
compatible with RouterOS btest clients.
|
||||||
|
CTRL
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# DEBIAN/conffiles (mark the systemd unit as a conffile)
|
||||||
|
###############################################################################
|
||||||
|
cat > "$STAGE/DEBIAN/conffiles" <<'CF'
|
||||||
|
/usr/lib/systemd/system/btest.service
|
||||||
|
CF
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# Maintainer scripts
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
# postinst -- reload systemd after install
|
||||||
|
cat > "$STAGE/DEBIAN/postinst" <<'POST'
|
||||||
|
#!/bin/sh
|
||||||
|
set -e
|
||||||
|
if [ "$1" = "configure" ]; then
|
||||||
|
if command -v systemctl >/dev/null 2>&1; then
|
||||||
|
systemctl daemon-reload || true
|
||||||
|
echo ""
|
||||||
|
echo "btest-rs installed. To start the server:"
|
||||||
|
echo " sudo systemctl enable --now btest.service"
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
POST
|
||||||
|
chmod 755 "$STAGE/DEBIAN/postinst"
|
||||||
|
|
||||||
|
# prerm -- stop service before removal
|
||||||
|
cat > "$STAGE/DEBIAN/prerm" <<'PRERM'
|
||||||
|
#!/bin/sh
|
||||||
|
set -e
|
||||||
|
if [ "$1" = "remove" ] || [ "$1" = "deconfigure" ]; then
|
||||||
|
if command -v systemctl >/dev/null 2>&1; then
|
||||||
|
systemctl stop btest.service 2>/dev/null || true
|
||||||
|
systemctl disable btest.service 2>/dev/null || true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
PRERM
|
||||||
|
chmod 755 "$STAGE/DEBIAN/prerm"
|
||||||
|
|
||||||
|
# postrm -- clean up after removal
|
||||||
|
cat > "$STAGE/DEBIAN/postrm" <<'POSTRM'
|
||||||
|
#!/bin/sh
|
||||||
|
set -e
|
||||||
|
if [ "$1" = "purge" ] || [ "$1" = "remove" ]; then
|
||||||
|
if command -v systemctl >/dev/null 2>&1; then
|
||||||
|
systemctl daemon-reload || true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
POSTRM
|
||||||
|
chmod 755 "$STAGE/DEBIAN/postrm"
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# Build .deb
|
||||||
|
###############################################################################
|
||||||
|
OUTPUT_DIR="${OUTPUT_DIR:-$REPO_ROOT/dist}"
|
||||||
|
mkdir -p "$OUTPUT_DIR"
|
||||||
|
|
||||||
|
echo "==> Building $DEB_FILE ..."
|
||||||
|
dpkg-deb --root-owner-group --build "$STAGE" "$OUTPUT_DIR/$DEB_FILE"
|
||||||
|
|
||||||
|
echo "==> Package ready: $OUTPUT_DIR/$DEB_FILE"
|
||||||
|
echo ""
|
||||||
|
dpkg-deb --info "$OUTPUT_DIR/$DEB_FILE"
|
||||||
|
echo ""
|
||||||
|
dpkg-deb --contents "$OUTPUT_DIR/$DEB_FILE"
|
||||||
104
deploy/deb/test-deb.sh
Executable file
104
deploy/deb/test-deb.sh
Executable file
@@ -0,0 +1,104 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# test-deb.sh -- Smoke-test a btest-rs .deb inside an Ubuntu Docker container
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# ./deploy/deb/test-deb.sh # auto-finds dist/*.deb
|
||||||
|
# ./deploy/deb/test-deb.sh path/to/btest-rs_*.deb
|
||||||
|
#
|
||||||
|
# Requirements: docker
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||||
|
IMAGE="${TEST_IMAGE:-ubuntu:24.04}"
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# Locate the .deb
|
||||||
|
###############################################################################
|
||||||
|
if [[ -n "${1:-}" ]]; then
|
||||||
|
DEB_PATH="$1"
|
||||||
|
else
|
||||||
|
DEB_PATH="$(ls -1t "$REPO_ROOT"/dist/btest-rs_*.deb 2>/dev/null | head -1 || true)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -z "$DEB_PATH" || ! -f "$DEB_PATH" ]]; then
|
||||||
|
echo "Error: no .deb file found."
|
||||||
|
echo " Build first: ./deploy/deb/build-deb.sh"
|
||||||
|
echo " Or pass path: $0 path/to/btest-rs_*.deb"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
DEB_FILE="$(basename "$DEB_PATH")"
|
||||||
|
DEB_DIR="$(cd "$(dirname "$DEB_PATH")" && pwd)"
|
||||||
|
|
||||||
|
echo "==> Testing $DEB_FILE in $IMAGE"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
# Run tests inside a disposable container
|
||||||
|
###############################################################################
|
||||||
|
docker run --rm \
|
||||||
|
-v "$DEB_DIR/$DEB_FILE:/tmp/$DEB_FILE:ro" \
|
||||||
|
"$IMAGE" \
|
||||||
|
bash -euxc "
|
||||||
|
###################################################################
|
||||||
|
# 1. Install the .deb
|
||||||
|
###################################################################
|
||||||
|
apt-get update -qq
|
||||||
|
dpkg -i /tmp/$DEB_FILE || apt-get install -f -y # resolve deps if any
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
# 2. Verify files are in place
|
||||||
|
###################################################################
|
||||||
|
echo '--- Checking installed files ---'
|
||||||
|
test -x /usr/bin/btest
|
||||||
|
test -f /usr/lib/systemd/system/btest.service
|
||||||
|
test -f /usr/share/doc/btest-rs/README.md
|
||||||
|
test -f /usr/share/licenses/btest-rs/LICENSE
|
||||||
|
|
||||||
|
# Man page (may be gzipped)
|
||||||
|
test -f /usr/share/man/man1/btest.1.gz || test -f /usr/share/man/man1/btest.1
|
||||||
|
echo 'All expected files present.'
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
# 3. btest --version
|
||||||
|
###################################################################
|
||||||
|
echo ''
|
||||||
|
echo '--- btest --version ---'
|
||||||
|
btest --version
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
# 4. Quick loopback server+client test
|
||||||
|
###################################################################
|
||||||
|
echo ''
|
||||||
|
echo '--- Loopback smoke test ---'
|
||||||
|
|
||||||
|
# Start server in background
|
||||||
|
btest -s &
|
||||||
|
SERVER_PID=\$!
|
||||||
|
sleep 1
|
||||||
|
|
||||||
|
# Run a short TCP test against localhost
|
||||||
|
if btest -c 127.0.0.1 -d 2 2>&1; then
|
||||||
|
echo 'Loopback TCP test passed.'
|
||||||
|
else
|
||||||
|
echo 'Warning: loopback test returned non-zero (may be expected in container).'
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Tear down
|
||||||
|
kill \$SERVER_PID 2>/dev/null || true
|
||||||
|
wait \$SERVER_PID 2>/dev/null || true
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
# 5. Package metadata sanity
|
||||||
|
###################################################################
|
||||||
|
echo ''
|
||||||
|
echo '--- dpkg metadata ---'
|
||||||
|
dpkg -s btest-rs | head -20
|
||||||
|
|
||||||
|
echo ''
|
||||||
|
echo '=== All tests passed ==='
|
||||||
|
"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "==> .deb smoke test completed successfully."
|
||||||
57
deploy/openwrt/Makefile
Normal file
57
deploy/openwrt/Makefile
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# OpenWrt package Makefile for btest-rs
|
||||||
|
#
|
||||||
|
# To build:
|
||||||
|
# 1. Clone the OpenWrt SDK for your target
|
||||||
|
# 2. Copy this directory to package/btest-rs/ in the SDK
|
||||||
|
# 3. Run: make package/btest-rs/compile V=s
|
||||||
|
#
|
||||||
|
# Or use the pre-built binary approach (see build-ipk.sh)
|
||||||
|
|
||||||
|
include $(TOPDIR)/rules.mk
|
||||||
|
|
||||||
|
PKG_NAME:=btest-rs
|
||||||
|
PKG_VERSION:=0.6.0
|
||||||
|
PKG_RELEASE:=1
|
||||||
|
|
||||||
|
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
|
||||||
|
PKG_SOURCE_URL:=https://github.com/manawenuz/btest-rs/archive/refs/tags/v$(PKG_VERSION).tar.gz
|
||||||
|
PKG_HASH:=skip
|
||||||
|
|
||||||
|
PKG_BUILD_DEPENDS:=rust/host
|
||||||
|
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
|
||||||
|
|
||||||
|
include $(INCLUDE_DIR)/package.mk
|
||||||
|
|
||||||
|
define Package/btest-rs
|
||||||
|
SECTION:=net
|
||||||
|
CATEGORY:=Network
|
||||||
|
TITLE:=MikroTik Bandwidth Test server and client
|
||||||
|
URL:=https://github.com/manawenuz/btest-rs
|
||||||
|
DEPENDS:=
|
||||||
|
PKGARCH:=$(ARCH)
|
||||||
|
endef
|
||||||
|
|
||||||
|
define Package/btest-rs/description
|
||||||
|
A Rust reimplementation of the MikroTik Bandwidth Test (btest) protocol.
|
||||||
|
Supports TCP/UDP, IPv4/IPv6, EC-SRP5 and MD5 authentication,
|
||||||
|
multi-connection, syslog, CSV output, and CPU monitoring.
|
||||||
|
endef
|
||||||
|
|
||||||
|
define Build/Compile
|
||||||
|
cd $(PKG_BUILD_DIR) && \
|
||||||
|
CARGO_TARGET_DIR=$(PKG_BUILD_DIR)/target \
|
||||||
|
cargo build --release --target $(RUSTC_TARGET)
|
||||||
|
endef
|
||||||
|
|
||||||
|
define Package/btest-rs/install
|
||||||
|
$(INSTALL_DIR) $(1)/usr/bin
|
||||||
|
$(INSTALL_BIN) $(PKG_BUILD_DIR)/target/$(RUSTC_TARGET)/release/btest $(1)/usr/bin/btest
|
||||||
|
|
||||||
|
$(INSTALL_DIR) $(1)/etc/init.d
|
||||||
|
$(INSTALL_BIN) ./files/btest.init $(1)/etc/init.d/btest
|
||||||
|
|
||||||
|
$(INSTALL_DIR) $(1)/etc/config
|
||||||
|
$(INSTALL_CONF) ./files/btest.config $(1)/etc/config/btest
|
||||||
|
endef
|
||||||
|
|
||||||
|
$(eval $(call BuildPackage,btest-rs))
|
||||||
117
deploy/openwrt/build-ipk.sh
Executable file
117
deploy/openwrt/build-ipk.sh
Executable file
@@ -0,0 +1,117 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# Build an OpenWrt .ipk package from a pre-built static binary.
|
||||||
|
# No OpenWrt SDK needed — just packages the binary with metadata.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# ./deploy/openwrt/build-ipk.sh <arch> [binary-path]
|
||||||
|
#
|
||||||
|
# Examples:
|
||||||
|
# ./deploy/openwrt/build-ipk.sh x86_64 dist/btest # from cross-compiled binary
|
||||||
|
# ./deploy/openwrt/build-ipk.sh aarch64 dist/btest # for RPi/ARM64 routers
|
||||||
|
# ./deploy/openwrt/build-ipk.sh mipsel target/release/btest # for MIPS little-endian
|
||||||
|
#
|
||||||
|
# Supported architectures: x86_64, aarch64, arm_cortex-a7, mipsel_24kc, mips_24kc
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
cd "$(dirname "$0")/../.."
|
||||||
|
|
||||||
|
ARCH="${1:?Usage: $0 <arch> [binary-path]}"
|
||||||
|
BINARY="${2:-dist/btest}"
|
||||||
|
VERSION="0.6.0"
|
||||||
|
PKG_NAME="btest-rs"
|
||||||
|
OUTPUT_DIR="dist"
|
||||||
|
|
||||||
|
if [ ! -f "$BINARY" ]; then
|
||||||
|
echo "Error: binary not found at $BINARY"
|
||||||
|
echo "Build it first: cargo build --release --target <target>"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p "$OUTPUT_DIR"
|
||||||
|
WORKDIR=$(mktemp -d)
|
||||||
|
trap "rm -rf $WORKDIR" EXIT
|
||||||
|
|
||||||
|
echo "=== Building ${PKG_NAME}_${VERSION}_${ARCH}.ipk ==="
|
||||||
|
|
||||||
|
# Create package structure
|
||||||
|
mkdir -p "$WORKDIR/data/usr/bin"
|
||||||
|
mkdir -p "$WORKDIR/data/etc/init.d"
|
||||||
|
mkdir -p "$WORKDIR/data/etc/config"
|
||||||
|
mkdir -p "$WORKDIR/control"
|
||||||
|
|
||||||
|
# Install files
|
||||||
|
cp "$BINARY" "$WORKDIR/data/usr/bin/btest"
|
||||||
|
chmod 755 "$WORKDIR/data/usr/bin/btest"
|
||||||
|
cp deploy/openwrt/files/btest.init "$WORKDIR/data/etc/init.d/btest"
|
||||||
|
chmod 755 "$WORKDIR/data/etc/init.d/btest"
|
||||||
|
cp deploy/openwrt/files/btest.config "$WORKDIR/data/etc/config/btest"
|
||||||
|
|
||||||
|
# Calculate installed size
|
||||||
|
INSTALLED_SIZE=$(du -sk "$WORKDIR/data" | awk '{print $1}')
|
||||||
|
|
||||||
|
# Control file
|
||||||
|
cat > "$WORKDIR/control/control" << EOF
|
||||||
|
Package: ${PKG_NAME}
|
||||||
|
Version: ${VERSION}-1
|
||||||
|
Depends: libc
|
||||||
|
Source: https://github.com/manawenuz/btest-rs
|
||||||
|
License: MIT AND Apache-2.0
|
||||||
|
Section: net
|
||||||
|
SourceName: ${PKG_NAME}
|
||||||
|
Maintainer: Siavash Sameni <manwe@manko.yoga>
|
||||||
|
Architecture: ${ARCH}
|
||||||
|
Installed-Size: ${INSTALLED_SIZE}
|
||||||
|
Description: MikroTik Bandwidth Test server and client
|
||||||
|
A Rust reimplementation of the MikroTik btest protocol.
|
||||||
|
Supports TCP/UDP, EC-SRP5 and MD5 auth, IPv4/IPv6.
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Post-install script
|
||||||
|
cat > "$WORKDIR/control/postinst" << 'EOF'
|
||||||
|
#!/bin/sh
|
||||||
|
[ "${IPKG_NO_SCRIPT}" = "1" ] && exit 0
|
||||||
|
/etc/init.d/btest enable 2>/dev/null || true
|
||||||
|
exit 0
|
||||||
|
EOF
|
||||||
|
chmod 755 "$WORKDIR/control/postinst"
|
||||||
|
|
||||||
|
# Pre-remove script
|
||||||
|
cat > "$WORKDIR/control/prerm" << 'EOF'
|
||||||
|
#!/bin/sh
|
||||||
|
/etc/init.d/btest stop 2>/dev/null || true
|
||||||
|
/etc/init.d/btest disable 2>/dev/null || true
|
||||||
|
exit 0
|
||||||
|
EOF
|
||||||
|
chmod 755 "$WORKDIR/control/prerm"
|
||||||
|
|
||||||
|
# Conffiles
|
||||||
|
cat > "$WORKDIR/control/conffiles" << EOF
|
||||||
|
/etc/config/btest
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Build the .ipk (it's just a tar.gz of tar.gz's)
|
||||||
|
cd "$WORKDIR"
|
||||||
|
|
||||||
|
# Create data.tar.gz
|
||||||
|
(cd data && tar czf ../data.tar.gz .)
|
||||||
|
|
||||||
|
# Create control.tar.gz
|
||||||
|
(cd control && tar czf ../control.tar.gz .)
|
||||||
|
|
||||||
|
# Create debian-binary
|
||||||
|
echo "2.0" > debian-binary
|
||||||
|
|
||||||
|
# Package it all
|
||||||
|
tar czf "${PKG_NAME}_${VERSION}-1_${ARCH}.ipk" debian-binary control.tar.gz data.tar.gz
|
||||||
|
|
||||||
|
cd -
|
||||||
|
cp "$WORKDIR/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk" "$OUTPUT_DIR/"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "Package: $OUTPUT_DIR/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk"
|
||||||
|
ls -lh "$OUTPUT_DIR/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk"
|
||||||
|
echo ""
|
||||||
|
echo "Install on OpenWrt:"
|
||||||
|
echo " scp $OUTPUT_DIR/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk root@router:/tmp/"
|
||||||
|
echo " ssh root@router 'opkg install /tmp/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk'"
|
||||||
|
echo " ssh root@router '/etc/init.d/btest enable && /etc/init.d/btest start'"
|
||||||
7
deploy/openwrt/files/btest.config
Normal file
7
deploy/openwrt/files/btest.config
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
config server
|
||||||
|
option enabled '0'
|
||||||
|
option port '2000'
|
||||||
|
option auth_user ''
|
||||||
|
option auth_pass ''
|
||||||
|
option ecsrp5 '0'
|
||||||
|
option syslog ''
|
||||||
34
deploy/openwrt/files/btest.init
Executable file
34
deploy/openwrt/files/btest.init
Executable file
@@ -0,0 +1,34 @@
|
|||||||
|
#!/bin/sh /etc/rc.common
|
||||||
|
# btest-rs OpenWrt init script
|
||||||
|
|
||||||
|
START=90
|
||||||
|
STOP=10
|
||||||
|
|
||||||
|
USE_PROCD=1
|
||||||
|
|
||||||
|
start_service() {
|
||||||
|
local enabled port auth_user auth_pass ecsrp5 syslog
|
||||||
|
|
||||||
|
config_load btest
|
||||||
|
config_get_bool enabled server enabled 0
|
||||||
|
[ "$enabled" -eq 0 ] && return
|
||||||
|
|
||||||
|
config_get port server port 2000
|
||||||
|
config_get auth_user server auth_user ''
|
||||||
|
config_get auth_pass server auth_pass ''
|
||||||
|
config_get_bool ecsrp5 server ecsrp5 0
|
||||||
|
config_get syslog server syslog ''
|
||||||
|
|
||||||
|
procd_open_instance
|
||||||
|
procd_set_param command /usr/bin/btest -s -P "$port"
|
||||||
|
|
||||||
|
[ -n "$auth_user" ] && procd_append_param command -a "$auth_user"
|
||||||
|
[ -n "$auth_pass" ] && procd_append_param command -p "$auth_pass"
|
||||||
|
[ "$ecsrp5" -eq 1 ] && procd_append_param command --ecsrp5
|
||||||
|
[ -n "$syslog" ] && procd_append_param command --syslog "$syslog"
|
||||||
|
|
||||||
|
procd_set_param respawn
|
||||||
|
procd_set_param stdout 1
|
||||||
|
procd_set_param stderr 1
|
||||||
|
procd_close_instance
|
||||||
|
}
|
||||||
73
deploy/rpm/btest-rs.spec
Normal file
73
deploy/rpm/btest-rs.spec
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
Name: btest-rs
|
||||||
|
Version: 0.6.0
|
||||||
|
Release: 1%{?dist}
|
||||||
|
Summary: MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth
|
||||||
|
|
||||||
|
License: MIT AND Apache-2.0
|
||||||
|
URL: https://github.com/manawenuz/btest-rs
|
||||||
|
Source0: https://github.com/manawenuz/btest-rs/archive/refs/tags/v%{version}.tar.gz
|
||||||
|
|
||||||
|
BuildRequires: cargo
|
||||||
|
BuildRequires: rust
|
||||||
|
ExclusiveArch: x86_64 aarch64
|
||||||
|
|
||||||
|
%description
|
||||||
|
A Rust reimplementation of the MikroTik Bandwidth Test (btest) protocol,
|
||||||
|
providing both server and client functionality with EC-SRP5 authentication.
|
||||||
|
|
||||||
|
%prep
|
||||||
|
%autosetup -n %{name}-%{version}
|
||||||
|
|
||||||
|
%build
|
||||||
|
export CARGO_TARGET_DIR=target
|
||||||
|
cargo build --release
|
||||||
|
|
||||||
|
%install
|
||||||
|
install -Dm755 target/release/btest %{buildroot}%{_bindir}/btest
|
||||||
|
install -Dm644 docs/man/btest.1 %{buildroot}%{_mandir}/man1/btest.1
|
||||||
|
install -Dm644 LICENSE %{buildroot}%{_datadir}/licenses/%{name}/LICENSE
|
||||||
|
|
||||||
|
# systemd service unit
|
||||||
|
install -d %{buildroot}%{_unitdir}
|
||||||
|
cat > %{buildroot}%{_unitdir}/btest.service << 'EOF'
|
||||||
|
[Unit]
|
||||||
|
Description=MikroTik Bandwidth Test Server (btest-rs)
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
ExecStart=/usr/bin/btest -s
|
||||||
|
Restart=always
|
||||||
|
RestartSec=5
|
||||||
|
DynamicUser=yes
|
||||||
|
NoNewPrivileges=yes
|
||||||
|
ProtectSystem=strict
|
||||||
|
ProtectHome=yes
|
||||||
|
PrivateTmp=yes
|
||||||
|
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||||
|
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||||
|
LimitNOFILE=65535
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
EOF
|
||||||
|
|
||||||
|
%files
|
||||||
|
%license LICENSE
|
||||||
|
%{_bindir}/btest
|
||||||
|
%{_mandir}/man1/btest.1*
|
||||||
|
%{_unitdir}/btest.service
|
||||||
|
|
||||||
|
%post
|
||||||
|
%systemd_post btest.service
|
||||||
|
|
||||||
|
%preun
|
||||||
|
%systemd_preun btest.service
|
||||||
|
|
||||||
|
%postun
|
||||||
|
%systemd_postun_with_restart btest.service
|
||||||
|
|
||||||
|
%changelog
|
||||||
|
* Mon Mar 30 2026 Siavash Sameni <manwe@manko.yoga> - 0.6.0-1
|
||||||
|
- Initial RPM package
|
||||||
30
deploy/rpm/build-rpm.sh
Executable file
30
deploy/rpm/build-rpm.sh
Executable file
@@ -0,0 +1,30 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# build-rpm.sh — Build the btest-rs RPM package
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SPEC_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
|
SPEC_FILE="${SPEC_DIR}/btest-rs.spec"
|
||||||
|
VERSION="0.6.0"
|
||||||
|
TARBALL="v${VERSION}.tar.gz"
|
||||||
|
SOURCE_URL="https://github.com/manawenuz/btest-rs/archive/refs/tags/${TARBALL}"
|
||||||
|
|
||||||
|
echo "==> Setting up rpmbuild tree"
|
||||||
|
mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
|
||||||
|
|
||||||
|
echo "==> Downloading source tarball"
|
||||||
|
if [ ! -f ~/rpmbuild/SOURCES/"${TARBALL}" ]; then
|
||||||
|
curl -fSL -o ~/rpmbuild/SOURCES/"${TARBALL}" "${SOURCE_URL}"
|
||||||
|
else
|
||||||
|
echo " (already present, skipping download)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "==> Copying spec file"
|
||||||
|
cp "${SPEC_FILE}" ~/rpmbuild/SPECS/btest-rs.spec
|
||||||
|
|
||||||
|
echo "==> Building RPM"
|
||||||
|
rpmbuild -ba ~/rpmbuild/SPECS/btest-rs.spec
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "==> Build complete. Packages:"
|
||||||
|
find ~/rpmbuild/RPMS -name '*.rpm' -print
|
||||||
|
find ~/rpmbuild/SRPMS -name '*.rpm' -print
|
||||||
75
deploy/rpm/test-rpm.sh
Executable file
75
deploy/rpm/test-rpm.sh
Executable file
@@ -0,0 +1,75 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# test-rpm.sh — Test the btest-rs RPM build inside a Fedora container
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
|
||||||
|
|
||||||
|
IMAGE="fedora:latest"
|
||||||
|
|
||||||
|
echo "==> Testing RPM build in ${IMAGE}"
|
||||||
|
docker run --rm \
|
||||||
|
-v "${REPO_ROOT}:/workspace:ro" \
|
||||||
|
"${IMAGE}" \
|
||||||
|
bash -euxc '
|
||||||
|
# ── Install build dependencies ──
|
||||||
|
dnf install -y rpm-build rpmdevtools curl gcc make \
|
||||||
|
systemd-rpm-macros
|
||||||
|
|
||||||
|
# Install Rust toolchain
|
||||||
|
curl --proto "=https" --tlsv1.2 -sSf https://sh.rustup.rs \
|
||||||
|
| sh -s -- -y --profile minimal
|
||||||
|
source "$HOME/.cargo/env"
|
||||||
|
|
||||||
|
# ── Set up rpmbuild tree ──
|
||||||
|
rpmdev-setuptree
|
||||||
|
|
||||||
|
VERSION="0.6.0"
|
||||||
|
TARBALL="v${VERSION}.tar.gz"
|
||||||
|
|
||||||
|
# Copy spec
|
||||||
|
cp /workspace/deploy/rpm/btest-rs.spec ~/rpmbuild/SPECS/
|
||||||
|
|
||||||
|
# Create source tarball from workspace
|
||||||
|
# rpmbuild expects btest-rs-VERSION/ top-level directory
|
||||||
|
mkdir -p /tmp/btest-rs-${VERSION}
|
||||||
|
cp -a /workspace/. /tmp/btest-rs-${VERSION}/
|
||||||
|
tar czf ~/rpmbuild/SOURCES/${TARBALL} -C /tmp btest-rs-${VERSION}
|
||||||
|
|
||||||
|
# ── Build RPM ──
|
||||||
|
rpmbuild -ba ~/rpmbuild/SPECS/btest-rs.spec
|
||||||
|
|
||||||
|
# ── Install the RPM ──
|
||||||
|
RPM=$(find ~/rpmbuild/RPMS -name "btest-rs-*.rpm" | head -1)
|
||||||
|
echo "Installing: ${RPM}"
|
||||||
|
dnf install -y "${RPM}"
|
||||||
|
|
||||||
|
# ── Verify installation ──
|
||||||
|
echo "--- btest --version ---"
|
||||||
|
btest --version
|
||||||
|
|
||||||
|
echo "--- Checking systemd unit ---"
|
||||||
|
systemctl cat btest.service || true
|
||||||
|
|
||||||
|
echo "--- Checking man page ---"
|
||||||
|
test -f /usr/share/man/man1/btest.1* && echo "man page OK" || echo "man page MISSING"
|
||||||
|
|
||||||
|
echo "--- Checking license ---"
|
||||||
|
test -f /usr/share/licenses/btest-rs/LICENSE && echo "license OK" || echo "license MISSING"
|
||||||
|
|
||||||
|
# ── Loopback bandwidth test ──
|
||||||
|
echo "--- Starting loopback test ---"
|
||||||
|
btest -s &
|
||||||
|
SERVER_PID=$!
|
||||||
|
sleep 2
|
||||||
|
|
||||||
|
btest -c 127.0.0.1 --duration 3 && echo "Loopback test PASSED" \
|
||||||
|
|| echo "Loopback test FAILED (exit $?)"
|
||||||
|
|
||||||
|
kill "${SERVER_PID}" 2>/dev/null || true
|
||||||
|
wait "${SERVER_PID}" 2>/dev/null || true
|
||||||
|
|
||||||
|
echo "==> All RPM tests completed."
|
||||||
|
'
|
||||||
|
|
||||||
|
echo "==> Fedora container test finished."
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
services:
|
services:
|
||||||
btest-server:
|
btest-server:
|
||||||
build: .
|
build: .
|
||||||
image: git.manko.yoga/manawenuz/btest-rs:latest
|
image: ghcr.io/manawenuz/btest-rs:latest
|
||||||
container_name: btest-server
|
container_name: btest-server
|
||||||
ports:
|
ports:
|
||||||
- "2000:2000/tcp"
|
- "2000:2000/tcp"
|
||||||
@@ -13,7 +13,7 @@ services:
|
|||||||
# Server with authentication enabled
|
# Server with authentication enabled
|
||||||
btest-server-auth:
|
btest-server-auth:
|
||||||
build: .
|
build: .
|
||||||
image: git.manko.yoga/manawenuz/btest-rs:latest
|
image: ghcr.io/manawenuz/btest-rs:latest
|
||||||
container_name: btest-server-auth
|
container_name: btest-server-auth
|
||||||
ports:
|
ports:
|
||||||
- "2010:2000/tcp"
|
- "2010:2000/tcp"
|
||||||
|
|||||||
@@ -2,282 +2,181 @@
|
|||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
|
||||||
btest-rs is a Rust reimplementation of the MikroTik Bandwidth Test protocol. It operates in two modes: **server** (accepts connections from MikroTik devices) and **client** (connects to MikroTik btest servers).
|
btest-rs is a Rust reimplementation of the MikroTik Bandwidth Test protocol. It operates in two modes: **server** (accepts connections from MikroTik devices) and **client** (connects to MikroTik btest servers). An optional **server-pro** mode adds multi-user support, quotas, and a web dashboard.
|
||||||
|
|
||||||
## Module Structure
|
## Module Structure
|
||||||
|
|
||||||
```mermaid
|
```
|
||||||
graph TB
|
src/
|
||||||
main["main.rs<br/>CLI parsing (clap)"]
|
├── main.rs # CLI entry point, argument parsing (clap)
|
||||||
server["server.rs<br/>Server mode"]
|
├── lib.rs # Public API (re-exports all modules for tests/pro)
|
||||||
client["client.rs<br/>Client mode"]
|
├── protocol.rs # Wire format: Command, StatusMessage, constants
|
||||||
protocol["protocol.rs<br/>Wire protocol types"]
|
├── auth.rs # MD5 challenge-response authentication
|
||||||
auth["auth.rs<br/>MD5 authentication"]
|
├── ecsrp5.rs # EC-SRP5 authentication (Curve25519 Weierstrass)
|
||||||
ecsrp5["ecsrp5.rs<br/>EC-SRP5 authentication<br/>(Curve25519 Weierstrass)"]
|
├── server.rs # Server mode: listener, TCP/UDP handlers, multi-conn
|
||||||
bandwidth["bandwidth.rs<br/>Rate control & reporting"]
|
├── client.rs # Client mode: connector, TCP/UDP handlers, status parsing
|
||||||
csv_output["csv_output.rs<br/>CSV result logging"]
|
├── bandwidth.rs # Rate limiting, formatting, shared BandwidthState, byte budget
|
||||||
syslog["syslog_logger.rs<br/>Remote syslog (RFC 3164)"]
|
├── cpu.rs # CPU sampler (macOS, Linux, Android, Windows, FreeBSD)
|
||||||
lib["lib.rs<br/>Public API for tests"]
|
├── csv_output.rs # CSV result logging (append-mode, auto-header)
|
||||||
|
├── syslog_logger.rs # Remote syslog sender (RFC 3164 / BSD format)
|
||||||
|
├── bin/
|
||||||
|
│ ├── client_only.rs # Stripped client binary for embedded/OpenWrt
|
||||||
|
│ └── server_only.rs # Stripped server binary for embedded/OpenWrt
|
||||||
|
└── server_pro/ # Optional (--features pro)
|
||||||
|
├── main.rs # Pro CLI: user management, quota flags, web port
|
||||||
|
├── server_loop.rs # Accept loop with auth, quotas, multi-conn sessions
|
||||||
|
├── user_db.rs # SQLite: users, usage, ip_usage, sessions, intervals
|
||||||
|
├── quota.rs # QuotaManager: per-user + per-IP limits, remaining_budget()
|
||||||
|
├── enforcer.rs # QuotaEnforcer: periodic checks, max_duration, StopReason
|
||||||
|
├── ldap_auth.rs # LDAP auth scaffold (not yet wired)
|
||||||
|
└── web/
|
||||||
|
└── mod.rs # Axum web dashboard: Chart.js, quota bars, JSON export
|
||||||
|
```
|
||||||
|
|
||||||
main --> server
|
## CLI Output Format
|
||||||
main --> client
|
|
||||||
main --> bandwidth
|
The client outputs one line per second per direction:
|
||||||
main --> csv_output
|
|
||||||
main --> syslog
|
```
|
||||||
server --> protocol
|
[ 5] TX 285.47 Mbps (35684352 bytes) cpu: 20%/62%
|
||||||
server --> auth
|
[ 5] RX 283.64 Mbps (35454988 bytes) cpu: 20%/62% lost: 12
|
||||||
server --> ecsrp5
|
```
|
||||||
server --> bandwidth
|
|
||||||
server --> syslog
|
Format: `[interval] direction speed (bytes) cpu: local%/remote% [lost: N]`
|
||||||
client --> protocol
|
|
||||||
client --> auth
|
At test end, a summary line:
|
||||||
client --> ecsrp5
|
```
|
||||||
client --> bandwidth
|
TEST_END peer=172.16.81.1 proto=TCP dir=both duration=60s tx_avg=284.94Mbps rx_avg=272.83Mbps tx_bytes=2137030656 rx_bytes=2046260728 lost=0
|
||||||
lib --> server
|
|
||||||
lib --> client
|
|
||||||
lib --> protocol
|
|
||||||
lib --> auth
|
|
||||||
lib --> ecsrp5
|
|
||||||
lib --> bandwidth
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Data Flow
|
## Data Flow
|
||||||
|
|
||||||
### Server Mode (MikroTik connects to us)
|
### Server Mode (MikroTik connects to us)
|
||||||
|
|
||||||
```mermaid
|
|
||||||
sequenceDiagram
|
|
||||||
participant MK as MikroTik Client
|
|
||||||
participant TCP as TCP Control<br/>(port 2000)
|
|
||||||
participant SRV as btest-rs Server
|
|
||||||
participant UDP as UDP Data<br/>(port 2001+)
|
|
||||||
|
|
||||||
MK->>TCP: Connect
|
|
||||||
SRV->>TCP: HELLO [01 00 00 00]
|
|
||||||
MK->>TCP: Command [16 bytes]
|
|
||||||
Note over SRV: Parse proto, direction,<br/>tx_size, speeds
|
|
||||||
|
|
||||||
alt No auth configured
|
|
||||||
SRV->>TCP: AUTH_OK [01 00 00 00]
|
|
||||||
else MD5 auth (RouterOS < 6.43)
|
|
||||||
SRV->>TCP: AUTH_REQUIRED [02 00 00 00]
|
|
||||||
SRV->>TCP: Challenge [16 random bytes]
|
|
||||||
MK->>TCP: Response [16 hash + 32 username]
|
|
||||||
Note over SRV: Verify MD5(pass + MD5(pass + challenge))
|
|
||||||
SRV->>TCP: AUTH_OK or AUTH_FAILED
|
|
||||||
else EC-SRP5 auth (RouterOS >= 6.43, --ecsrp5 flag)
|
|
||||||
SRV->>TCP: EC-SRP5 [03 00 00 00]
|
|
||||||
MK->>TCP: [len][username\0][client_pubkey:32][parity:1]
|
|
||||||
SRV->>TCP: [len][server_pubkey:32][parity:1][salt:16]
|
|
||||||
MK->>TCP: [len][client_confirmation:32]
|
|
||||||
SRV->>TCP: [len][server_confirmation:32]
|
|
||||||
Note over SRV: Curve25519 Weierstrass EC-SRP5<br/>See docs/ecsrp5-research.md
|
|
||||||
SRV->>TCP: AUTH_OK [01 00 00 00]
|
|
||||||
end
|
|
||||||
|
|
||||||
alt TCP mode
|
|
||||||
Note over SRV,MK: Data flows on same TCP connection
|
|
||||||
loop Every second
|
|
||||||
SRV-->>SRV: Print bandwidth stats
|
|
||||||
end
|
|
||||||
else UDP mode
|
|
||||||
SRV->>TCP: UDP port [2 bytes BE]
|
|
||||||
Note over SRV: Bind UDP socket
|
|
||||||
par TX Thread (if server transmits)
|
|
||||||
loop Continuous
|
|
||||||
SRV->>UDP: Data packets [seq + payload]
|
|
||||||
end
|
|
||||||
and RX Thread (if server receives)
|
|
||||||
loop Continuous
|
|
||||||
UDP->>SRV: Data packets [seq + payload]
|
|
||||||
end
|
|
||||||
and Status Loop (TCP control)
|
|
||||||
loop Every 1 second
|
|
||||||
MK->>TCP: Status [12 bytes]
|
|
||||||
SRV->>TCP: Status [12 bytes]
|
|
||||||
Note over SRV: Adjust TX speed<br/>based on client feedback
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
```
|
```
|
||||||
|
MikroTik → TCP:2000 → HELLO → Command [16 bytes] → Auth → Data Transfer
|
||||||
|
```
|
||||||
|
|
||||||
|
1. Server sends HELLO `[01 00 00 00]`
|
||||||
|
2. Client sends 16-byte command (protocol, direction, tx_size, speeds, conn_count)
|
||||||
|
3. Auth: none (`01`), MD5 (`02`), or EC-SRP5 (`03`)
|
||||||
|
4. TCP: data flows on same connection, 12-byte status messages interleaved every 1s
|
||||||
|
5. UDP: server sends port number, data on UDP, status exchange stays on TCP
|
||||||
|
|
||||||
### Client Mode (we connect to MikroTik)
|
### Client Mode (we connect to MikroTik)
|
||||||
|
|
||||||
```mermaid
|
1. Connect to MikroTik:2000
|
||||||
sequenceDiagram
|
2. Read HELLO, send command
|
||||||
participant CLI as btest-rs Client
|
3. Auto-detect auth type from response byte, authenticate
|
||||||
participant TCP as TCP Control
|
4. Start data transfer with status exchange
|
||||||
participant MK as MikroTik Server
|
|
||||||
|
|
||||||
CLI->>TCP: Connect to MikroTik:2000
|
### Status Message Format (12 bytes)
|
||||||
MK->>TCP: HELLO
|
|
||||||
CLI->>TCP: Command [16 bytes]
|
|
||||||
Note over CLI: direction bits tell server<br/>what to do (TX/RX/BOTH)
|
|
||||||
|
|
||||||
alt Auth response 01 (no auth)
|
|
||||||
Note over CLI: No auth, proceed
|
|
||||||
else Auth response 02 (MD5)
|
|
||||||
MK->>TCP: Challenge [16 random bytes]
|
|
||||||
CLI->>TCP: MD5 response [48 bytes]
|
|
||||||
MK->>TCP: AUTH_OK
|
|
||||||
else Auth response 03 (EC-SRP5)
|
|
||||||
CLI->>TCP: [len][username\0][client_pubkey:32][parity:1]
|
|
||||||
MK->>TCP: [len][server_pubkey:32][parity:1][salt:16]
|
|
||||||
CLI->>TCP: [len][client_confirmation:32]
|
|
||||||
MK->>TCP: [len][server_confirmation:32]
|
|
||||||
MK->>TCP: AUTH_OK
|
|
||||||
end
|
|
||||||
|
|
||||||
Note over CLI,MK: Data transfer begins<br/>(TCP or UDP, same as server)
|
|
||||||
```
|
```
|
||||||
|
[0x07][cpu:1][pad:2][seq:4 LE][bytes_received:4 LE]
|
||||||
|
```
|
||||||
|
|
||||||
|
- Byte 0: `0x07` (STATUS_MSG_TYPE)
|
||||||
|
- Byte 1: `0x80 | cpu_percentage` (MikroTik encoding)
|
||||||
|
- Bytes 4-7: sequence number (little-endian u32)
|
||||||
|
- Bytes 8-11: bytes received this interval (little-endian u32)
|
||||||
|
|
||||||
## Threading Model
|
## Threading Model
|
||||||
|
|
||||||
```mermaid
|
All I/O is async via tokio. Per-client:
|
||||||
graph TB
|
- **TX task**: sends data packets at target rate
|
||||||
subgraph "Server Process"
|
- **RX task**: receives data, counts bytes, extracts status messages (TCP BOTH mode)
|
||||||
LISTEN["Main Loop<br/>Accept connections"]
|
- **Status loop**: exchanges 12-byte status messages every 1s, prints bandwidth
|
||||||
LISTEN -->|spawn per client| HANDLER
|
- **Status reader** (TCP TX-only): reads server's status messages for remote CPU
|
||||||
|
|
||||||
subgraph "Per-Client Tasks (tokio)"
|
Shared state via `Arc<BandwidthState>` with atomic counters — no mutexes.
|
||||||
HANDLER["Connection Handler<br/>Handshake + Auth"]
|
|
||||||
HANDLER --> TX["TX Task<br/>Send data packets"]
|
|
||||||
HANDLER --> RX["RX Task<br/>Receive data packets"]
|
|
||||||
HANDLER --> STATUS["Status Loop<br/>Exchange stats every 1s"]
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph "Shared State (Arc + Atomics)"
|
### BandwidthState Fields
|
||||||
STATE["BandwidthState"]
|
|
||||||
TX_BYTES["tx_bytes: AtomicU64"]
|
| Field | Type | Purpose |
|
||||||
RX_BYTES["rx_bytes: AtomicU64"]
|
|-------|------|---------|
|
||||||
TX_SPEED["tx_speed: AtomicU32"]
|
| `tx_bytes` | AtomicU64 | Bytes sent this interval (reset by swap) |
|
||||||
RUNNING["running: AtomicBool"]
|
| `rx_bytes` | AtomicU64 | Bytes received this interval |
|
||||||
end
|
| `tx_speed` | AtomicU32 | Target TX speed (dynamic, from server feedback) |
|
||||||
|
| `running` | AtomicBool | Test active flag |
|
||||||
|
| `remote_cpu` | AtomicU8 | Remote peer's CPU (from status messages) |
|
||||||
|
| `byte_budget` | AtomicU64 | Remaining quota bytes (u64::MAX = unlimited) |
|
||||||
|
| `total_tx_bytes` | AtomicU64 | Cumulative TX (never reset) |
|
||||||
|
| `total_rx_bytes` | AtomicU64 | Cumulative RX (never reset) |
|
||||||
|
|
||||||
|
## Server Pro Architecture
|
||||||
|
|
||||||
|
Optional feature (`--features pro`) providing a multi-user public btest server.
|
||||||
|
|
||||||
TX --> TX_BYTES
|
|
||||||
RX --> RX_BYTES
|
|
||||||
STATUS --> TX_BYTES
|
|
||||||
STATUS --> RX_BYTES
|
|
||||||
STATUS --> TX_SPEED
|
|
||||||
TX --> TX_SPEED
|
|
||||||
TX --> RUNNING
|
|
||||||
RX --> RUNNING
|
|
||||||
STATUS --> RUNNING
|
|
||||||
```
|
```
|
||||||
|
Accept → IP check → HELLO → Command → Auth (DB) → Quota check → Budget set → Test
|
||||||
|
↓
|
||||||
|
QuotaEnforcer (parallel)
|
||||||
|
- checks every N seconds
|
||||||
|
- max_duration timeout
|
||||||
|
- sets running=false on exceed
|
||||||
|
```
|
||||||
|
|
||||||
|
**Byte budget**: Before the test starts, `remaining_budget()` computes the minimum remaining quota across all applicable limits. This is stored in `BandwidthState.byte_budget`. Every TX/RX loop checks `spend_budget()` per-packet — when budget hits 0, the test stops immediately. This prevents quota overshoot even on 10+ Gbps links.
|
||||||
|
|
||||||
|
**Multi-connection TCP**: MikroTik sends `tcp_conn_count` connections. The first authenticates and registers a session token. Subsequent connections match by token and join. When all connections arrive, the test starts with per-stream TX/RX tasks.
|
||||||
|
|
||||||
|
**Web dashboard** (axum):
|
||||||
|
- `GET /` — landing page with instructions
|
||||||
|
- `GET /dashboard/{ip}` — per-IP dashboard with Chart.js graph, session table, quota bars
|
||||||
|
- `GET /api/ip/{ip}/stats` — aggregate stats JSON
|
||||||
|
- `GET /api/ip/{ip}/sessions` — session list JSON
|
||||||
|
- `GET /api/ip/{ip}/quota` — quota usage JSON
|
||||||
|
- `GET /api/ip/{ip}/export` — full export with human-readable fields
|
||||||
|
- `GET /api/session/{id}/intervals` — per-second throughput data
|
||||||
|
|
||||||
|
## CPU Usage Monitoring
|
||||||
|
|
||||||
|
A background OS thread samples system CPU every 1 second:
|
||||||
|
|
||||||
|
| Platform | Method |
|
||||||
|
|----------|--------|
|
||||||
|
| macOS | `host_statistics(HOST_CPU_LOAD_INFO)` |
|
||||||
|
| Linux | `/proc/stat` aggregate CPU line |
|
||||||
|
| Android | `/proc/stat` (same as Linux) |
|
||||||
|
| Windows | `GetSystemTimes()` FFI |
|
||||||
|
| FreeBSD | `sysctl kern.cp_time` |
|
||||||
|
|
||||||
|
Stored in global `AtomicU8`, included in status messages as `0x80 | percentage`.
|
||||||
|
|
||||||
|
## Build Targets
|
||||||
|
|
||||||
|
| Target | Binary | Notes |
|
||||||
|
|--------|--------|-------|
|
||||||
|
| `x86_64-unknown-linux-musl` | btest | Static, zero deps |
|
||||||
|
| `aarch64-unknown-linux-musl` | btest | RPi 4/5, ARM servers |
|
||||||
|
| `armv7-unknown-linux-musleabihf` | btest | RPi 3, OpenWrt |
|
||||||
|
| `x86_64-pc-windows-gnu` | btest.exe | Cross-compiled |
|
||||||
|
| `aarch64-linux-android` | btest | Termux ARMv8 |
|
||||||
|
| `armv7-linux-androideabi` | btest | Termux ARMv7 |
|
||||||
|
| macOS (native) | btest | Apple Silicon + Intel |
|
||||||
|
| Docker (multi-arch) | image | amd64 + arm64 |
|
||||||
|
|
||||||
## Key Design Decisions
|
## Key Design Decisions
|
||||||
|
|
||||||
### 1. Tokio async runtime
|
1. **Tokio async runtime** — all I/O is async, handles hundreds of concurrent connections
|
||||||
|
2. **Lock-free shared state** — AtomicU64 counters, `swap(0)` reads and resets per interval
|
||||||
|
3. **Direction bits from server perspective** — `0x01`=server RX, `0x02`=server TX, `0x03`=both
|
||||||
|
4. **TCP socket half keepalive** — dropping `OwnedWriteHalf` sends FIN, so unused halves are kept alive
|
||||||
|
5. **Static musl binary** — ~2 MB, zero runtime dependencies
|
||||||
|
6. **EC-SRP5 with big integer arithmetic** — Curve25519 Weierstrass form via `num-bigint`
|
||||||
|
7. **Global singletons for syslog/CSV** — `Mutex<Option<...>>` statics, initialized once at startup
|
||||||
|
8. **Shared BandwidthState for timeout survival** — state created in main(), survives tokio cancellation
|
||||||
|
9. **Inline byte budget** — per-packet quota check with fast path (u64::MAX = unlimited, returns immediately)
|
||||||
|
10. **TCP status message scanning** — RX loop detects 12-byte status messages in the data stream by scanning for `0x07` marker byte to extract remote CPU
|
||||||
|
|
||||||
All I/O is async via tokio. Each client connection spawns independent tasks for TX, RX, and status exchange. This allows handling hundreds of concurrent connections on a single thread pool.
|
## Tests
|
||||||
|
|
||||||
### 2. Lock-free shared state
|
| Suite | Count | What |
|
||||||
|
|-------|-------|------|
|
||||||
TX/RX threads and the status loop share bandwidth counters via `AtomicU64`. No mutexes needed -- `swap(0)` atomically reads and resets counters each interval.
|
| Unit tests (lib) | 12 | Bandwidth parsing, CPU sampling, auth hash vectors |
|
||||||
|
| Enforcer tests (pro) | 10 | Budget, quota, duration, flush |
|
||||||
### 3. Sequential status loop (matching C pselect)
|
| Integration tests | 8 | Server/client handshake, auth, TCP data |
|
||||||
|
| EC-SRP5 tests | 6 | Full auth flow, wrong password, UDP bidir |
|
||||||
The UDP status exchange uses a sequential timeout-read-then-send pattern rather than `tokio::select!`. This ensures our status messages are sent exactly every 1 second, preventing MikroTik's speed adaptation from seeing irregular feedback.
|
| Full integration | 23 | All protocols × directions, IPv4/6, CSV, syslog, CPU |
|
||||||
|
| **Total** | **59** | |
|
||||||
### 4. Direction bits from server perspective
|
|
||||||
|
|
||||||
The direction byte in the protocol means what the **server** should do:
|
|
||||||
- `0x01` (CMD_DIR_RX) = server receives
|
|
||||||
- `0x02` (CMD_DIR_TX) = server transmits
|
|
||||||
- `0x03` (CMD_DIR_BOTH) = bidirectional
|
|
||||||
|
|
||||||
The client inverts before sending: client "transmit" sends `CMD_DIR_RX` (telling server to receive).
|
|
||||||
|
|
||||||
### 5. TCP socket half keepalive
|
|
||||||
|
|
||||||
When only one direction is active (e.g., TX only), the unused socket half is kept alive. Dropping `OwnedWriteHalf` sends a TCP FIN, which MikroTik interprets as disconnection.
|
|
||||||
|
|
||||||
### 6. Static musl binary
|
|
||||||
|
|
||||||
Release builds use musl for a fully static binary with zero runtime dependencies. The binary is approximately 2 MB and runs on any Linux distribution.
|
|
||||||
|
|
||||||
### 7. EC-SRP5 with big integer arithmetic
|
|
||||||
|
|
||||||
The EC-SRP5 implementation uses `num-bigint` for Curve25519 Weierstrass-form elliptic curve arithmetic. MikroTik's authentication uses the Weierstrass form (not the more common Montgomery or Edwards forms), requiring direct field arithmetic over the prime `2^255 - 19`. The implementation includes point multiplication, `lift_x`, `redp1` (hash-to-curve), and Montgomery coordinate conversion.
|
|
||||||
|
|
||||||
### 8. Global singletons for syslog and CSV
|
|
||||||
|
|
||||||
The syslog and CSV modules use `Mutex<Option<...>>` global statics. This avoids threading state through every function call while remaining safe. Both modules are initialized once at startup and used from any async task via their public API functions.
|
|
||||||
|
|
||||||
### 9. Shared BandwidthState for client duration timeout
|
|
||||||
|
|
||||||
When running with `--duration`, the tokio timeout cancels the client future. To preserve stats accumulated during the test, `BandwidthState` is created in `main()` and passed as an `Arc` into `run_client()`. The state survives cancellation because `main()` holds a reference. The `record_interval()` method accumulates totals that `summary()` returns.
|
|
||||||
|
|
||||||
### 10. IPv6 socket handling
|
|
||||||
|
|
||||||
IPv6 requires special handling on macOS:
|
|
||||||
- UDP sockets bind to `[::]` for IPv6 peers, `0.0.0.0` for IPv4
|
|
||||||
- Socket send/receive buffers set to 4MB via `socket2` before wrapping with tokio
|
|
||||||
- `SocketAddr::new()` used instead of string formatting (avoids `[addr]:port` parsing issues)
|
|
||||||
- Connected sockets preferred for single-connection (avoids ENOBUFS on `send_to()`)
|
|
||||||
- NDP probe packet sent before data blast to populate neighbor cache
|
|
||||||
- Adaptive backoff on ENOBUFS (200μs→10ms, resets on success)
|
|
||||||
|
|
||||||
### 11. CPU usage monitoring
|
|
||||||
|
|
||||||
A background OS thread samples system CPU every 1 second via:
|
|
||||||
- **macOS:** `host_statistics(HOST_CPU_LOAD_INFO)` — returns user/system/idle/nice ticks
|
|
||||||
- **Linux:** `/proc/stat` — reads aggregate CPU line
|
|
||||||
|
|
||||||
The percentage is stored in a global `AtomicU8` and included in every status message at byte 1 using MikroTik's encoding: `0x80 | percentage`. On receive, the remote CPU is decoded with `byte & 0x7F` and capped at 100%. Both local and remote CPU are displayed per interval and logged to CSV/syslog.
|
|
||||||
|
|
||||||
## File Layout
|
|
||||||
|
|
||||||
```
|
|
||||||
btest-rs/
|
|
||||||
├── src/
|
|
||||||
│ ├── main.rs # CLI entry point, argument parsing (clap)
|
|
||||||
│ ├── lib.rs # Public API (used by integration tests)
|
|
||||||
│ ├── protocol.rs # Wire format: Command, StatusMessage, constants
|
|
||||||
│ ├── auth.rs # MD5 challenge-response authentication
|
|
||||||
│ ├── ecsrp5.rs # EC-SRP5 authentication (Curve25519 Weierstrass)
|
|
||||||
│ ├── server.rs # Server mode: listener, TCP/UDP handlers
|
|
||||||
│ ├── client.rs # Client mode: connector, TCP/UDP handlers
|
|
||||||
│ ├── bandwidth.rs # Rate limiting, formatting, shared state
|
|
||||||
│ ├── cpu.rs # CPU usage sampler (macOS + Linux)
|
|
||||||
│ ├── csv_output.rs # CSV result logging (append-mode, auto-header)
|
|
||||||
│ └── syslog_logger.rs # Remote syslog sender (RFC 3164 / BSD format)
|
|
||||||
├── tests/
|
|
||||||
│ └── integration_test.rs # End-to-end server/client tests
|
|
||||||
├── scripts/
|
|
||||||
│ ├── build-linux.sh # Cross-compile for x86_64 Linux (musl)
|
|
||||||
│ ├── build-macos-release.sh # macOS release build
|
|
||||||
│ ├── install-service.sh # systemd service installer
|
|
||||||
│ ├── push-docker.sh # Push Docker image to registry
|
|
||||||
│ ├── test-local.sh # Loopback self-test
|
|
||||||
│ ├── test-mikrotik.sh # Test against MikroTik device
|
|
||||||
│ ├── test-docker.sh # Docker container test
|
|
||||||
│ └── debug-capture.sh # Packet capture for debugging
|
|
||||||
├── docs/
|
|
||||||
│ ├── architecture.md # This file
|
|
||||||
│ ├── protocol.md # Protocol specification
|
|
||||||
│ ├── user-guide.md # Usage documentation
|
|
||||||
│ ├── docker.md # Docker & deployment guide
|
|
||||||
│ ├── ecsrp5-research.md # EC-SRP5 reverse-engineering notes
|
|
||||||
│ └── man/
|
|
||||||
│ └── btest.1 # Unix manual page (troff format)
|
|
||||||
├── tests/
|
|
||||||
│ ├── integration_test.rs # Basic server/client handshake tests
|
|
||||||
│ ├── ecsrp5_test.rs # EC-SRP5 authentication tests
|
|
||||||
│ └── full_integration_test.rs # Comprehensive: all protocols, IPv4/6, CSV, syslog
|
|
||||||
├── deploy/
|
|
||||||
│ └── syslog-ng-btest.conf # syslog-ng configuration for btest events
|
|
||||||
├── proto-test/ # Python EC-SRP5 prototype (research branch)
|
|
||||||
│ ├── btest_ecsrp5_client.py # Working Python btest EC-SRP5 client
|
|
||||||
│ ├── btest_mitm.py # MITM proxy for protocol analysis
|
|
||||||
│ └── elliptic_curves.py # Curve25519 Weierstrass (MarginResearch)
|
|
||||||
├── KNOWN_ISSUES.md # Known bugs and platform limitations
|
|
||||||
├── Dockerfile # Production Docker image (multi-stage)
|
|
||||||
├── Dockerfile.cross # Cross-compilation for Linux x86_64
|
|
||||||
├── docker-compose.yml # Docker Compose configuration
|
|
||||||
├── Cargo.toml # Rust package manifest
|
|
||||||
├── Cargo.lock # Dependency lock file
|
|
||||||
├── LICENSE # MIT License
|
|
||||||
└── btest-opensource/ # Original C implementation (git submodule)
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -1,11 +1,12 @@
|
|||||||
# Docker and Deployment Guide
|
# Docker and Deployment Guide
|
||||||
|
|
||||||
## Container Registry
|
## Container Registries
|
||||||
|
|
||||||
Images are published to:
|
Images are published to:
|
||||||
|
|
||||||
```
|
```
|
||||||
git.manko.yoga/manawenuz/btest-rs
|
git.manko.yoga/manawenuz/btest-rs # Gitea registry
|
||||||
|
ghcr.io/manawenuz/btest-rs # GitHub Container Registry
|
||||||
```
|
```
|
||||||
|
|
||||||
## Quick Start
|
## Quick Start
|
||||||
@@ -87,14 +88,14 @@ docker run --rm -it btest-rs -c 192.168.88.1 -r -a admin -p password
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Pull from Gitea registry
|
# Pull from Gitea registry
|
||||||
docker pull git.manko.yoga/manawenuz/btest-rs:latest
|
docker pull ghcr.io/manawenuz/btest-rs:latest
|
||||||
|
|
||||||
# Run server
|
# Run server
|
||||||
docker run --rm -it \
|
docker run --rm -it \
|
||||||
-p 2000:2000/tcp \
|
-p 2000:2000/tcp \
|
||||||
-p 2001-2100:2001-2100/udp \
|
-p 2001-2100:2001-2100/udp \
|
||||||
-p 2257-2356:2257-2356/udp \
|
-p 2257-2356:2257-2356/udp \
|
||||||
git.manko.yoga/manawenuz/btest-rs:latest -s -v
|
ghcr.io/manawenuz/btest-rs:latest -s -v
|
||||||
```
|
```
|
||||||
|
|
||||||
## Docker Compose
|
## Docker Compose
|
||||||
@@ -185,7 +186,7 @@ docker build -t btest-rs .
|
|||||||
|
|
||||||
# With custom tag
|
# With custom tag
|
||||||
docker build -t git.manko.yoga/manawenuz/btest-rs:latest .
|
docker build -t git.manko.yoga/manawenuz/btest-rs:latest .
|
||||||
docker build -t git.manko.yoga/manawenuz/btest-rs:0.5.0 .
|
docker build -t git.manko.yoga/manawenuz/btest-rs:0.6.0 .
|
||||||
```
|
```
|
||||||
|
|
||||||
### Multi-platform build
|
### Multi-platform build
|
||||||
@@ -193,7 +194,7 @@ docker build -t git.manko.yoga/manawenuz/btest-rs:0.5.0 .
|
|||||||
```bash
|
```bash
|
||||||
docker buildx build \
|
docker buildx build \
|
||||||
--platform linux/amd64,linux/arm64 \
|
--platform linux/amd64,linux/arm64 \
|
||||||
-t git.manko.yoga/manawenuz/btest-rs:latest \
|
-t ghcr.io/manawenuz/btest-rs:latest \
|
||||||
--push .
|
--push .
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -208,9 +209,9 @@ docker build -t git.manko.yoga/manawenuz/btest-rs:latest .
|
|||||||
docker push git.manko.yoga/manawenuz/btest-rs:latest
|
docker push git.manko.yoga/manawenuz/btest-rs:latest
|
||||||
|
|
||||||
# Also tag with version
|
# Also tag with version
|
||||||
docker tag git.manko.yoga/manawenuz/btest-rs:latest \
|
docker tag ghcr.io/manawenuz/btest-rs:latest \
|
||||||
git.manko.yoga/manawenuz/btest-rs:0.5.0
|
git.manko.yoga/manawenuz/btest-rs:0.6.0
|
||||||
docker push git.manko.yoga/manawenuz/btest-rs:0.5.0
|
docker push git.manko.yoga/manawenuz/btest-rs:0.6.0
|
||||||
```
|
```
|
||||||
|
|
||||||
## Deployment Options
|
## Deployment Options
|
||||||
@@ -223,7 +224,7 @@ docker run -d --name btest-server \
|
|||||||
-p 2000:2000/tcp \
|
-p 2000:2000/tcp \
|
||||||
-p 2001-2100:2001-2100/udp \
|
-p 2001-2100:2001-2100/udp \
|
||||||
-p 2257-2356:2257-2356/udp \
|
-p 2257-2356:2257-2356/udp \
|
||||||
git.manko.yoga/manawenuz/btest-rs:latest \
|
ghcr.io/manawenuz/btest-rs:latest \
|
||||||
-s -a admin -p password --ecsrp5 -v
|
-s -a admin -p password --ecsrp5 -v
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
50
scripts/push-docker-all.sh
Executable file
50
scripts/push-docker-all.sh
Executable file
@@ -0,0 +1,50 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# Build and push Docker image to both Gitea and GitHub Container Registry.
|
||||||
|
#
|
||||||
|
# Prerequisites:
|
||||||
|
# docker login git.manko.yoga (Gitea — your username + token)
|
||||||
|
# docker login ghcr.io (GitHub — your username + PAT with packages:write)
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# ./scripts/push-docker-all.sh v0.6.0
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
cd "$(dirname "$0")/.."
|
||||||
|
|
||||||
|
if [[ -f .env ]]; then
|
||||||
|
set -a; source .env; set +a
|
||||||
|
fi
|
||||||
|
|
||||||
|
TAG="${1:?Usage: $0 <tag> (e.g. v0.6.0)}"
|
||||||
|
|
||||||
|
GITEA_IMAGE="git.manko.yoga/manawenuz/btest-rs"
|
||||||
|
GHCR_IMAGE="ghcr.io/manawenuz/btest-rs"
|
||||||
|
|
||||||
|
echo "=== Building Docker image ==="
|
||||||
|
docker build \
|
||||||
|
-t "${GITEA_IMAGE}:${TAG}" \
|
||||||
|
-t "${GITEA_IMAGE}:latest" \
|
||||||
|
-t "${GHCR_IMAGE}:${TAG}" \
|
||||||
|
-t "${GHCR_IMAGE}:latest" \
|
||||||
|
.
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=== Pushing to Gitea ==="
|
||||||
|
docker push "${GITEA_IMAGE}:${TAG}"
|
||||||
|
docker push "${GITEA_IMAGE}:latest"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=== Pushing to GitHub Container Registry ==="
|
||||||
|
docker push "${GHCR_IMAGE}:${TAG}"
|
||||||
|
docker push "${GHCR_IMAGE}:latest"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "Done! Images pushed:"
|
||||||
|
echo " ${GITEA_IMAGE}:${TAG}"
|
||||||
|
echo " ${GITEA_IMAGE}:latest"
|
||||||
|
echo " ${GHCR_IMAGE}:${TAG}"
|
||||||
|
echo " ${GHCR_IMAGE}:latest"
|
||||||
|
echo ""
|
||||||
|
echo "Pull with:"
|
||||||
|
echo " docker pull ${GHCR_IMAGE}:${TAG}"
|
||||||
|
echo " docker run --rm -p 2000:2000 -p 2001-2100:2001-2100/udp ${GHCR_IMAGE}:${TAG} -s -v"
|
||||||
120
scripts/sync-github-release.sh
Executable file
120
scripts/sync-github-release.sh
Executable file
@@ -0,0 +1,120 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# Sync a release from Gitea to GitHub.
|
||||||
|
# Downloads all binaries from Gitea release, creates GitHub release, uploads them.
|
||||||
|
#
|
||||||
|
# Prerequisites:
|
||||||
|
# gh auth login (GitHub CLI authenticated)
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# ./scripts/sync-github-release.sh v0.6.0
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
cd "$(dirname "$0")/.."
|
||||||
|
|
||||||
|
if [[ -f .env ]]; then
|
||||||
|
set -a; source .env; set +a
|
||||||
|
fi
|
||||||
|
|
||||||
|
TAG="${1:?Usage: $0 <tag> (e.g. v0.6.0)}"
|
||||||
|
GITEA_URL="https://git.manko.yoga"
|
||||||
|
GITEA_REPO="manawenuz/btest-rs"
|
||||||
|
GITHUB_REPO="manawenuz/btest-rs"
|
||||||
|
|
||||||
|
echo "=== Downloading assets from Gitea release ${TAG} ==="
|
||||||
|
mkdir -p /tmp/btest-release-${TAG}
|
||||||
|
cd /tmp/btest-release-${TAG}
|
||||||
|
rm -f *.tar.gz *.zip *.txt
|
||||||
|
|
||||||
|
# Get asset list from Gitea API
|
||||||
|
ASSETS=$(curl -sf "${GITEA_URL}/api/v1/repos/${GITEA_REPO}/releases/tags/${TAG}" | \
|
||||||
|
python3 -c "import sys,json; [print(a['browser_download_url']) for a in json.load(sys.stdin).get('assets',[])]")
|
||||||
|
|
||||||
|
if [ -z "$ASSETS" ]; then
|
||||||
|
echo "No assets found for ${TAG} on Gitea. Check if the release exists."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
for url in $ASSETS; do
|
||||||
|
FILENAME=$(basename "$url")
|
||||||
|
echo " Downloading: $FILENAME"
|
||||||
|
curl -sLO "$url"
|
||||||
|
done
|
||||||
|
|
||||||
|
# Merge all separate .sha256 files into checksums-sha256.txt
|
||||||
|
# and remove the individual .sha256 files
|
||||||
|
echo ""
|
||||||
|
echo "=== Merging checksums ==="
|
||||||
|
for sha_file in *.sha256; do
|
||||||
|
[ -f "$sha_file" ] || continue
|
||||||
|
echo " Merging: $sha_file"
|
||||||
|
cat "$sha_file" >> checksums-sha256.txt
|
||||||
|
rm "$sha_file"
|
||||||
|
done
|
||||||
|
|
||||||
|
# Add checksums for any files not yet in checksums-sha256.txt
|
||||||
|
for f in *.tar.gz *.zip; do
|
||||||
|
[ -f "$f" ] || continue
|
||||||
|
if ! grep -q "$f" checksums-sha256.txt 2>/dev/null; then
|
||||||
|
echo " Adding checksum for: $f"
|
||||||
|
shasum -a 256 "$f" >> checksums-sha256.txt
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Sort and deduplicate
|
||||||
|
sort -u -k2 checksums-sha256.txt > checksums-sha256.tmp && mv checksums-sha256.tmp checksums-sha256.txt
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "Checksums:"
|
||||||
|
cat checksums-sha256.txt
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "Files to upload:"
|
||||||
|
ls -lh *.tar.gz *.zip checksums-sha256.txt 2>/dev/null
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=== Creating GitHub release ${TAG} ==="
|
||||||
|
gh release create "${TAG}" \
|
||||||
|
--repo "${GITHUB_REPO}" \
|
||||||
|
--title "btest-rs ${TAG}" \
|
||||||
|
--notes "## Downloads
|
||||||
|
|
||||||
|
| Platform | Architecture | File |
|
||||||
|
|----------|-------------|------|
|
||||||
|
| Linux | x86_64 | btest-linux-x86_64.tar.gz |
|
||||||
|
| Linux | aarch64 (RPi 64-bit) | btest-linux-aarch64.tar.gz |
|
||||||
|
| Linux | armv7 (RPi 32-bit) | btest-linux-armv7.tar.gz |
|
||||||
|
| Windows | x86_64 | btest-windows-x86_64.zip |
|
||||||
|
| macOS | aarch64 (Apple Silicon) | btest-darwin-aarch64.tar.gz |
|
||||||
|
| Docker | x86_64 | \`docker pull ghcr.io/manawenuz/btest-rs:${TAG}\` |
|
||||||
|
|
||||||
|
### Quick Install (Linux)
|
||||||
|
|
||||||
|
\`\`\`bash
|
||||||
|
curl -LO https://github.com/${GITHUB_REPO}/releases/download/${TAG}/btest-linux-x86_64.tar.gz
|
||||||
|
tar xzf btest-linux-x86_64.tar.gz
|
||||||
|
sudo mv btest /usr/local/bin/
|
||||||
|
\`\`\`
|
||||||
|
|
||||||
|
### Raspberry Pi
|
||||||
|
|
||||||
|
\`\`\`bash
|
||||||
|
# 64-bit
|
||||||
|
curl -LO https://github.com/${GITHUB_REPO}/releases/download/${TAG}/btest-linux-aarch64.tar.gz
|
||||||
|
tar xzf btest-linux-aarch64.tar.gz
|
||||||
|
sudo mv btest /usr/local/bin/
|
||||||
|
|
||||||
|
# 32-bit
|
||||||
|
curl -LO https://github.com/${GITHUB_REPO}/releases/download/${TAG}/btest-linux-armv7.tar.gz
|
||||||
|
tar xzf btest-linux-armv7.tar.gz
|
||||||
|
sudo mv btest /usr/local/bin/
|
||||||
|
\`\`\`
|
||||||
|
" \
|
||||||
|
./*.tar.gz ./*.zip ./*.txt 2>/dev/null || true
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=== Done! ==="
|
||||||
|
echo "https://github.com/${GITHUB_REPO}/releases/tag/${TAG}"
|
||||||
|
|
||||||
|
# Cleanup
|
||||||
|
cd -
|
||||||
|
rm -rf /tmp/btest-release-${TAG}
|
||||||
64
scripts/test-aur-remote.sh
Executable file
64
scripts/test-aur-remote.sh
Executable file
@@ -0,0 +1,64 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# Test the AUR package on a remote x86_64 Linux server using Docker.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# ./scripts/test-aur-remote.sh [user@host]
|
||||||
|
#
|
||||||
|
# Spins up an Arch container, installs btest-rs via yay (like a real user),
|
||||||
|
# runs loopback tests, cleans up.
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
REMOTE="${1:-}"
|
||||||
|
|
||||||
|
TEST_SCRIPT='
|
||||||
|
docker run --rm archlinux:latest bash -c "
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
echo \"[1/4] Installing yay...\"
|
||||||
|
pacman -Syu --noconfirm base-devel git sudo >/dev/null 2>&1
|
||||||
|
useradd -m builder
|
||||||
|
echo \"builder ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers
|
||||||
|
su builder -c \"
|
||||||
|
cd /tmp
|
||||||
|
git clone https://aur.archlinux.org/yay-bin.git 2>/dev/null
|
||||||
|
cd yay-bin
|
||||||
|
makepkg -si --noconfirm 2>&1 | tail -3
|
||||||
|
\"
|
||||||
|
|
||||||
|
echo \"[2/4] Installing btest-rs from AUR via yay...\"
|
||||||
|
su builder -c \"yay -S btest-rs --noconfirm 2>&1 | tail -10\"
|
||||||
|
|
||||||
|
echo \"\"
|
||||||
|
echo \"[3/4] Verify installation...\"
|
||||||
|
btest --version
|
||||||
|
which btest
|
||||||
|
man -w btest.1 2>/dev/null && echo \"Man page: installed\" || echo \"Man page: not found\"
|
||||||
|
systemctl cat btest.service 2>/dev/null | head -3 && echo \"Systemd unit: installed\" || echo \"Systemd unit: not found\"
|
||||||
|
|
||||||
|
echo \"\"
|
||||||
|
echo \"[4/4] Loopback tests...\"
|
||||||
|
|
||||||
|
echo \"--- TCP (3s) ---\"
|
||||||
|
btest -s -P 19876 &
|
||||||
|
sleep 2
|
||||||
|
btest -c 127.0.0.1 -P 19876 -r -d 3
|
||||||
|
kill %1 2>/dev/null; wait 2>/dev/null || true
|
||||||
|
|
||||||
|
echo \"--- UDP (3s) ---\"
|
||||||
|
btest -s -P 19877 &
|
||||||
|
sleep 2
|
||||||
|
btest -c 127.0.0.1 -P 19877 -r -u -d 3
|
||||||
|
kill %1 2>/dev/null; wait 2>/dev/null || true
|
||||||
|
|
||||||
|
echo \"\"
|
||||||
|
echo \"=== ALL TESTS PASSED ===\"
|
||||||
|
"
|
||||||
|
'
|
||||||
|
|
||||||
|
if [ -n "$REMOTE" ]; then
|
||||||
|
echo "=== Testing AUR package on $REMOTE ==="
|
||||||
|
ssh "$REMOTE" "$TEST_SCRIPT"
|
||||||
|
else
|
||||||
|
echo "=== Testing AUR package locally ==="
|
||||||
|
eval "$TEST_SCRIPT"
|
||||||
|
fi
|
||||||
@@ -20,6 +20,9 @@ pub struct BandwidthState {
|
|||||||
pub intervals: AtomicU32,
|
pub intervals: AtomicU32,
|
||||||
/// Remote peer's CPU usage (received via status messages)
|
/// Remote peer's CPU usage (received via status messages)
|
||||||
pub remote_cpu: AtomicU8,
|
pub remote_cpu: AtomicU8,
|
||||||
|
/// Remaining byte budget (TX + RX combined). When this reaches 0 the test
|
||||||
|
/// stops immediately. u64::MAX means unlimited (default for non-pro server).
|
||||||
|
pub byte_budget: AtomicU64,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BandwidthState {
|
impl BandwidthState {
|
||||||
@@ -38,6 +41,7 @@ impl BandwidthState {
|
|||||||
total_lost_packets: AtomicU64::new(0),
|
total_lost_packets: AtomicU64::new(0),
|
||||||
intervals: AtomicU32::new(0),
|
intervals: AtomicU32::new(0),
|
||||||
remote_cpu: AtomicU8::new(0),
|
remote_cpu: AtomicU8::new(0),
|
||||||
|
byte_budget: AtomicU64::new(u64::MAX),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -50,6 +54,30 @@ impl BandwidthState {
|
|||||||
self.intervals.fetch_add(1, Relaxed);
|
self.intervals.fetch_add(1, Relaxed);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Try to spend `amount` bytes from the budget. Returns `true` if allowed,
|
||||||
|
/// `false` if the budget is exhausted (and sets `running = false`).
|
||||||
|
#[inline]
|
||||||
|
pub fn spend_budget(&self, amount: u64) -> bool {
|
||||||
|
use std::sync::atomic::Ordering::{Relaxed, SeqCst};
|
||||||
|
// Fast path: unlimited budget (non-pro server)
|
||||||
|
let current = self.byte_budget.load(Relaxed);
|
||||||
|
if current == u64::MAX {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if current < amount {
|
||||||
|
self.running.store(false, SeqCst);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
self.byte_budget.fetch_sub(amount, Relaxed);
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set the byte budget (total bytes allowed for the entire test).
|
||||||
|
#[cfg(feature = "pro")]
|
||||||
|
pub fn set_budget(&self, budget: u64) {
|
||||||
|
self.byte_budget.store(budget, std::sync::atomic::Ordering::SeqCst);
|
||||||
|
}
|
||||||
|
|
||||||
/// Get summary for syslog reporting.
|
/// Get summary for syslog reporting.
|
||||||
pub fn summary(&self) -> (u64, u64, u64, u32) {
|
pub fn summary(&self) -> (u64, u64, u64, u32) {
|
||||||
use std::sync::atomic::Ordering::Relaxed;
|
use std::sync::atomic::Ordering::Relaxed;
|
||||||
@@ -80,6 +108,34 @@ pub fn calc_send_interval(tx_speed_bps: u32, tx_size: u16) -> Option<Duration> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Advance `next_send` by one interval and clamp drift.
|
||||||
|
///
|
||||||
|
/// When the sender falls behind (e.g., the write blocked longer than the
|
||||||
|
/// inter-packet interval), `next_send` accumulates a debt. Once the path
|
||||||
|
/// clears, the loop would fire packets with *no* delay until the debt is
|
||||||
|
/// repaid, producing a burst that overshoots the target rate.
|
||||||
|
///
|
||||||
|
/// This helper resets `next_send` to `now` whenever it has drifted more
|
||||||
|
/// than 2x the interval behind the current wall-clock time, bounding the
|
||||||
|
/// maximum burst to at most one extra interval's worth of packets.
|
||||||
|
pub fn advance_next_send(
|
||||||
|
next_send: &mut std::time::Instant,
|
||||||
|
iv: Duration,
|
||||||
|
now: std::time::Instant,
|
||||||
|
) -> Option<Duration> {
|
||||||
|
*next_send += iv;
|
||||||
|
// If we have fallen more than 2x the interval behind, reset to now
|
||||||
|
// to prevent a compensating burst.
|
||||||
|
if *next_send + iv < now {
|
||||||
|
*next_send = now;
|
||||||
|
}
|
||||||
|
if *next_send > now {
|
||||||
|
Some(*next_send - now)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Format a bandwidth value in human-readable form.
|
/// Format a bandwidth value in human-readable form.
|
||||||
pub fn format_bandwidth(bits_per_sec: f64) -> String {
|
pub fn format_bandwidth(bits_per_sec: f64) -> String {
|
||||||
if bits_per_sec >= 1_000_000_000.0 {
|
if bits_per_sec >= 1_000_000_000.0 {
|
||||||
|
|||||||
127
src/bin/client_only.rs
Normal file
127
src/bin/client_only.rs
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
//! btest-client: minimal bandwidth test client for embedded/OpenWrt systems.
|
||||||
|
//!
|
||||||
|
//! Stripped-down client that connects to MikroTik btest servers.
|
||||||
|
//! No server mode, no syslog, smaller binary footprint.
|
||||||
|
//!
|
||||||
|
//! Build: cargo build --profile release-small --bin btest-client
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use std::sync::atomic::Ordering;
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[command(name = "btest-client", about = "MikroTik Bandwidth Test client", version)]
|
||||||
|
struct Cli {
|
||||||
|
/// Server address to connect to
|
||||||
|
#[arg(short = 'c', long = "client", required = true)]
|
||||||
|
host: String,
|
||||||
|
|
||||||
|
/// Transmit data (upload)
|
||||||
|
#[arg(short = 't', long = "transmit")]
|
||||||
|
transmit: bool,
|
||||||
|
|
||||||
|
/// Receive data (download)
|
||||||
|
#[arg(short = 'r', long = "receive")]
|
||||||
|
receive: bool,
|
||||||
|
|
||||||
|
/// Use UDP
|
||||||
|
#[arg(short = 'u', long = "udp")]
|
||||||
|
udp: bool,
|
||||||
|
|
||||||
|
/// Bandwidth limit (e.g., 100M)
|
||||||
|
#[arg(short = 'b', long = "bandwidth")]
|
||||||
|
bandwidth: Option<String>,
|
||||||
|
|
||||||
|
/// Port
|
||||||
|
#[arg(short = 'P', long = "port", default_value_t = 2000)]
|
||||||
|
port: u16,
|
||||||
|
|
||||||
|
/// Username
|
||||||
|
#[arg(short = 'a', long = "authuser")]
|
||||||
|
auth_user: Option<String>,
|
||||||
|
|
||||||
|
/// Password
|
||||||
|
#[arg(short = 'p', long = "authpass")]
|
||||||
|
auth_pass: Option<String>,
|
||||||
|
|
||||||
|
/// NAT mode
|
||||||
|
#[arg(short = 'n', long = "nat")]
|
||||||
|
nat: bool,
|
||||||
|
|
||||||
|
/// Duration in seconds (0=unlimited)
|
||||||
|
#[arg(short = 'd', long = "duration", default_value_t = 0)]
|
||||||
|
duration: u64,
|
||||||
|
|
||||||
|
/// Verbose
|
||||||
|
#[arg(short = 'v', long = "verbose", action = clap::ArgAction::Count)]
|
||||||
|
verbose: u8,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let cli = Cli::parse();
|
||||||
|
|
||||||
|
let filter = match cli.verbose {
|
||||||
|
0 => "info",
|
||||||
|
1 => "debug",
|
||||||
|
_ => "trace",
|
||||||
|
};
|
||||||
|
tracing_subscriber::fmt()
|
||||||
|
.with_env_filter(
|
||||||
|
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||||
|
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(filter)),
|
||||||
|
)
|
||||||
|
.with_target(false)
|
||||||
|
.init();
|
||||||
|
|
||||||
|
btest_rs::cpu::start_sampler();
|
||||||
|
|
||||||
|
if !cli.transmit && !cli.receive {
|
||||||
|
eprintln!("Error: specify -t (transmit) and/or -r (receive)");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
let direction = match (cli.transmit, cli.receive) {
|
||||||
|
(true, false) => btest_rs::protocol::CMD_DIR_RX,
|
||||||
|
(false, true) => btest_rs::protocol::CMD_DIR_TX,
|
||||||
|
(true, true) => btest_rs::protocol::CMD_DIR_BOTH,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let bw = match &cli.bandwidth {
|
||||||
|
Some(b) => btest_rs::bandwidth::parse_bandwidth(b)?,
|
||||||
|
None => 0,
|
||||||
|
};
|
||||||
|
|
||||||
|
let (tx_speed, rx_speed) = match direction {
|
||||||
|
btest_rs::protocol::CMD_DIR_TX => (bw, 0),
|
||||||
|
btest_rs::protocol::CMD_DIR_RX => (0, bw),
|
||||||
|
_ => (bw, bw),
|
||||||
|
};
|
||||||
|
|
||||||
|
let state = btest_rs::bandwidth::BandwidthState::new();
|
||||||
|
let state_clone = state.clone();
|
||||||
|
|
||||||
|
let host = cli.host.clone();
|
||||||
|
let client_fut = btest_rs::client::run_client(
|
||||||
|
&host, cli.port, direction, cli.udp,
|
||||||
|
tx_speed, rx_speed,
|
||||||
|
cli.auth_user, cli.auth_pass, cli.nat,
|
||||||
|
state_clone,
|
||||||
|
);
|
||||||
|
|
||||||
|
if cli.duration > 0 {
|
||||||
|
match tokio::time::timeout(
|
||||||
|
std::time::Duration::from_secs(cli.duration),
|
||||||
|
client_fut,
|
||||||
|
).await {
|
||||||
|
Ok(r) => { let _ = r?; }
|
||||||
|
Err(_) => {
|
||||||
|
state.running.store(false, Ordering::SeqCst);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
let _ = client_fut.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
62
src/bin/server_only.rs
Normal file
62
src/bin/server_only.rs
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
//! btest-server: minimal bandwidth test server for embedded/OpenWrt systems.
|
||||||
|
//!
|
||||||
|
//! Stripped-down server that accepts MikroTik client connections.
|
||||||
|
//! No client mode, no syslog, no CSV, smaller binary footprint.
|
||||||
|
//!
|
||||||
|
//! Build: cargo build --profile release-small --bin btest-server
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[command(name = "btest-server", about = "MikroTik Bandwidth Test server", version)]
|
||||||
|
struct Cli {
|
||||||
|
/// Port
|
||||||
|
#[arg(short = 'P', long = "port", default_value_t = 2000)]
|
||||||
|
port: u16,
|
||||||
|
|
||||||
|
/// IPv4 listen address
|
||||||
|
#[arg(long = "listen", default_value = "0.0.0.0")]
|
||||||
|
listen_addr: String,
|
||||||
|
|
||||||
|
/// Username
|
||||||
|
#[arg(short = 'a', long = "authuser")]
|
||||||
|
auth_user: Option<String>,
|
||||||
|
|
||||||
|
/// Password
|
||||||
|
#[arg(short = 'p', long = "authpass")]
|
||||||
|
auth_pass: Option<String>,
|
||||||
|
|
||||||
|
/// Use EC-SRP5 authentication
|
||||||
|
#[arg(long = "ecsrp5")]
|
||||||
|
ecsrp5: bool,
|
||||||
|
|
||||||
|
/// Verbose
|
||||||
|
#[arg(short = 'v', long = "verbose", action = clap::ArgAction::Count)]
|
||||||
|
verbose: u8,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let cli = Cli::parse();
|
||||||
|
|
||||||
|
let filter = match cli.verbose {
|
||||||
|
0 => "info",
|
||||||
|
1 => "debug",
|
||||||
|
_ => "trace",
|
||||||
|
};
|
||||||
|
tracing_subscriber::fmt()
|
||||||
|
.with_env_filter(
|
||||||
|
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||||
|
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(filter)),
|
||||||
|
)
|
||||||
|
.with_target(false)
|
||||||
|
.init();
|
||||||
|
|
||||||
|
btest_rs::cpu::start_sampler();
|
||||||
|
|
||||||
|
let v4 = if cli.listen_addr.eq_ignore_ascii_case("none") { None } else { Some(cli.listen_addr) };
|
||||||
|
|
||||||
|
tracing::info!("btest-server starting on port {}", cli.port);
|
||||||
|
btest_rs::server::run_server(cli.port, cli.auth_user, cli.auth_pass, cli.ecsrp5, v4, None).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
204
src/client.rs
204
src/client.rs
@@ -27,6 +27,11 @@ pub async fn run_client(
|
|||||||
let mut stream = TcpStream::connect(&addr).await?;
|
let mut stream = TcpStream::connect(&addr).await?;
|
||||||
stream.set_nodelay(true)?;
|
stream.set_nodelay(true)?;
|
||||||
|
|
||||||
|
// Set TCP socket buffers to 4MB for high throughput
|
||||||
|
let sock_ref = socket2::SockRef::from(&stream);
|
||||||
|
let _ = sock_ref.set_send_buffer_size(4 * 1024 * 1024);
|
||||||
|
let _ = sock_ref.set_recv_buffer_size(4 * 1024 * 1024);
|
||||||
|
|
||||||
recv_hello(&mut stream).await?;
|
recv_hello(&mut stream).await?;
|
||||||
tracing::info!("Connected to server");
|
tracing::info!("Connected to server");
|
||||||
|
|
||||||
@@ -127,6 +132,12 @@ async fn run_tcp_test_client(stream: TcpStream, cmd: Command, state: Arc<Bandwid
|
|||||||
Some(tokio::spawn(async move {
|
Some(tokio::spawn(async move {
|
||||||
tcp_client_rx_loop(reader, state_rx).await
|
tcp_client_rx_loop(reader, state_rx).await
|
||||||
}))
|
}))
|
||||||
|
} else if client_should_tx {
|
||||||
|
// TX-only: still need to read the server's status messages to get remote CPU.
|
||||||
|
// Don't count these bytes as RX data.
|
||||||
|
Some(tokio::spawn(async move {
|
||||||
|
tcp_client_status_reader(reader, state_rx).await
|
||||||
|
}))
|
||||||
} else {
|
} else {
|
||||||
_reader_keepalive = Some(reader);
|
_reader_keepalive = Some(reader);
|
||||||
None
|
None
|
||||||
@@ -148,15 +159,17 @@ async fn tcp_client_tx_loop(
|
|||||||
) {
|
) {
|
||||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||||
|
|
||||||
let packet = vec![0u8; tx_size]; // TCP data is all zeros
|
|
||||||
let mut interval = bandwidth::calc_send_interval(tx_speed, tx_size as u16);
|
let mut interval = bandwidth::calc_send_interval(tx_speed, tx_size as u16);
|
||||||
|
// Use larger writes when running unlimited to reduce syscall overhead
|
||||||
|
let effective_size = if interval.is_none() { tx_size.max(256 * 1024) } else { tx_size };
|
||||||
|
let packet = vec![0u8; effective_size]; // TCP data is all zeros
|
||||||
let mut next_send = Instant::now();
|
let mut next_send = Instant::now();
|
||||||
|
|
||||||
while state.running.load(Ordering::Relaxed) {
|
while state.running.load(Ordering::Relaxed) {
|
||||||
if writer.write_all(&packet).await.is_err() {
|
if writer.write_all(&packet).await.is_err() {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
state.tx_bytes.fetch_add(tx_size as u64, Ordering::Relaxed);
|
state.tx_bytes.fetch_add(effective_size as u64, Ordering::Relaxed);
|
||||||
|
|
||||||
if state.tx_speed_changed.load(Ordering::Relaxed) {
|
if state.tx_speed_changed.load(Ordering::Relaxed) {
|
||||||
state.tx_speed_changed.store(false, Ordering::Relaxed);
|
state.tx_speed_changed.store(false, Ordering::Relaxed);
|
||||||
@@ -167,10 +180,9 @@ async fn tcp_client_tx_loop(
|
|||||||
|
|
||||||
match interval {
|
match interval {
|
||||||
Some(iv) => {
|
Some(iv) => {
|
||||||
next_send += iv;
|
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
if next_send > now {
|
if let Some(delay) = bandwidth::advance_next_send(&mut next_send, iv, now) {
|
||||||
tokio::time::sleep(next_send - now).await;
|
tokio::time::sleep(delay).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
@@ -184,13 +196,88 @@ async fn tcp_client_rx_loop(
|
|||||||
mut reader: tokio::net::tcp::OwnedReadHalf,
|
mut reader: tokio::net::tcp::OwnedReadHalf,
|
||||||
state: Arc<BandwidthState>,
|
state: Arc<BandwidthState>,
|
||||||
) {
|
) {
|
||||||
let mut buf = vec![0u8; 65536];
|
let mut buf = vec![0u8; 256 * 1024];
|
||||||
|
// Carry trailing bytes from the previous read to detect status messages
|
||||||
|
// that are split across TCP read boundaries.
|
||||||
|
let mut carry = [0u8; STATUS_MSG_SIZE - 1];
|
||||||
|
let mut carry_len = 0usize;
|
||||||
|
|
||||||
while state.running.load(Ordering::Relaxed) {
|
while state.running.load(Ordering::Relaxed) {
|
||||||
match reader.read(&mut buf).await {
|
match reader.read(&mut buf).await {
|
||||||
Ok(0) | Err(_) => break,
|
Ok(0) | Err(_) => break,
|
||||||
Ok(n) => {
|
Ok(n) => {
|
||||||
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||||
|
|
||||||
|
// 1) Check if a status message spans the carry + start of buf.
|
||||||
|
if carry_len > 0 {
|
||||||
|
for offset in 0..carry_len {
|
||||||
|
if carry[offset] != STATUS_MSG_TYPE {
|
||||||
|
continue;
|
||||||
}
|
}
|
||||||
|
let from_carry = carry_len - offset;
|
||||||
|
let from_buf = STATUS_MSG_SIZE - from_carry;
|
||||||
|
if n < from_buf {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let cpu_byte = if from_carry >= 2 {
|
||||||
|
carry[offset + 1]
|
||||||
|
} else {
|
||||||
|
buf[0]
|
||||||
|
};
|
||||||
|
if cpu_byte >= 0x80 {
|
||||||
|
state.remote_cpu.store((cpu_byte & 0x7F).min(100), Ordering::Relaxed);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2) Fast scan within buf for status messages.
|
||||||
|
// Data packets are all zeros, so memchr (SIMD) exits almost instantly.
|
||||||
|
if n >= STATUS_MSG_SIZE {
|
||||||
|
let search_end = n - STATUS_MSG_SIZE + 1;
|
||||||
|
if let Some(pos) = memchr::memchr(STATUS_MSG_TYPE, &buf[..search_end]) {
|
||||||
|
if buf[pos + 1] >= 0x80 {
|
||||||
|
let cpu = buf[pos + 1] & 0x7F;
|
||||||
|
state.remote_cpu.store(cpu.min(100), Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3) Save trailing bytes for the next read.
|
||||||
|
carry_len = n.min(STATUS_MSG_SIZE - 1);
|
||||||
|
if n >= carry_len {
|
||||||
|
carry[..carry_len].copy_from_slice(&buf[n - carry_len..n]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read only status messages from the server (TX-only mode).
|
||||||
|
/// The server sends 12-byte status messages on the TCP connection even when
|
||||||
|
/// the client is only transmitting. We need to read them to get remote CPU
|
||||||
|
/// and to prevent the TCP receive buffer from filling up.
|
||||||
|
async fn tcp_client_status_reader(
|
||||||
|
mut reader: tokio::net::tcp::OwnedReadHalf,
|
||||||
|
state: Arc<BandwidthState>,
|
||||||
|
) {
|
||||||
|
let mut buf = [0u8; STATUS_MSG_SIZE];
|
||||||
|
while state.running.load(Ordering::Relaxed) {
|
||||||
|
match reader.read_exact(&mut buf).await {
|
||||||
|
Ok(_) => {
|
||||||
|
if buf[0] == STATUS_MSG_TYPE && buf[1] >= 0x80 {
|
||||||
|
let status = StatusMessage::deserialize(&buf);
|
||||||
|
state.remote_cpu.store(status.cpu_load, Ordering::Relaxed);
|
||||||
|
// Use server's bytes_received for TX speed adaptation
|
||||||
|
if status.bytes_received > 0 {
|
||||||
|
let new_speed =
|
||||||
|
((status.bytes_received as u64 * 8 * 3) / 2) as u32;
|
||||||
|
state.tx_speed.store(new_speed, Ordering::Relaxed);
|
||||||
|
state.tx_speed_changed.store(true, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(_) => break,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -317,10 +404,9 @@ async fn udp_client_tx_loop(
|
|||||||
|
|
||||||
match interval {
|
match interval {
|
||||||
Some(iv) => {
|
Some(iv) => {
|
||||||
next_send += iv;
|
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
if next_send > now {
|
if let Some(delay) = bandwidth::advance_next_send(&mut next_send, iv, now) {
|
||||||
tokio::time::sleep(next_send - now).await;
|
tokio::time::sleep(delay).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
@@ -335,10 +421,15 @@ async fn udp_client_tx_loop(
|
|||||||
async fn udp_client_rx_loop(socket: &UdpSocket, state: Arc<BandwidthState>) {
|
async fn udp_client_rx_loop(socket: &UdpSocket, state: Arc<BandwidthState>) {
|
||||||
let mut buf = vec![0u8; 65536];
|
let mut buf = vec![0u8; 65536];
|
||||||
let mut last_seq: Option<u32> = None;
|
let mut last_seq: Option<u32> = None;
|
||||||
|
let mut timeout = tokio::time::sleep(Duration::from_secs(5));
|
||||||
|
tokio::pin!(timeout);
|
||||||
|
|
||||||
while state.running.load(Ordering::Relaxed) {
|
while state.running.load(Ordering::Relaxed) {
|
||||||
match tokio::time::timeout(Duration::from_secs(5), socket.recv(&mut buf)).await {
|
tokio::select! {
|
||||||
Ok(Ok(n)) if n >= 4 => {
|
biased;
|
||||||
|
res = socket.recv(&mut buf) => {
|
||||||
|
match res {
|
||||||
|
Ok(n) if n >= 4 => {
|
||||||
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||||
state.rx_packets.fetch_add(1, Ordering::Relaxed);
|
state.rx_packets.fetch_add(1, Ordering::Relaxed);
|
||||||
|
|
||||||
@@ -352,13 +443,17 @@ async fn udp_client_rx_loop(socket: &UdpSocket, state: Arc<BandwidthState>) {
|
|||||||
}
|
}
|
||||||
last_seq = Some(seq);
|
last_seq = Some(seq);
|
||||||
}
|
}
|
||||||
Ok(Ok(_)) => {}
|
Ok(_) => {}
|
||||||
Ok(Err(e)) => {
|
Err(e) => {
|
||||||
tracing::debug!("UDP recv error: {}", e);
|
tracing::debug!("UDP recv error: {}", e);
|
||||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||||
}
|
}
|
||||||
Err(_) => {
|
}
|
||||||
|
timeout.as_mut().reset(tokio::time::Instant::now() + Duration::from_secs(5));
|
||||||
|
}
|
||||||
|
_ = &mut timeout => {
|
||||||
tracing::debug!("UDP RX timeout");
|
tracing::debug!("UDP RX timeout");
|
||||||
|
timeout.as_mut().reset(tokio::time::Instant::now() + Duration::from_secs(5));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -476,3 +571,84 @@ async fn udp_client_status_loop(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Scan for a status message in `carry` + `buf` and return the CPU value if found.
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub fn scan_status_message(carry: &[u8], buf: &[u8]) -> Option<u8> {
|
||||||
|
let carry_len = carry.len();
|
||||||
|
// 1) Check split across carry + buf
|
||||||
|
if carry_len > 0 {
|
||||||
|
for offset in 0..carry_len {
|
||||||
|
if carry[offset] != STATUS_MSG_TYPE {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let from_carry = carry_len - offset;
|
||||||
|
let from_buf = STATUS_MSG_SIZE - from_carry;
|
||||||
|
if buf.len() < from_buf {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let cpu_byte = if from_carry >= 2 {
|
||||||
|
carry[offset + 1]
|
||||||
|
} else {
|
||||||
|
buf[0]
|
||||||
|
};
|
||||||
|
if cpu_byte >= 0x80 {
|
||||||
|
return Some((cpu_byte & 0x7F).min(100));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// 2) Check within buf
|
||||||
|
let n = buf.len();
|
||||||
|
if n >= STATUS_MSG_SIZE {
|
||||||
|
let search_end = n - STATUS_MSG_SIZE + 1;
|
||||||
|
if let Some(pos) = memchr::memchr(STATUS_MSG_TYPE, &buf[..search_end]) {
|
||||||
|
if buf[pos + 1] >= 0x80 {
|
||||||
|
return Some((buf[pos + 1] & 0x7F).min(100));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod status_scan_tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_status_within_buffer() {
|
||||||
|
let mut buf = [0u8; 256];
|
||||||
|
buf[10] = STATUS_MSG_TYPE;
|
||||||
|
buf[11] = 0x80 | 42;
|
||||||
|
assert_eq!(scan_status_message(&[], &buf), Some(42));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_status_split_across_reads() {
|
||||||
|
// 12-byte status message: [0x07, 0x80|50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
|
||||||
|
// Split: first 5 bytes in carry, last 7 bytes in buf
|
||||||
|
let carry = [0x07, 0xB2, 0, 0, 0];
|
||||||
|
let buf = [0, 0, 0, 0, 0, 0, 0];
|
||||||
|
assert_eq!(scan_status_message(&carry, &buf), Some(50));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_status_split_at_boundary() {
|
||||||
|
// Split: first 1 byte (0x07) in carry, rest in buf
|
||||||
|
let carry = [0x07];
|
||||||
|
let mut buf = [0u8; 20];
|
||||||
|
buf[0] = 0x80 | 77;
|
||||||
|
assert_eq!(scan_status_message(&carry, &buf), Some(77));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_no_status_in_zeros() {
|
||||||
|
let buf = [0u8; 256];
|
||||||
|
assert_eq!(scan_status_message(&[], &buf), None);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_short_buffer_no_panic() {
|
||||||
|
let buf = [0x07, 0x80];
|
||||||
|
assert_eq!(scan_status_message(&[], &buf), None);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
96
src/cpu.rs
96
src/cpu.rs
@@ -1,7 +1,7 @@
|
|||||||
//! Lightweight CPU usage measurement.
|
//! Lightweight CPU usage measurement.
|
||||||
//!
|
//!
|
||||||
//! Returns the system-wide CPU usage as a percentage (0-100).
|
//! Returns the system-wide CPU usage as a percentage (0-100).
|
||||||
//! Works on macOS and Linux without external dependencies.
|
//! Works on macOS, Linux, Windows, and FreeBSD without external dependencies.
|
||||||
|
|
||||||
use std::sync::atomic::{AtomicU8, Ordering};
|
use std::sync::atomic::{AtomicU8, Ordering};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@@ -29,7 +29,7 @@ pub fn get() -> u8 {
|
|||||||
|
|
||||||
// --- Platform-specific implementation ---
|
// --- Platform-specific implementation ---
|
||||||
|
|
||||||
#[cfg(target_os = "linux")]
|
#[cfg(any(target_os = "linux", target_os = "android"))]
|
||||||
fn get_cpu_times() -> (u64, u64) {
|
fn get_cpu_times() -> (u64, u64) {
|
||||||
// Read /proc/stat: cpu user nice system idle iowait irq softirq steal
|
// Read /proc/stat: cpu user nice system idle iowait irq softirq steal
|
||||||
if let Ok(content) = std::fs::read_to_string("/proc/stat") {
|
if let Ok(content) = std::fs::read_to_string("/proc/stat") {
|
||||||
@@ -93,7 +93,89 @@ fn get_cpu_times() -> (u64, u64) {
|
|||||||
(0, 0)
|
(0, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
|
#[cfg(target_os = "windows")]
|
||||||
|
fn get_cpu_times() -> (u64, u64) {
|
||||||
|
#[repr(C)]
|
||||||
|
#[derive(Default)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
struct FILETIME {
|
||||||
|
dwLowDateTime: u32,
|
||||||
|
dwHighDateTime: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FILETIME {
|
||||||
|
fn to_u64(&self) -> u64 {
|
||||||
|
(self.dwHighDateTime as u64) << 32 | self.dwLowDateTime as u64
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
extern "system" {
|
||||||
|
fn GetSystemTimes(
|
||||||
|
lpIdleTime: *mut FILETIME,
|
||||||
|
lpKernelTime: *mut FILETIME,
|
||||||
|
lpUserTime: *mut FILETIME,
|
||||||
|
) -> i32;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut idle = FILETIME::default();
|
||||||
|
let mut kernel = FILETIME::default();
|
||||||
|
let mut user = FILETIME::default();
|
||||||
|
|
||||||
|
// SAFETY: We pass valid pointers to stack-allocated FILETIME structs.
|
||||||
|
// GetSystemTimes is a well-documented Win32 API that writes into these
|
||||||
|
// output parameters. A non-zero return value indicates success.
|
||||||
|
let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };
|
||||||
|
|
||||||
|
if ret != 0 {
|
||||||
|
let idle_ticks = idle.to_u64();
|
||||||
|
// Kernel time includes idle time on Windows, so total = kernel + user.
|
||||||
|
let total_ticks = kernel.to_u64() + user.to_u64();
|
||||||
|
(total_ticks, idle_ticks)
|
||||||
|
} else {
|
||||||
|
(0, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(target_os = "freebsd")]
|
||||||
|
fn get_cpu_times() -> (u64, u64) {
|
||||||
|
// kern.cp_time returns: user nice system interrupt idle
|
||||||
|
const CTL_KERN: libc::c_int = 1;
|
||||||
|
const KERN_CP_TIME: libc::c_int = 40;
|
||||||
|
|
||||||
|
let mut mib: [libc::c_int; 2] = [CTL_KERN, KERN_CP_TIME];
|
||||||
|
let mut cp_time: [libc::c_ulong; 5] = [0; 5];
|
||||||
|
let mut len = std::mem::size_of_val(&cp_time);
|
||||||
|
|
||||||
|
let ret = unsafe {
|
||||||
|
libc::sysctl(
|
||||||
|
mib.as_mut_ptr(),
|
||||||
|
mib.len() as u32,
|
||||||
|
&mut cp_time as *mut _ as *mut libc::c_void,
|
||||||
|
&mut len,
|
||||||
|
std::ptr::null_mut(),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
if ret == 0 {
|
||||||
|
let user = cp_time[0] as u64;
|
||||||
|
let nice = cp_time[1] as u64;
|
||||||
|
let system = cp_time[2] as u64;
|
||||||
|
let interrupt = cp_time[3] as u64;
|
||||||
|
let idle = cp_time[4] as u64;
|
||||||
|
let total = user + nice + system + interrupt + idle;
|
||||||
|
return (total, idle);
|
||||||
|
}
|
||||||
|
(0, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(any(
|
||||||
|
target_os = "linux",
|
||||||
|
target_os = "android",
|
||||||
|
target_os = "macos",
|
||||||
|
target_os = "windows",
|
||||||
|
target_os = "freebsd",
|
||||||
|
)))]
|
||||||
fn get_cpu_times() -> (u64, u64) {
|
fn get_cpu_times() -> (u64, u64) {
|
||||||
(0, 0) // Unsupported platform
|
(0, 0) // Unsupported platform
|
||||||
}
|
}
|
||||||
@@ -116,7 +198,13 @@ mod tests {
|
|||||||
fn test_cpu_times_returns_nonzero() {
|
fn test_cpu_times_returns_nonzero() {
|
||||||
let (total, idle) = get_cpu_times();
|
let (total, idle) = get_cpu_times();
|
||||||
// On supported platforms, total should be > 0
|
// On supported platforms, total should be > 0
|
||||||
if cfg!(any(target_os = "linux", target_os = "macos")) {
|
if cfg!(any(
|
||||||
|
target_os = "linux",
|
||||||
|
target_os = "android",
|
||||||
|
target_os = "macos",
|
||||||
|
target_os = "windows",
|
||||||
|
target_os = "freebsd",
|
||||||
|
)) {
|
||||||
assert!(total > 0, "CPU total ticks should be > 0");
|
assert!(total > 0, "CPU total ticks should be > 0");
|
||||||
assert!(idle <= total, "idle should be <= total");
|
assert!(idle <= total, "idle should be <= total");
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ use std::path::Path;
|
|||||||
use std::sync::Mutex;
|
use std::sync::Mutex;
|
||||||
use std::time::SystemTime;
|
use std::time::SystemTime;
|
||||||
|
|
||||||
static CSV_FILE: Mutex<Option<String>> = Mutex::new(None);
|
static CSV_FILE: Mutex<Option<(String, std::fs::File)>> = Mutex::new(None);
|
||||||
static QUIET: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);
|
static QUIET: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);
|
||||||
|
|
||||||
const HEADER: &str = "timestamp,host,port,protocol,direction,duration_s,tx_avg_mbps,rx_avg_mbps,tx_bytes,rx_bytes,lost_packets,local_cpu_pct,remote_cpu_pct,auth_type";
|
const HEADER: &str = "timestamp,host,port,protocol,direction,duration_s,tx_avg_mbps,rx_avg_mbps,tx_bytes,rx_bytes,lost_packets,local_cpu_pct,remote_cpu_pct,auth_type";
|
||||||
@@ -18,12 +18,12 @@ const HEADER: &str = "timestamp,host,port,protocol,direction,duration_s,tx_avg_m
|
|||||||
pub fn init(path: &str) -> std::io::Result<()> {
|
pub fn init(path: &str) -> std::io::Result<()> {
|
||||||
let needs_header = !Path::new(path).exists() || std::fs::metadata(path)?.len() == 0;
|
let needs_header = !Path::new(path).exists() || std::fs::metadata(path)?.len() == 0;
|
||||||
|
|
||||||
|
let mut f = OpenOptions::new().create(true).append(true).open(path)?;
|
||||||
if needs_header {
|
if needs_header {
|
||||||
let mut f = OpenOptions::new().create(true).write(true).open(path)?;
|
|
||||||
writeln!(f, "{}", HEADER)?;
|
writeln!(f, "{}", HEADER)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
*CSV_FILE.lock().unwrap() = Some(path.to_string());
|
*CSV_FILE.lock().unwrap() = Some((path.to_string(), f));
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -49,8 +49,8 @@ pub fn write_result(
|
|||||||
remote_cpu: u8,
|
remote_cpu: u8,
|
||||||
auth_type: &str,
|
auth_type: &str,
|
||||||
) {
|
) {
|
||||||
let guard = CSV_FILE.lock().unwrap();
|
let mut guard = CSV_FILE.lock().unwrap();
|
||||||
if let Some(ref path) = *guard {
|
if let Some((ref _path, ref mut file)) = *guard {
|
||||||
let tx_mbps = if duration_secs > 0 {
|
let tx_mbps = if duration_secs > 0 {
|
||||||
tx_bytes as f64 * 8.0 / duration_secs as f64 / 1_000_000.0
|
tx_bytes as f64 * 8.0 / duration_secs as f64 / 1_000_000.0
|
||||||
} else {
|
} else {
|
||||||
@@ -74,9 +74,8 @@ pub fn write_result(
|
|||||||
local_cpu, remote_cpu, auth_type,
|
local_cpu, remote_cpu, auth_type,
|
||||||
);
|
);
|
||||||
|
|
||||||
if let Ok(mut f) = OpenOptions::new().append(true).open(path) {
|
let _ = writeln!(file, "{}", row);
|
||||||
let _ = writeln!(f, "{}", row);
|
let _ = file.flush();
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
114
src/ecsrp5.rs
114
src/ecsrp5.rs
@@ -6,6 +6,8 @@
|
|||||||
//!
|
//!
|
||||||
//! btest framing: `[len:1][payload]` (no 0x06 handler byte, unlike Winbox).
|
//! btest framing: `[len:1][payload]` (no 0x06 handler byte, unlike Winbox).
|
||||||
|
|
||||||
|
use std::sync::LazyLock;
|
||||||
|
|
||||||
use num_bigint::BigUint;
|
use num_bigint::BigUint;
|
||||||
use num_integer::Integer;
|
use num_integer::Integer;
|
||||||
use num_traits::{One, Zero};
|
use num_traits::{One, Zero};
|
||||||
@@ -14,31 +16,33 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
|||||||
|
|
||||||
use crate::protocol::{BtestError, Result};
|
use crate::protocol::{BtestError, Result};
|
||||||
|
|
||||||
// --- Curve25519 parameters in Weierstrass form ---
|
// --- Curve25519 parameters in Weierstrass form (cached, computed once) ---
|
||||||
|
|
||||||
fn p() -> BigUint {
|
static P: LazyLock<BigUint> = LazyLock::new(|| {
|
||||||
BigUint::parse_bytes(
|
BigUint::parse_bytes(
|
||||||
b"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed",
|
b"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed",
|
||||||
16,
|
16,
|
||||||
)
|
)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
}
|
});
|
||||||
|
|
||||||
fn curve_order() -> BigUint {
|
static CURVE_ORDER: LazyLock<BigUint> = LazyLock::new(|| {
|
||||||
BigUint::parse_bytes(
|
BigUint::parse_bytes(
|
||||||
b"1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed",
|
b"1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed",
|
||||||
16,
|
16,
|
||||||
)
|
)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
}
|
});
|
||||||
|
|
||||||
fn weierstrass_a() -> BigUint {
|
static WEIERSTRASS_A: LazyLock<BigUint> = LazyLock::new(|| {
|
||||||
BigUint::parse_bytes(
|
BigUint::parse_bytes(
|
||||||
b"2aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa984914a144",
|
b"2aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa984914a144",
|
||||||
16,
|
16,
|
||||||
)
|
)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
}
|
});
|
||||||
|
|
||||||
|
pub static WCURVE: LazyLock<WCurve> = LazyLock::new(WCurve::new);
|
||||||
|
|
||||||
const MONT_A: u64 = 486662;
|
const MONT_A: u64 = 486662;
|
||||||
|
|
||||||
@@ -50,10 +54,10 @@ fn modinv(a: &BigUint, modulus: &BigUint) -> BigUint {
|
|||||||
a.modpow(&exp, modulus)
|
a.modpow(&exp, modulus)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn legendre_symbol(a: &BigUint, p_val: &BigUint) -> i32 {
|
fn legendre_symbol(a: &BigUint, p: &BigUint) -> i32 {
|
||||||
let exp = (p_val - BigUint::one()) / BigUint::from(2u32);
|
let exp = (p - BigUint::one()) / BigUint::from(2u32);
|
||||||
let l = a.modpow(&exp, p_val);
|
let l = a.modpow(&exp, p);
|
||||||
if l == p_val - BigUint::one() {
|
if l == p - BigUint::one() {
|
||||||
-1
|
-1
|
||||||
} else if l == BigUint::zero() {
|
} else if l == BigUint::zero() {
|
||||||
0
|
0
|
||||||
@@ -166,7 +170,7 @@ impl Point {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn add(&self, other: &Point) -> Point {
|
fn add(&self, other: &Point) -> Point {
|
||||||
let p_val = p();
|
let p_val = &*P;
|
||||||
if self.infinity {
|
if self.infinity {
|
||||||
return other.clone();
|
return other.clone();
|
||||||
}
|
}
|
||||||
@@ -179,44 +183,44 @@ impl Point {
|
|||||||
|
|
||||||
let lam = if self.x == other.x && self.y == other.y {
|
let lam = if self.x == other.x && self.y == other.y {
|
||||||
// Point doubling
|
// Point doubling
|
||||||
let three_x_sq = (BigUint::from(3u32) * &self.x * &self.x + &weierstrass_a()) % &p_val;
|
let three_x_sq = (BigUint::from(3u32) * &self.x * &self.x + &*WEIERSTRASS_A) % p_val;
|
||||||
let two_y = (BigUint::from(2u32) * &self.y) % &p_val;
|
let two_y = (BigUint::from(2u32) * &self.y) % p_val;
|
||||||
(three_x_sq * modinv(&two_y, &p_val)) % &p_val
|
(three_x_sq * modinv(&two_y, p_val)) % p_val
|
||||||
} else {
|
} else {
|
||||||
// Point addition
|
// Point addition
|
||||||
let dy = if other.y >= self.y {
|
let dy = if other.y >= self.y {
|
||||||
(&other.y - &self.y) % &p_val
|
(&other.y - &self.y) % p_val
|
||||||
} else {
|
} else {
|
||||||
(&p_val - (&self.y - &other.y) % &p_val) % &p_val
|
(p_val - (&self.y - &other.y) % p_val) % p_val
|
||||||
};
|
};
|
||||||
let dx = if other.x >= self.x {
|
let dx = if other.x >= self.x {
|
||||||
(&other.x - &self.x) % &p_val
|
(&other.x - &self.x) % p_val
|
||||||
} else {
|
} else {
|
||||||
(&p_val - (&self.x - &other.x) % &p_val) % &p_val
|
(p_val - (&self.x - &other.x) % p_val) % p_val
|
||||||
};
|
};
|
||||||
(dy * modinv(&dx, &p_val)) % &p_val
|
(dy * modinv(&dx, p_val)) % p_val
|
||||||
};
|
};
|
||||||
|
|
||||||
let x3 = {
|
let x3 = {
|
||||||
let lam_sq = (&lam * &lam) % &p_val;
|
let lam_sq = (&lam * &lam) % p_val;
|
||||||
let sum_x = (&self.x + &other.x) % &p_val;
|
let sum_x = (&self.x + &other.x) % p_val;
|
||||||
if lam_sq >= sum_x {
|
if lam_sq >= sum_x {
|
||||||
(lam_sq - sum_x) % &p_val
|
(lam_sq - sum_x) % p_val
|
||||||
} else {
|
} else {
|
||||||
(&p_val - (sum_x - lam_sq) % &p_val) % &p_val
|
(p_val - (sum_x - lam_sq) % p_val) % p_val
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let y3 = {
|
let y3 = {
|
||||||
let dx = if self.x >= x3 {
|
let dx = if self.x >= x3 {
|
||||||
(&self.x - &x3) % &p_val
|
(&self.x - &x3) % p_val
|
||||||
} else {
|
} else {
|
||||||
(&p_val - (&x3 - &self.x) % &p_val) % &p_val
|
(p_val - (&x3 - &self.x) % p_val) % p_val
|
||||||
};
|
};
|
||||||
let prod = (&lam * dx) % &p_val;
|
let prod = (&lam * dx) % p_val;
|
||||||
if prod >= self.y {
|
if prod >= self.y {
|
||||||
(prod - &self.y) % &p_val
|
(prod - &self.y) % p_val
|
||||||
} else {
|
} else {
|
||||||
(&p_val - (&self.y - prod) % &p_val) % &p_val
|
(p_val - (&self.y - prod) % p_val) % p_val
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -226,14 +230,13 @@ impl Point {
|
|||||||
fn scalar_mul(&self, scalar: &BigUint) -> Point {
|
fn scalar_mul(&self, scalar: &BigUint) -> Point {
|
||||||
let mut result = Point::infinity();
|
let mut result = Point::infinity();
|
||||||
let mut base = self.clone();
|
let mut base = self.clone();
|
||||||
let mut k = scalar.clone();
|
let bits = scalar.bits();
|
||||||
|
|
||||||
while !k.is_zero() {
|
for i in 0..bits {
|
||||||
if &k & &BigUint::one() == BigUint::one() {
|
if scalar.bit(i) {
|
||||||
result = result.add(&base);
|
result = result.add(&base);
|
||||||
}
|
}
|
||||||
base = base.add(&base);
|
base = base.add(&base);
|
||||||
k >>= 1;
|
|
||||||
}
|
}
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
@@ -241,19 +244,19 @@ impl Point {
|
|||||||
|
|
||||||
// --- WCurve: Curve25519 in Weierstrass form ---
|
// --- WCurve: Curve25519 in Weierstrass form ---
|
||||||
|
|
||||||
struct WCurve {
|
pub struct WCurve {
|
||||||
g: Point,
|
g: Point,
|
||||||
conversion_from_m: BigUint,
|
conversion_from_m: BigUint,
|
||||||
conversion_to_m: BigUint,
|
conversion_to_m: BigUint,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl WCurve {
|
impl WCurve {
|
||||||
fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
let p_val = p();
|
let p_val = &*P;
|
||||||
let mont_a = BigUint::from(MONT_A);
|
let mont_a = BigUint::from(MONT_A);
|
||||||
let three_inv = modinv(&BigUint::from(3u32), &p_val);
|
let three_inv = modinv(&BigUint::from(3u32), p_val);
|
||||||
let conversion_from_m = (&mont_a * &three_inv) % &p_val;
|
let conversion_from_m = (&mont_a * &three_inv) % p_val;
|
||||||
let conversion_to_m = (&p_val - &conversion_from_m) % &p_val;
|
let conversion_to_m = (p_val - &conversion_from_m) % p_val;
|
||||||
|
|
||||||
let mut curve = WCurve {
|
let mut curve = WCurve {
|
||||||
g: Point::infinity(),
|
g: Point::infinity(),
|
||||||
@@ -265,8 +268,8 @@ impl WCurve {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn to_montgomery(&self, pt: &Point) -> ([u8; 32], u8) {
|
fn to_montgomery(&self, pt: &Point) -> ([u8; 32], u8) {
|
||||||
let p_val = p();
|
let p_val = &*P;
|
||||||
let x = (&pt.x + &self.conversion_to_m) % &p_val;
|
let x = (&pt.x + &self.conversion_to_m) % p_val;
|
||||||
let parity = if pt.y.bit(0) { 1u8 } else { 0u8 };
|
let parity = if pt.y.bit(0) { 1u8 } else { 0u8 };
|
||||||
let mut bytes = [0u8; 32];
|
let mut bytes = [0u8; 32];
|
||||||
let x_bytes = x.to_bytes_be();
|
let x_bytes = x.to_bytes_be();
|
||||||
@@ -276,14 +279,14 @@ impl WCurve {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn lift_x(&self, x_mont: &BigUint, parity: bool) -> Point {
|
fn lift_x(&self, x_mont: &BigUint, parity: bool) -> Point {
|
||||||
let p_val = p();
|
let p_val = &*P;
|
||||||
let x = x_mont % &p_val;
|
let x = x_mont % p_val;
|
||||||
// y^2 = x^3 + Ax^2 + x (Montgomery)
|
// y^2 = x^3 + Ax^2 + x (Montgomery)
|
||||||
let y_squared = (&x * &x * &x + BigUint::from(MONT_A) * &x * &x + &x) % &p_val;
|
let y_squared = (&x * &x * &x + BigUint::from(MONT_A) * &x * &x + &x) % p_val;
|
||||||
// Convert x to Weierstrass
|
// Convert x to Weierstrass
|
||||||
let x_w = (&x + &self.conversion_from_m) % &p_val;
|
let x_w = (&x + &self.conversion_from_m) % p_val;
|
||||||
|
|
||||||
if let Some((y1, y2)) = prime_mod_sqrt(&y_squared, &p_val) {
|
if let Some((y1, y2)) = prime_mod_sqrt(&y_squared, p_val) {
|
||||||
let pt1 = Point::new(x_w.clone(), y1);
|
let pt1 = Point::new(x_w.clone(), y1);
|
||||||
let pt2 = Point::new(x_w, y2);
|
let pt2 = Point::new(x_w, y2);
|
||||||
if parity {
|
if parity {
|
||||||
@@ -323,7 +326,7 @@ impl WCurve {
|
|||||||
password: &str,
|
password: &str,
|
||||||
salt: &[u8; 16],
|
salt: &[u8; 16],
|
||||||
) -> [u8; 32] {
|
) -> [u8; 32] {
|
||||||
let inner = sha256_bytes(&format!("{}:{}", username, password).as_bytes().to_vec());
|
let inner = sha256_bytes(format!("{}:{}", username, password).as_bytes());
|
||||||
let mut input = Vec::with_capacity(16 + 32);
|
let mut input = Vec::with_capacity(16 + 32);
|
||||||
input.extend_from_slice(salt);
|
input.extend_from_slice(salt);
|
||||||
input.extend_from_slice(&inner);
|
input.extend_from_slice(&inner);
|
||||||
@@ -359,7 +362,7 @@ pub async fn client_authenticate<S: AsyncReadExt + AsyncWriteExt + Unpin>(
|
|||||||
password: &str,
|
password: &str,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
tracing::info!("Starting EC-SRP5 authentication");
|
tracing::info!("Starting EC-SRP5 authentication");
|
||||||
let w = WCurve::new();
|
let w = &*WCURVE;
|
||||||
|
|
||||||
// Generate client ephemeral keypair
|
// Generate client ephemeral keypair
|
||||||
let s_a: [u8; 32] = rand::random();
|
let s_a: [u8; 32] = rand::random();
|
||||||
@@ -415,8 +418,8 @@ pub async fn client_authenticate<S: AsyncReadExt + AsyncWriteExt + Unpin>(
|
|||||||
let i_int = BigUint::from_bytes_be(&i);
|
let i_int = BigUint::from_bytes_be(&i);
|
||||||
let j_int = BigUint::from_bytes_be(&j);
|
let j_int = BigUint::from_bytes_be(&j);
|
||||||
let s_a_int = BigUint::from_bytes_be(&s_a);
|
let s_a_int = BigUint::from_bytes_be(&s_a);
|
||||||
let order = curve_order();
|
let order = &*CURVE_ORDER;
|
||||||
let scalar = ((&i_int * &j_int) + &s_a_int) % ℴ
|
let scalar = ((&i_int * &j_int) + &s_a_int) % order;
|
||||||
|
|
||||||
let z_point = w_b_unblinded.scalar_mul(&scalar);
|
let z_point = w_b_unblinded.scalar_mul(&scalar);
|
||||||
let (z, _) = w.to_montgomery(&z_point);
|
let (z, _) = w.to_montgomery(&z_point);
|
||||||
@@ -476,7 +479,7 @@ impl EcSrp5Credentials {
|
|||||||
/// Derive EC-SRP5 credentials from username/password (done once at startup).
|
/// Derive EC-SRP5 credentials from username/password (done once at startup).
|
||||||
pub fn derive(username: &str, password: &str) -> Self {
|
pub fn derive(username: &str, password: &str) -> Self {
|
||||||
let salt: [u8; 16] = rand::random();
|
let salt: [u8; 16] = rand::random();
|
||||||
let w = WCurve::new();
|
let w = &*WCURVE;
|
||||||
let i = w.gen_password_validator_priv(username, password, &salt);
|
let i = w.gen_password_validator_priv(username, password, &salt);
|
||||||
let (x_gamma, parity) = w.gen_public_key(&i);
|
let (x_gamma, parity) = w.gen_public_key(&i);
|
||||||
Self {
|
Self {
|
||||||
@@ -495,7 +498,7 @@ pub async fn server_authenticate<S: AsyncReadExt + AsyncWriteExt + Unpin>(
|
|||||||
creds: &EcSrp5Credentials,
|
creds: &EcSrp5Credentials,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
tracing::info!("Starting EC-SRP5 server authentication");
|
tracing::info!("Starting EC-SRP5 server authentication");
|
||||||
let w = WCurve::new();
|
let w = &*WCURVE;
|
||||||
|
|
||||||
// MSG1: read [len][username\0][pubkey:32][parity:1]
|
// MSG1: read [len][username\0][pubkey:32][parity:1]
|
||||||
let mut len_buf = [0u8; 1];
|
let mut len_buf = [0u8; 1];
|
||||||
@@ -598,7 +601,12 @@ pub async fn server_authenticate<S: AsyncReadExt + AsyncWriteExt + Unpin>(
|
|||||||
|
|
||||||
mod hex {
|
mod hex {
|
||||||
pub fn encode(data: &[u8]) -> String {
|
pub fn encode(data: &[u8]) -> String {
|
||||||
data.iter().map(|b| format!("{:02x}", b)).collect()
|
let mut s = String::with_capacity(data.len() * 2);
|
||||||
|
for b in data {
|
||||||
|
use std::fmt::Write;
|
||||||
|
let _ = write!(s, "{:02x}", b);
|
||||||
|
}
|
||||||
|
s
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
163
src/server.rs
163
src/server.rs
@@ -4,6 +4,8 @@ use std::sync::atomic::Ordering;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use tokio::sync::Notify;
|
||||||
|
|
||||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||||
use tokio::net::{TcpListener, TcpStream, UdpSocket};
|
use tokio::net::{TcpListener, TcpStream, UdpSocket};
|
||||||
use tokio::sync::Mutex;
|
use tokio::sync::Mutex;
|
||||||
@@ -18,6 +20,7 @@ struct TcpSession {
|
|||||||
peer_ip: std::net::IpAddr,
|
peer_ip: std::net::IpAddr,
|
||||||
streams: Vec<TcpStream>,
|
streams: Vec<TcpStream>,
|
||||||
expected: u8,
|
expected: u8,
|
||||||
|
notify: Arc<Notify>,
|
||||||
}
|
}
|
||||||
|
|
||||||
type SessionMap = Arc<Mutex<HashMap<u16, TcpSession>>>;
|
type SessionMap = Arc<Mutex<HashMap<u16, TcpSession>>>;
|
||||||
@@ -135,6 +138,11 @@ async fn handle_client(
|
|||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
stream.set_nodelay(true)?;
|
stream.set_nodelay(true)?;
|
||||||
|
|
||||||
|
// Set TCP socket buffers to 4MB (matching UDP path) for high throughput
|
||||||
|
let sock_ref = socket2::SockRef::from(&stream);
|
||||||
|
let _ = sock_ref.set_send_buffer_size(4 * 1024 * 1024);
|
||||||
|
let _ = sock_ref.set_recv_buffer_size(4 * 1024 * 1024);
|
||||||
|
|
||||||
send_hello(&mut stream).await?;
|
send_hello(&mut stream).await?;
|
||||||
|
|
||||||
// Read 16-byte command (or whatever the client sends)
|
// Read 16-byte command (or whatever the client sends)
|
||||||
@@ -164,6 +172,7 @@ async fn handle_client(
|
|||||||
stream.flush().await?;
|
stream.flush().await?;
|
||||||
|
|
||||||
session.streams.push(stream);
|
session.streams.push(stream);
|
||||||
|
session.notify.notify_one();
|
||||||
tracing::info!(
|
tracing::info!(
|
||||||
"Secondary connection joined ({}/{})",
|
"Secondary connection joined ({}/{})",
|
||||||
session.streams.len() + 1,
|
session.streams.len() + 1,
|
||||||
@@ -244,6 +253,7 @@ async fn handle_client(
|
|||||||
for (_t, s) in map.iter_mut() {
|
for (_t, s) in map.iter_mut() {
|
||||||
if s.peer_ip == peer.ip() && s.streams.len() < s.expected as usize {
|
if s.peer_ip == peer.ip() && s.streams.len() < s.expected as usize {
|
||||||
s.streams.push(stream);
|
s.streams.push(stream);
|
||||||
|
s.notify.notify_one();
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -294,12 +304,14 @@ async fn handle_client(
|
|||||||
let conn_count = cmd.tcp_conn_count;
|
let conn_count = cmd.tcp_conn_count;
|
||||||
|
|
||||||
// Register session for secondary connections to find
|
// Register session for secondary connections to find
|
||||||
|
let notify = Arc::new(Notify::new());
|
||||||
{
|
{
|
||||||
let mut map = sessions.lock().await;
|
let mut map = sessions.lock().await;
|
||||||
map.insert(session_token, TcpSession {
|
map.insert(session_token, TcpSession {
|
||||||
peer_ip: peer.ip(),
|
peer_ip: peer.ip(),
|
||||||
streams: Vec::new(),
|
streams: Vec::new(),
|
||||||
expected: conn_count,
|
expected: conn_count,
|
||||||
|
notify: notify.clone(),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -315,7 +327,8 @@ async fn handle_client(
|
|||||||
if count + 1 >= conn_count as usize {
|
if count + 1 >= conn_count as usize {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if Instant::now() > deadline {
|
let now = Instant::now();
|
||||||
|
if now >= deadline {
|
||||||
tracing::warn!(
|
tracing::warn!(
|
||||||
"Timeout waiting for TCP connections ({}/{}), proceeding",
|
"Timeout waiting for TCP connections ({}/{}), proceeding",
|
||||||
count + 1,
|
count + 1,
|
||||||
@@ -323,7 +336,17 @@ async fn handle_client(
|
|||||||
);
|
);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
match tokio::time::timeout(deadline - now, notify.notified()).await {
|
||||||
|
Ok(()) => continue,
|
||||||
|
Err(_) => {
|
||||||
|
tracing::warn!(
|
||||||
|
"Timeout waiting for TCP connections ({}/{}), proceeding",
|
||||||
|
count + 1,
|
||||||
|
conn_count,
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let extra_streams = {
|
let extra_streams = {
|
||||||
@@ -366,8 +389,43 @@ async fn handle_client(
|
|||||||
|
|
||||||
// --- TCP Test Server ---
|
// --- TCP Test Server ---
|
||||||
|
|
||||||
|
/// Public TX task for multi-connection use by server_pro.
|
||||||
|
#[cfg(feature = "pro")]
|
||||||
|
pub async fn tcp_tx_task(
|
||||||
|
writer: tokio::net::tcp::OwnedWriteHalf,
|
||||||
|
tx_size: usize,
|
||||||
|
tx_speed: u32,
|
||||||
|
state: Arc<BandwidthState>,
|
||||||
|
) {
|
||||||
|
tcp_tx_loop(writer, tx_size, tx_speed, state).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Public RX task for multi-connection use by server_pro.
|
||||||
|
#[cfg(feature = "pro")]
|
||||||
|
pub async fn tcp_rx_task(
|
||||||
|
reader: tokio::net::tcp::OwnedReadHalf,
|
||||||
|
state: Arc<BandwidthState>,
|
||||||
|
) {
|
||||||
|
tcp_rx_loop(reader, state).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run a TCP bandwidth test on an already-authenticated stream.
|
||||||
|
/// Public API for use by server_pro.
|
||||||
|
#[cfg(feature = "pro")]
|
||||||
|
pub async fn run_tcp_test(
|
||||||
|
stream: TcpStream,
|
||||||
|
cmd: Command,
|
||||||
|
state: Arc<BandwidthState>,
|
||||||
|
) -> Result<(u64, u64, u64, u32)> {
|
||||||
|
run_tcp_test_inner(stream, cmd, state).await
|
||||||
|
}
|
||||||
|
|
||||||
async fn run_tcp_test_server(stream: TcpStream, cmd: Command) -> Result<(u64, u64, u64, u32)> {
|
async fn run_tcp_test_server(stream: TcpStream, cmd: Command) -> Result<(u64, u64, u64, u32)> {
|
||||||
let state = BandwidthState::new();
|
let state = BandwidthState::new();
|
||||||
|
run_tcp_test_inner(stream, cmd, state).await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_tcp_test_inner(stream: TcpStream, cmd: Command, state: Arc<BandwidthState>) -> Result<(u64, u64, u64, u32)> {
|
||||||
let tx_size = cmd.tx_size as usize;
|
let tx_size = cmd.tx_size as usize;
|
||||||
let server_should_tx = cmd.server_tx();
|
let server_should_tx = cmd.server_tx();
|
||||||
let server_should_rx = cmd.server_rx();
|
let server_should_rx = cmd.server_rx();
|
||||||
@@ -437,9 +495,23 @@ async fn run_tcp_test_server(stream: TcpStream, cmd: Command) -> Result<(u64, u6
|
|||||||
Ok(state.summary())
|
Ok(state.summary())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Public API for multi-connection TCP test with external state. Used by server_pro.
|
||||||
|
#[cfg(feature = "pro")]
|
||||||
|
pub async fn run_tcp_multiconn_test(
|
||||||
|
streams: Vec<TcpStream>,
|
||||||
|
cmd: Command,
|
||||||
|
state: Arc<BandwidthState>,
|
||||||
|
) -> Result<(u64, u64, u64, u32)> {
|
||||||
|
run_tcp_multiconn_inner(streams, cmd, state).await
|
||||||
|
}
|
||||||
|
|
||||||
/// TCP multi-connection.
|
/// TCP multi-connection.
|
||||||
async fn run_tcp_multiconn_server(streams: Vec<TcpStream>, cmd: Command) -> Result<(u64, u64, u64, u32)> {
|
async fn run_tcp_multiconn_server(streams: Vec<TcpStream>, cmd: Command) -> Result<(u64, u64, u64, u32)> {
|
||||||
let state = BandwidthState::new();
|
let state = BandwidthState::new();
|
||||||
|
run_tcp_multiconn_inner(streams, cmd, state).await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_tcp_multiconn_inner(streams: Vec<TcpStream>, cmd: Command, state: Arc<BandwidthState>) -> Result<(u64, u64, u64, u32)> {
|
||||||
let tx_size = cmd.tx_size as usize;
|
let tx_size = cmd.tx_size as usize;
|
||||||
let server_should_tx = cmd.server_tx();
|
let server_should_tx = cmd.server_tx();
|
||||||
let server_should_rx = cmd.server_rx();
|
let server_should_rx = cmd.server_rx();
|
||||||
@@ -526,15 +598,19 @@ async fn tcp_tx_loop_inner(
|
|||||||
) {
|
) {
|
||||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||||
|
|
||||||
let packet = vec![0u8; tx_size];
|
|
||||||
let mut interval = bandwidth::calc_send_interval(tx_speed, tx_size as u16);
|
let mut interval = bandwidth::calc_send_interval(tx_speed, tx_size as u16);
|
||||||
|
// Use larger writes when running unlimited to reduce syscall overhead
|
||||||
|
let effective_size = if interval.is_none() { tx_size.max(256 * 1024) } else { tx_size };
|
||||||
|
let packet = vec![0u8; effective_size];
|
||||||
let mut next_send = Instant::now();
|
let mut next_send = Instant::now();
|
||||||
let mut next_status = Instant::now() + Duration::from_secs(1);
|
let mut next_status = Instant::now() + Duration::from_secs(1);
|
||||||
let mut status_seq: u32 = 0;
|
let mut status_seq: u32 = 0;
|
||||||
|
|
||||||
while state.running.load(Ordering::Relaxed) {
|
while state.running.load(Ordering::Relaxed) {
|
||||||
|
let now = Instant::now();
|
||||||
|
|
||||||
// Inject status message every ~1 second if in bidirectional mode
|
// Inject status message every ~1 second if in bidirectional mode
|
||||||
if send_status && Instant::now() >= next_status {
|
if send_status && now >= next_status {
|
||||||
status_seq += 1;
|
status_seq += 1;
|
||||||
let rx_bytes = state.rx_bytes.swap(0, Ordering::Relaxed);
|
let rx_bytes = state.rx_bytes.swap(0, Ordering::Relaxed);
|
||||||
let status = StatusMessage { cpu_load: crate::cpu::get(),
|
let status = StatusMessage { cpu_load: crate::cpu::get(),
|
||||||
@@ -547,28 +623,29 @@ async fn tcp_tx_loop_inner(
|
|||||||
}
|
}
|
||||||
state.record_interval(0, rx_bytes, 0);
|
state.record_interval(0, rx_bytes, 0);
|
||||||
bandwidth::print_status(status_seq, "RX", rx_bytes, Duration::from_secs(1), None);
|
bandwidth::print_status(status_seq, "RX", rx_bytes, Duration::from_secs(1), None);
|
||||||
next_status = Instant::now() + Duration::from_secs(1);
|
next_status = now + Duration::from_secs(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !state.spend_budget(effective_size as u64) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
if writer.write_all(&packet).await.is_err() {
|
if writer.write_all(&packet).await.is_err() {
|
||||||
state.running.store(false, Ordering::SeqCst);
|
state.running.store(false, Ordering::SeqCst);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
state.tx_bytes.fetch_add(tx_size as u64, Ordering::Relaxed);
|
state.tx_bytes.fetch_add(effective_size as u64, Ordering::Relaxed);
|
||||||
|
|
||||||
if state.tx_speed_changed.load(Ordering::Relaxed) {
|
if state.tx_speed_changed.load(Ordering::Relaxed) {
|
||||||
state.tx_speed_changed.store(false, Ordering::Relaxed);
|
state.tx_speed_changed.store(false, Ordering::Relaxed);
|
||||||
let new_speed = state.tx_speed.load(Ordering::Relaxed);
|
let new_speed = state.tx_speed.load(Ordering::Relaxed);
|
||||||
interval = bandwidth::calc_send_interval(new_speed, tx_size as u16);
|
interval = bandwidth::calc_send_interval(new_speed, tx_size as u16);
|
||||||
next_send = Instant::now();
|
next_send = now;
|
||||||
}
|
}
|
||||||
|
|
||||||
match interval {
|
match interval {
|
||||||
Some(iv) => {
|
Some(iv) => {
|
||||||
next_send += iv;
|
if let Some(delay) = bandwidth::advance_next_send(&mut next_send, iv, now) {
|
||||||
let now = Instant::now();
|
tokio::time::sleep(delay).await;
|
||||||
if next_send > now {
|
|
||||||
tokio::time::sleep(next_send - now).await;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
@@ -579,7 +656,7 @@ async fn tcp_tx_loop_inner(
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn tcp_rx_loop(mut reader: tokio::net::tcp::OwnedReadHalf, state: Arc<BandwidthState>) {
|
async fn tcp_rx_loop(mut reader: tokio::net::tcp::OwnedReadHalf, state: Arc<BandwidthState>) {
|
||||||
let mut buf = vec![0u8; 65536];
|
let mut buf = vec![0u8; 256 * 1024];
|
||||||
while state.running.load(Ordering::Relaxed) {
|
while state.running.load(Ordering::Relaxed) {
|
||||||
match reader.read(&mut buf).await {
|
match reader.read(&mut buf).await {
|
||||||
Ok(0) | Err(_) => {
|
Ok(0) | Err(_) => {
|
||||||
@@ -587,6 +664,9 @@ async fn tcp_rx_loop(mut reader: tokio::net::tcp::OwnedReadHalf, state: Arc<Band
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
Ok(n) => {
|
Ok(n) => {
|
||||||
|
if !state.spend_budget(n as u64) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -634,6 +714,19 @@ async fn tcp_status_sender(
|
|||||||
|
|
||||||
// --- UDP Test Server ---
|
// --- UDP Test Server ---
|
||||||
|
|
||||||
|
/// Run a UDP bandwidth test on an already-authenticated stream.
|
||||||
|
/// Public API for use by server_pro. Caller provides the UDP port offset.
|
||||||
|
#[cfg(feature = "pro")]
|
||||||
|
pub async fn run_udp_test(
|
||||||
|
stream: &mut TcpStream,
|
||||||
|
peer: SocketAddr,
|
||||||
|
cmd: &Command,
|
||||||
|
state: Arc<BandwidthState>,
|
||||||
|
udp_port_start: u16,
|
||||||
|
) -> Result<(u64, u64, u64, u32)> {
|
||||||
|
run_udp_test_inner(stream, peer, cmd, state, udp_port_start).await
|
||||||
|
}
|
||||||
|
|
||||||
async fn run_udp_test_server(
|
async fn run_udp_test_server(
|
||||||
stream: &mut TcpStream,
|
stream: &mut TcpStream,
|
||||||
peer: SocketAddr,
|
peer: SocketAddr,
|
||||||
@@ -641,7 +734,17 @@ async fn run_udp_test_server(
|
|||||||
udp_port_offset: Arc<std::sync::atomic::AtomicU16>,
|
udp_port_offset: Arc<std::sync::atomic::AtomicU16>,
|
||||||
) -> Result<(u64, u64, u64, u32)> {
|
) -> Result<(u64, u64, u64, u32)> {
|
||||||
let offset = udp_port_offset.fetch_add(1, Ordering::SeqCst);
|
let offset = udp_port_offset.fetch_add(1, Ordering::SeqCst);
|
||||||
let server_udp_port = BTEST_UDP_PORT_START + offset;
|
let state = BandwidthState::new();
|
||||||
|
run_udp_test_inner(stream, peer, cmd, state, BTEST_UDP_PORT_START + offset).await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_udp_test_inner(
|
||||||
|
stream: &mut TcpStream,
|
||||||
|
peer: SocketAddr,
|
||||||
|
cmd: &Command,
|
||||||
|
state: Arc<BandwidthState>,
|
||||||
|
server_udp_port: u16,
|
||||||
|
) -> Result<(u64, u64, u64, u32)> {
|
||||||
let client_udp_port = server_udp_port + BTEST_PORT_CLIENT_OFFSET;
|
let client_udp_port = server_udp_port + BTEST_PORT_CLIENT_OFFSET;
|
||||||
|
|
||||||
stream.write_all(&server_udp_port.to_be_bytes()).await?;
|
stream.write_all(&server_udp_port.to_be_bytes()).await?;
|
||||||
@@ -708,7 +811,6 @@ async fn run_udp_test_server(
|
|||||||
if use_unconnected { "unconnected" } else { "connected" },
|
if use_unconnected { "unconnected" } else { "connected" },
|
||||||
);
|
);
|
||||||
|
|
||||||
let state = BandwidthState::new();
|
|
||||||
let tx_size = cmd.tx_size as usize;
|
let tx_size = cmd.tx_size as usize;
|
||||||
let server_should_tx = cmd.server_tx();
|
let server_should_tx = cmd.server_tx();
|
||||||
let server_should_rx = cmd.server_rx();
|
let server_should_rx = cmd.server_rx();
|
||||||
@@ -762,6 +864,10 @@ async fn udp_tx_loop(
|
|||||||
let mut consecutive_errors: u32 = 0;
|
let mut consecutive_errors: u32 = 0;
|
||||||
|
|
||||||
while state.running.load(Ordering::Relaxed) {
|
while state.running.load(Ordering::Relaxed) {
|
||||||
|
if !state.spend_budget(tx_size as u64) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
packet[0..4].copy_from_slice(&seq.to_be_bytes());
|
packet[0..4].copy_from_slice(&seq.to_be_bytes());
|
||||||
|
|
||||||
let result = if multi_conn {
|
let result = if multi_conn {
|
||||||
@@ -805,10 +911,9 @@ async fn udp_tx_loop(
|
|||||||
|
|
||||||
match interval {
|
match interval {
|
||||||
Some(iv) => {
|
Some(iv) => {
|
||||||
next_send += iv;
|
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
if next_send > now {
|
if let Some(delay) = bandwidth::advance_next_send(&mut next_send, iv, now) {
|
||||||
tokio::time::sleep(next_send - now).await;
|
tokio::time::sleep(delay).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
@@ -832,12 +937,18 @@ async fn udp_tx_loop(
|
|||||||
async fn udp_rx_loop(socket: &UdpSocket, state: Arc<BandwidthState>) {
|
async fn udp_rx_loop(socket: &UdpSocket, state: Arc<BandwidthState>) {
|
||||||
let mut buf = vec![0u8; 65536];
|
let mut buf = vec![0u8; 65536];
|
||||||
let mut last_seq: Option<u32> = None;
|
let mut last_seq: Option<u32> = None;
|
||||||
|
let mut timeout = tokio::time::sleep(Duration::from_secs(5));
|
||||||
|
tokio::pin!(timeout);
|
||||||
|
|
||||||
while state.running.load(Ordering::Relaxed) {
|
while state.running.load(Ordering::Relaxed) {
|
||||||
// Use recv_from to accept packets from any source port
|
tokio::select! {
|
||||||
// (multi-connection MikroTik sends from multiple ports)
|
biased;
|
||||||
match tokio::time::timeout(Duration::from_secs(5), socket.recv_from(&mut buf)).await {
|
res = socket.recv_from(&mut buf) => {
|
||||||
Ok(Ok((n, _src))) if n >= 4 => {
|
match res {
|
||||||
|
Ok((n, _src)) if n >= 4 => {
|
||||||
|
if !state.spend_budget(n as u64) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||||
state.rx_packets.fetch_add(1, Ordering::Relaxed);
|
state.rx_packets.fetch_add(1, Ordering::Relaxed);
|
||||||
|
|
||||||
@@ -852,13 +963,17 @@ async fn udp_rx_loop(socket: &UdpSocket, state: Arc<BandwidthState>) {
|
|||||||
last_seq = Some(seq);
|
last_seq = Some(seq);
|
||||||
state.last_udp_seq.store(seq, Ordering::Relaxed);
|
state.last_udp_seq.store(seq, Ordering::Relaxed);
|
||||||
}
|
}
|
||||||
Ok(Ok(_)) => {}
|
Ok(_) => {}
|
||||||
Ok(Err(e)) => {
|
Err(e) => {
|
||||||
tracing::debug!("UDP recv error: {}", e);
|
tracing::debug!("UDP recv error: {}", e);
|
||||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||||
}
|
}
|
||||||
Err(_) => {
|
}
|
||||||
|
timeout.as_mut().reset(tokio::time::Instant::now() + Duration::from_secs(5));
|
||||||
|
}
|
||||||
|
_ = &mut timeout => {
|
||||||
tracing::debug!("UDP RX timeout");
|
tracing::debug!("UDP RX timeout");
|
||||||
|
timeout.as_mut().reset(tokio::time::Instant::now() + Duration::from_secs(5));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
411
src/server_pro/enforcer.rs
Normal file
411
src/server_pro/enforcer.rs
Normal file
@@ -0,0 +1,411 @@
|
|||||||
|
//! Mid-session quota enforcement.
|
||||||
|
//!
|
||||||
|
//! Runs alongside a bandwidth test, periodically checking if the user
|
||||||
|
//! or IP has exceeded their quota. Terminates the test if so.
|
||||||
|
|
||||||
|
use std::net::IpAddr;
|
||||||
|
use std::sync::atomic::Ordering;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use btest_rs::bandwidth::BandwidthState;
|
||||||
|
|
||||||
|
use super::quota::{Direction, QuotaError, QuotaManager};
|
||||||
|
|
||||||
|
/// Enforces quotas during an active test session.
|
||||||
|
/// Call `run()` as a spawned task — it will set `state.running = false`
|
||||||
|
/// when a quota is exceeded or max_duration is reached.
|
||||||
|
pub struct QuotaEnforcer {
|
||||||
|
quota_mgr: QuotaManager,
|
||||||
|
username: String,
|
||||||
|
ip: IpAddr,
|
||||||
|
state: Arc<BandwidthState>,
|
||||||
|
check_interval: Duration,
|
||||||
|
max_duration: Duration,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub enum StopReason {
|
||||||
|
/// Test still running (not stopped)
|
||||||
|
Running,
|
||||||
|
/// Max duration reached
|
||||||
|
MaxDuration,
|
||||||
|
/// User daily quota exceeded
|
||||||
|
UserDailyQuota,
|
||||||
|
/// User weekly quota exceeded
|
||||||
|
UserWeeklyQuota,
|
||||||
|
/// User monthly quota exceeded
|
||||||
|
UserMonthlyQuota,
|
||||||
|
/// IP daily quota exceeded
|
||||||
|
IpDailyQuota,
|
||||||
|
/// IP weekly quota exceeded
|
||||||
|
IpWeeklyQuota,
|
||||||
|
/// IP monthly quota exceeded
|
||||||
|
IpMonthlyQuota,
|
||||||
|
/// Client disconnected normally
|
||||||
|
ClientDisconnected,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for StopReason {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::Running => write!(f, "running"),
|
||||||
|
Self::MaxDuration => write!(f, "max_duration_reached"),
|
||||||
|
Self::UserDailyQuota => write!(f, "user_daily_quota_exceeded"),
|
||||||
|
Self::UserWeeklyQuota => write!(f, "user_weekly_quota_exceeded"),
|
||||||
|
Self::UserMonthlyQuota => write!(f, "user_monthly_quota_exceeded"),
|
||||||
|
Self::IpDailyQuota => write!(f, "ip_daily_quota_exceeded"),
|
||||||
|
Self::IpWeeklyQuota => write!(f, "ip_weekly_quota_exceeded"),
|
||||||
|
Self::IpMonthlyQuota => write!(f, "ip_monthly_quota_exceeded"),
|
||||||
|
Self::ClientDisconnected => write!(f, "client_disconnected"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl QuotaEnforcer {
|
||||||
|
pub fn new(
|
||||||
|
quota_mgr: QuotaManager,
|
||||||
|
username: String,
|
||||||
|
ip: IpAddr,
|
||||||
|
state: Arc<BandwidthState>,
|
||||||
|
check_interval_secs: u64,
|
||||||
|
max_duration_secs: u64,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
quota_mgr,
|
||||||
|
username,
|
||||||
|
ip,
|
||||||
|
state,
|
||||||
|
check_interval: Duration::from_secs(check_interval_secs.max(1)),
|
||||||
|
max_duration: if max_duration_secs > 0 {
|
||||||
|
Duration::from_secs(max_duration_secs)
|
||||||
|
} else {
|
||||||
|
Duration::from_secs(u64::MAX / 2) // effectively unlimited
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run the enforcer loop. Returns the reason the test was stopped.
|
||||||
|
/// This should be spawned as a tokio task.
|
||||||
|
pub async fn run(&self) -> StopReason {
|
||||||
|
let start = Instant::now();
|
||||||
|
let mut interval = tokio::time::interval(self.check_interval);
|
||||||
|
interval.tick().await; // consume first immediate tick
|
||||||
|
|
||||||
|
loop {
|
||||||
|
interval.tick().await;
|
||||||
|
|
||||||
|
// Check if test already ended normally
|
||||||
|
if !self.state.running.load(Ordering::Relaxed) {
|
||||||
|
return StopReason::ClientDisconnected;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check max duration
|
||||||
|
if start.elapsed() >= self.max_duration {
|
||||||
|
tracing::warn!(
|
||||||
|
"Max duration ({:?}) reached for user '{}' from {}",
|
||||||
|
self.max_duration, self.username, self.ip,
|
||||||
|
);
|
||||||
|
self.state.running.store(false, Ordering::SeqCst);
|
||||||
|
return StopReason::MaxDuration;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush current session bytes to DB before checking
|
||||||
|
// (read without reset — totals accumulate, we just need current snapshot)
|
||||||
|
let session_tx = self.state.total_tx_bytes.load(Ordering::Relaxed);
|
||||||
|
let session_rx = self.state.total_rx_bytes.load(Ordering::Relaxed);
|
||||||
|
|
||||||
|
// Temporarily record session bytes so quota check sees them
|
||||||
|
// We use a separate "pending" record that gets finalized at session end
|
||||||
|
let ip_str = self.ip.to_string();
|
||||||
|
|
||||||
|
// Check user quotas
|
||||||
|
match self.check_user_with_session(session_tx, session_rx) {
|
||||||
|
StopReason::Running => {}
|
||||||
|
reason => {
|
||||||
|
tracing::warn!(
|
||||||
|
"Quota exceeded for user '{}' from {}: {} (session: tx={}, rx={})",
|
||||||
|
self.username, self.ip, reason, session_tx, session_rx,
|
||||||
|
);
|
||||||
|
self.state.running.store(false, Ordering::SeqCst);
|
||||||
|
return reason;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check IP quotas
|
||||||
|
match self.check_ip_with_session(&ip_str, session_tx, session_rx) {
|
||||||
|
StopReason::Running => {}
|
||||||
|
reason => {
|
||||||
|
tracing::warn!(
|
||||||
|
"IP quota exceeded for {} (user '{}'): {} (session: tx={}, rx={})",
|
||||||
|
self.ip, self.username, reason, session_tx, session_rx,
|
||||||
|
);
|
||||||
|
self.state.running.store(false, Ordering::SeqCst);
|
||||||
|
return reason;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn check_user_with_session(&self, session_tx: u64, session_rx: u64) -> StopReason {
|
||||||
|
let session_total = session_tx + session_rx;
|
||||||
|
|
||||||
|
// Check against quota manager (which reads DB)
|
||||||
|
// The DB has usage from PREVIOUS sessions; we add current session bytes
|
||||||
|
if let Err(e) = self.quota_mgr.check_user(&self.username) {
|
||||||
|
// Already exceeded from previous sessions
|
||||||
|
return match e {
|
||||||
|
QuotaError::DailyExceeded { .. } => StopReason::UserDailyQuota,
|
||||||
|
QuotaError::WeeklyExceeded { .. } => StopReason::UserWeeklyQuota,
|
||||||
|
QuotaError::MonthlyExceeded { .. } => StopReason::UserMonthlyQuota,
|
||||||
|
_ => StopReason::UserDailyQuota,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also check if current session PLUS previous usage exceeds quota
|
||||||
|
// (check_user only sees DB, not current session bytes)
|
||||||
|
// This is handled by the quota_mgr.check_user reading from DB,
|
||||||
|
// and we periodically flush to DB during the session.
|
||||||
|
StopReason::Running
|
||||||
|
}
|
||||||
|
|
||||||
|
fn check_ip_with_session(&self, _ip_str: &str, _session_tx: u64, _session_rx: u64) -> StopReason {
|
||||||
|
if let Err(e) = self.quota_mgr.check_ip(&self.ip, Direction::Both) {
|
||||||
|
return match e {
|
||||||
|
QuotaError::IpDailyExceeded { .. } | QuotaError::IpInboundDailyExceeded { .. } | QuotaError::IpOutboundDailyExceeded { .. } => StopReason::IpDailyQuota,
|
||||||
|
QuotaError::IpWeeklyExceeded { .. } | QuotaError::IpInboundWeeklyExceeded { .. } | QuotaError::IpOutboundWeeklyExceeded { .. } => StopReason::IpWeeklyQuota,
|
||||||
|
QuotaError::IpMonthlyExceeded { .. } | QuotaError::IpInboundMonthlyExceeded { .. } | QuotaError::IpOutboundMonthlyExceeded { .. } => StopReason::IpMonthlyQuota,
|
||||||
|
QuotaError::TooManyConnections { .. } => StopReason::IpDailyQuota, // reuse
|
||||||
|
_ => StopReason::IpDailyQuota,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
StopReason::Running
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Flush session bytes to DB. Call periodically and at session end.
|
||||||
|
pub fn flush_to_db(&self, ip_str: &str) {
|
||||||
|
let tx = self.state.total_tx_bytes.load(Ordering::Relaxed);
|
||||||
|
let rx = self.state.total_rx_bytes.load(Ordering::Relaxed);
|
||||||
|
// From server perspective: tx = outbound (we sent), rx = inbound (we received)
|
||||||
|
self.quota_mgr.record_usage(
|
||||||
|
&self.username,
|
||||||
|
ip_str,
|
||||||
|
rx, // inbound = what we received from client
|
||||||
|
tx, // outbound = what we sent to client
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use crate::user_db::UserDb;
    use crate::quota::QuotaManager;

    /// Build an in-memory user DB with a single enabled user ("testuser")
    /// and a QuotaManager configured with small, easily-exceeded limits so
    /// tests can trip each quota quickly.
    fn setup_test_db() -> (UserDb, QuotaManager) {
        let db = UserDb::open(":memory:").unwrap();
        db.ensure_tables().unwrap();
        db.add_user("testuser", "testpass").unwrap();
        let qm = QuotaManager::new(
            db.clone(),
            1000, // daily: 1000 bytes
            5000, // weekly
            10000, // monthly
            500, // ip daily (combined)
            2000, // ip weekly (combined)
            8000, // ip monthly (combined)
            500, // ip_daily_inbound
            500, // ip_daily_outbound
            2000, // ip_weekly_inbound
            2000, // ip_weekly_outbound
            8000, // ip_monthly_inbound
            8000, // ip_monthly_outbound
            2, // max conn per ip
            60, // max duration
        );
        (db, qm)
    }

    /// A max-duration of 2s with a 1s check interval must stop the enforcer
    /// with `StopReason::MaxDuration` and clear the shared `running` flag.
    #[tokio::test]
    async fn test_enforcer_max_duration() {
        let (db, qm) = setup_test_db();
        let state = BandwidthState::new();
        let enforcer = QuotaEnforcer::new(
            qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
            state.clone(), 1, 2, // check every 1s, max 2s
        );
        let reason = enforcer.run().await;
        assert_eq!(reason, StopReason::MaxDuration);
        assert!(!state.running.load(Ordering::Relaxed));
    }

    /// Clearing `running` externally (simulating a client disconnect) must
    /// make the enforcer exit with `StopReason::ClientDisconnected`.
    #[tokio::test]
    async fn test_enforcer_client_disconnect() {
        let (db, qm) = setup_test_db();
        let state = BandwidthState::new();
        let state_clone = state.clone();

        // Stop the test after 500ms
        tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(500)).await;
            state_clone.running.store(false, Ordering::SeqCst);
        });

        let enforcer = QuotaEnforcer::new(
            qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
            state, 1, 0, // check every 1s, no max duration
        );
        let reason = enforcer.run().await;
        assert_eq!(reason, StopReason::ClientDisconnected);
    }

    /// Usage already above the per-user daily limit must stop the enforcer
    /// with `StopReason::UserDailyQuota` on its first check.
    #[tokio::test]
    async fn test_enforcer_user_daily_quota_exceeded() {
        let (db, qm) = setup_test_db();

        // Pre-fill usage to exceed daily quota (1000 bytes)
        db.record_usage("testuser", 600, 500).unwrap(); // 1100 > 1000

        let state = BandwidthState::new();
        let enforcer = QuotaEnforcer::new(
            qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
            state.clone(), 1, 0,
        );
        let reason = enforcer.run().await;
        assert_eq!(reason, StopReason::UserDailyQuota);
        assert!(!state.running.load(Ordering::Relaxed));
    }

    /// Usage already above the per-IP daily limit must stop the enforcer
    /// with `StopReason::IpDailyQuota`.
    #[tokio::test]
    async fn test_enforcer_ip_daily_quota_exceeded() {
        let (db, qm) = setup_test_db();

        // Pre-fill IP usage to exceed IP daily quota (500 bytes)
        db.record_ip_usage("127.0.0.1", 300, 300).unwrap(); // 600 > 500

        let state = BandwidthState::new();
        let enforcer = QuotaEnforcer::new(
            qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
            state.clone(), 1, 0,
        );
        let reason = enforcer.run().await;
        assert_eq!(reason, StopReason::IpDailyQuota);
        assert!(!state.running.load(Ordering::Relaxed));
    }

    /// With usage comfortably below every limit, the enforcer must keep
    /// running until the client disconnects.
    #[tokio::test]
    async fn test_enforcer_under_quota_runs_normally() {
        let (db, qm) = setup_test_db();

        // Usage well under quota
        db.record_usage("testuser", 100, 100).unwrap(); // 200 < 1000

        let state = BandwidthState::new();
        let state_clone = state.clone();

        // Stop after 2s
        tokio::spawn(async move {
            tokio::time::sleep(Duration::from_secs(2)).await;
            state_clone.running.store(false, Ordering::SeqCst);
        });

        let enforcer = QuotaEnforcer::new(
            qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
            state, 1, 0,
        );
        let reason = enforcer.run().await;
        assert_eq!(reason, StopReason::ClientDisconnected);
    }

    /// `flush_to_db` must map the session counters to the DB with the
    /// correct direction: tx→outbound, rx→inbound (see comments below).
    #[tokio::test]
    async fn test_enforcer_flush_records_usage() {
        let (db, qm) = setup_test_db();
        let state = BandwidthState::new();

        // Simulate some transfer
        state.total_tx_bytes.store(5000, Ordering::Relaxed);
        state.total_rx_bytes.store(3000, Ordering::Relaxed);

        let enforcer = QuotaEnforcer::new(
            qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
            state, 10, 0,
        );
        enforcer.flush_to_db("127.0.0.1");

        // flush_to_db: total_tx=5000→outbound, total_rx=3000→inbound
        // quota_mgr.record_usage(inbound=3000, outbound=5000)
        // db.record_usage(tx=outbound=5000, rx=inbound=3000)
        let (tx, rx) = db.get_daily_usage("testuser").unwrap();
        assert_eq!(tx, 5000); // outbound (what server sent)
        assert_eq!(rx, 3000); // inbound (what server received)

        let (ip_in, ip_out) = db.get_ip_daily_usage("127.0.0.1").unwrap();
        assert!(ip_in + ip_out > 0, "IP usage should be recorded");
    }

    /// `remaining_budget` must return the minimum remaining allowance over
    /// all user and IP limits, and shrink as usage is recorded.
    #[test]
    fn test_remaining_budget_calculation() {
        let (db, qm) = setup_test_db();
        let ip: IpAddr = "10.0.0.1".parse().unwrap();

        // No usage yet: budget = min(daily=1000, weekly=5000, monthly=10000, ip_daily=500, ...)
        // IP daily combined = 500 is the smallest
        let budget = qm.remaining_budget("testuser", &ip);
        assert_eq!(budget, 500, "budget should be min of all limits (ip_daily=500)");

        // Use record_usage which properly records combined + directional
        // inbound=200, outbound=200 → combined = 400
        qm.record_usage("testuser", "10.0.0.1", 200, 200);

        // IP daily combined: 500 - 400 = 100 remaining
        // IP daily inbound: 500 - 200 = 300 remaining
        // IP daily outbound: 500 - 200 = 300 remaining
        // User daily: 1000 - 400 = 600 remaining
        let budget = qm.remaining_budget("testuser", &ip);
        assert_eq!(budget, 100, "budget should reflect IP combined remaining (100)");
    }

    /// Once any limit is fully consumed, the remaining budget must be 0.
    #[test]
    fn test_budget_zero_when_exhausted() {
        let (db, qm) = setup_test_db();
        let ip: IpAddr = "10.0.0.2".parse().unwrap();

        // Exhaust user daily quota (1000 bytes)
        db.record_usage("testuser", 600, 500).unwrap(); // 1100 > 1000

        let budget = qm.remaining_budget("testuser", &ip);
        assert_eq!(budget, 0, "budget should be 0 when user daily quota is exhausted");
    }

    /// `spend_budget` must allow spends up to the configured budget, reject
    /// the first overdraft, and clear `running` when it does so.
    #[test]
    fn test_byte_budget_stops_transfer() {
        let state = BandwidthState::new();

        // Set a 1000-byte budget
        state.set_budget(1000);

        // Spend 500 bytes — should succeed
        assert!(state.spend_budget(500));

        // Spend another 400 — should succeed (100 remaining)
        assert!(state.spend_budget(400));

        // Spend 200 — should fail (only 100 remaining)
        assert!(!state.spend_budget(200));

        // running should be false
        assert!(!state.running.load(Ordering::Relaxed));
    }

    /// With the default (unlimited) budget, spends of any size must always
    /// succeed and never stop the session.
    #[test]
    fn test_unlimited_budget_always_succeeds() {
        let state = BandwidthState::new();
        // Default budget is u64::MAX (unlimited)

        // Should always succeed
        for _ in 0..1000 {
            assert!(state.spend_budget(1_000_000_000));
        }
        assert!(state.running.load(Ordering::Relaxed));
    }
}
|
||||||
98
src/server_pro/ldap_auth.rs
Normal file
98
src/server_pro/ldap_auth.rs
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
//! LDAP/Active Directory authentication for btest-server-pro.
|
||||||
|
//!
|
||||||
|
//! Authenticates users against an LDAP directory using simple bind.
|
||||||
|
|
||||||
|
use ldap3::{LdapConnAsync, Scope, SearchEntry};
|
||||||
|
|
||||||
|
/// Connection and search settings for LDAP authentication.
pub struct LdapConfig {
    /// LDAP server URL, e.g. `ldap://dc.example.com`.
    pub url: String,
    /// Base DN under which users are searched, or to which `uid=<user>` is
    /// prepended when no service account is configured.
    pub base_dn: String,
    /// Optional service-account DN; when set (with `bind_pass`), it is used
    /// to search the directory for the user's DN before the user bind.
    pub bind_dn: Option<String>,
    /// Password for the service account; used together with `bind_dn`.
    pub bind_pass: Option<String>,
}
|
||||||
|
|
||||||
|
/// Authenticates users against an LDAP directory via simple bind, using the
/// connection settings in [`LdapConfig`].
pub struct LdapAuth {
    // Immutable configuration; a fresh connection is opened per authenticate call.
    config: LdapConfig,
}
|
||||||
|
|
||||||
|
/// Escape special characters in LDAP filter values per RFC 4515.
///
/// The five characters that carry meaning inside a search filter
/// (`\`, `*`, `(`, `)`, and NUL) are replaced with their backslash-hex
/// escape sequences; every other character passes through unchanged.
fn ldap_escape(input: &str) -> String {
    let mut escaped = String::with_capacity(input.len());
    for ch in input.chars() {
        let replacement = match ch {
            '\\' => "\\5c",
            '*' => "\\2a",
            '(' => "\\28",
            ')' => "\\29",
            '\0' => "\\00",
            other => {
                escaped.push(other);
                continue;
            }
        };
        escaped.push_str(replacement);
    }
    escaped
}
|
||||||
|
|
||||||
|
impl LdapAuth {
|
||||||
|
pub fn new(config: LdapConfig) -> Self {
|
||||||
|
Self { config }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Authenticate a user by attempting an LDAP bind.
|
||||||
|
/// Returns Ok(true) if authentication succeeds.
|
||||||
|
pub async fn authenticate(&self, username: &str, password: &str) -> anyhow::Result<bool> {
|
||||||
|
let (conn, mut ldap) = LdapConnAsync::new(&self.config.url).await?;
|
||||||
|
ldap3::drive!(conn);
|
||||||
|
|
||||||
|
let safe_username = ldap_escape(username);
|
||||||
|
|
||||||
|
// If service account configured, bind first to search for user DN
|
||||||
|
let user_dn = if let (Some(ref bind_dn), Some(ref bind_pass)) =
|
||||||
|
(&self.config.bind_dn, &self.config.bind_pass)
|
||||||
|
{
|
||||||
|
let result = ldap.simple_bind(bind_dn, bind_pass).await?;
|
||||||
|
if result.rc != 0 {
|
||||||
|
tracing::warn!("LDAP service bind failed: rc={}", result.rc);
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search for the user
|
||||||
|
let filter = format!(
|
||||||
|
"(&(objectClass=person)(|(uid={})(sAMAccountName={})(cn={})))",
|
||||||
|
safe_username, safe_username, safe_username
|
||||||
|
);
|
||||||
|
let (results, _) = ldap
|
||||||
|
.search(&self.config.base_dn, Scope::Subtree, &filter, vec!["dn"])
|
||||||
|
.await?
|
||||||
|
.success()?;
|
||||||
|
|
||||||
|
if results.is_empty() {
|
||||||
|
tracing::debug!("LDAP user not found: {}", username);
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
let entry = match results.into_iter().next() {
|
||||||
|
Some(r) => SearchEntry::construct(r),
|
||||||
|
None => {
|
||||||
|
tracing::debug!("LDAP user not found: {}", username);
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
entry.dn
|
||||||
|
} else {
|
||||||
|
// No service account — construct DN directly
|
||||||
|
format!("uid={},{}", safe_username, self.config.base_dn)
|
||||||
|
};
|
||||||
|
|
||||||
|
// Attempt user bind
|
||||||
|
let result = ldap.simple_bind(&user_dn, password).await?;
|
||||||
|
let success = result.rc == 0;
|
||||||
|
|
||||||
|
if success {
|
||||||
|
tracing::info!("LDAP auth successful for {} (dn={})", username, user_dn);
|
||||||
|
} else {
|
||||||
|
tracing::warn!("LDAP auth failed for {} (dn={}): rc={}", username, user_dn, result.rc);
|
||||||
|
}
|
||||||
|
|
||||||
|
let _ = ldap.unbind().await;
|
||||||
|
Ok(success)
|
||||||
|
}
|
||||||
|
}
|
||||||
343
src/server_pro/main.rs
Normal file
343
src/server_pro/main.rs
Normal file
@@ -0,0 +1,343 @@
|
|||||||
|
//! btest-server-pro: MikroTik Bandwidth Test server with multi-user, quotas, and LDAP.
|
||||||
|
//!
|
||||||
|
//! This is a superset of the standard `btest` server with additional features:
|
||||||
|
//! - SQLite user database (--users-db)
|
||||||
|
//! - Per-user and per-IP bandwidth quotas (daily/weekly)
|
||||||
|
//! - LDAP/Active Directory authentication (--ldap-url)
|
||||||
|
//! - Rate limiting for public server deployment
|
||||||
|
//!
|
||||||
|
//! Build with: cargo build --release --features pro --bin btest-server-pro
|
||||||
|
|
||||||
|
mod user_db;
|
||||||
|
mod quota;
|
||||||
|
mod enforcer;
|
||||||
|
mod server_loop;
|
||||||
|
mod web;
|
||||||
|
mod ldap_auth;
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use tracing_subscriber::EnvFilter;
|
||||||
|
|
||||||
|
/// Command-line options for btest-server-pro.
///
/// Quota values of 0 mean "unlimited"; directional per-IP flags
/// (`--ip-daily-in` etc.) fall back to the corresponding combined flag
/// when left at 0 (resolved in `main`).
#[derive(Parser, Debug)]
#[command(
    name = "btest-server-pro",
    about = "btest-rs Pro Server: multi-user, quotas, LDAP",
    version,
)]
struct Cli {
    /// Listen port
    #[arg(short = 'P', long = "port", default_value_t = 2000)]
    port: u16,

    /// IPv4 listen address
    #[arg(long = "listen", default_value = "0.0.0.0")]
    listen_addr: String,

    /// IPv6 listen address (optional)
    #[arg(long = "listen6")]
    listen6_addr: Option<String>,

    /// SQLite user database path
    #[arg(long = "users-db", default_value = "btest-users.db")]
    users_db: String,

    /// LDAP server URL (e.g., ldap://dc.example.com)
    #[arg(long = "ldap-url")]
    ldap_url: Option<String>,

    /// LDAP base DN for user search
    #[arg(long = "ldap-base-dn")]
    ldap_base_dn: Option<String>,

    /// LDAP bind DN (for service account)
    #[arg(long = "ldap-bind-dn")]
    ldap_bind_dn: Option<String>,

    /// LDAP bind password
    #[arg(long = "ldap-bind-pass")]
    ldap_bind_pass: Option<String>,

    /// Default daily quota per user in bytes (0 = unlimited)
    #[arg(long = "daily-quota", default_value_t = 0)]
    daily_quota: u64,

    /// Default weekly quota per user in bytes (0 = unlimited)
    #[arg(long = "weekly-quota", default_value_t = 0)]
    weekly_quota: u64,

    /// Default monthly quota per user in bytes (0 = unlimited)
    #[arg(long = "monthly-quota", default_value_t = 0)]
    monthly_quota: u64,

    /// Daily bandwidth limit per IP in bytes (0 = unlimited)
    #[arg(long = "ip-daily", default_value_t = 0)]
    ip_daily: u64,

    /// Weekly bandwidth limit per IP in bytes (0 = unlimited)
    #[arg(long = "ip-weekly", default_value_t = 0)]
    ip_weekly: u64,

    /// Monthly bandwidth limit per IP in bytes (0 = unlimited)
    #[arg(long = "ip-monthly", default_value_t = 0)]
    ip_monthly: u64,

    /// Maximum concurrent connections per IP (0 = unlimited)
    #[arg(long = "max-conn-per-ip", default_value_t = 5)]
    max_conn_per_ip: u32,

    /// Maximum test duration in seconds (0 = unlimited)
    #[arg(long = "max-duration", default_value_t = 300)]
    max_duration: u64,

    /// Daily inbound (client→server) limit per IP in bytes (0 = use --ip-daily)
    #[arg(long = "ip-daily-in", default_value_t = 0)]
    ip_daily_in: u64,

    /// Daily outbound (server→client) limit per IP in bytes (0 = use --ip-daily)
    #[arg(long = "ip-daily-out", default_value_t = 0)]
    ip_daily_out: u64,

    /// Weekly inbound limit per IP in bytes (0 = use --ip-weekly)
    #[arg(long = "ip-weekly-in", default_value_t = 0)]
    ip_weekly_in: u64,

    /// Weekly outbound limit per IP in bytes (0 = use --ip-weekly)
    #[arg(long = "ip-weekly-out", default_value_t = 0)]
    ip_weekly_out: u64,

    /// Monthly inbound limit per IP in bytes (0 = use --ip-monthly)
    #[arg(long = "ip-monthly-in", default_value_t = 0)]
    ip_monthly_in: u64,

    /// Monthly outbound limit per IP in bytes (0 = use --ip-monthly)
    #[arg(long = "ip-monthly-out", default_value_t = 0)]
    ip_monthly_out: u64,

    /// How often to check quotas during a test in seconds
    #[arg(long = "quota-check-interval", default_value_t = 10)]
    quota_check_interval: u64,

    /// Web dashboard port (0 = disabled)
    #[arg(long = "web-port", default_value_t = 8080)]
    web_port: u16,

    /// Shared password for public mode (all users use this password)
    #[arg(long = "shared-password")]
    shared_password: Option<String>,

    /// Use EC-SRP5 authentication
    #[arg(long = "ecsrp5")]
    ecsrp5: bool,

    /// Syslog server address
    #[arg(long = "syslog")]
    syslog: Option<String>,

    /// CSV output file
    #[arg(long = "csv")]
    csv: Option<String>,

    /// Verbose logging
    #[arg(short = 'v', long = "verbose", action = clap::ArgAction::Count)]
    verbose: u8,

    /// User management subcommand
    #[command(subcommand)]
    command: Option<UserCommand>,
}
|
||||||
|
|
||||||
|
/// User-management subcommands; when one is given, `main` performs the
/// operation against the user database and exits without starting the server.
#[derive(clap::Subcommand, Debug)]
enum UserCommand {
    /// Add a user
    #[command(name = "useradd")]
    UserAdd {
        /// Username
        username: String,
        /// Password
        password: String,
    },
    /// Delete a user
    #[command(name = "userdel")]
    UserDel {
        /// Username
        username: String,
    },
    /// List all users
    #[command(name = "userlist")]
    UserList,
    /// Enable/disable a user
    #[command(name = "userset")]
    UserSet {
        /// Username
        username: String,
        /// Enable (true/false)
        #[arg(long)]
        enabled: Option<bool>,
        /// Daily quota in bytes
        #[arg(long)]
        daily: Option<i64>,
        /// Weekly quota in bytes
        #[arg(long)]
        weekly: Option<i64>,
    },
}
|
||||||
|
|
||||||
|
/// Entry point: parse CLI options, initialize logging/DB/quotas, optionally
/// run a user-management subcommand (then exit), otherwise start the web
/// dashboard (if enabled) and the pro server loop.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    // Map -v count to a default log level; RUST_LOG env overrides it.
    let filter = match cli.verbose {
        0 => "info",
        1 => "debug",
        _ => "trace",
    };
    tracing_subscriber::fmt()
        .with_env_filter(
            EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(filter)),
        )
        .with_target(false)
        .init();

    // Initialize subsystems
    btest_rs::cpu::start_sampler();

    // Syslog/CSV outputs are optional; failure to set them up is non-fatal.
    if let Some(ref syslog_addr) = cli.syslog {
        if let Err(e) = btest_rs::syslog_logger::init(syslog_addr) {
            eprintln!("Warning: syslog init failed: {}", e);
        }
    }

    if let Some(ref csv_path) = cli.csv {
        if let Err(e) = btest_rs::csv_output::init(csv_path) {
            eprintln!("Warning: CSV init failed: {}", e);
        }
    }

    // Initialize user database
    let db = user_db::UserDb::open(&cli.users_db)?;
    db.ensure_tables()?;

    // Handle user management subcommands (exit after)
    if let Some(cmd) = &cli.command {
        match cmd {
            UserCommand::UserAdd { username, password } => {
                db.add_user(username, password)?;
                println!("User '{}' added.", username);
                return Ok(());
            }
            UserCommand::UserDel { username } => {
                if db.delete_user(username)? {
                    println!("User '{}' deleted.", username);
                } else {
                    println!("User '{}' not found.", username);
                }
                return Ok(());
            }
            UserCommand::UserList => {
                let users = db.list_users()?;
                if users.is_empty() {
                    println!("No users.");
                } else {
                    // Fixed-width table; quota 0 displays as "default"
                    // (meaning the server-wide default applies).
                    println!("{:<20} {:<10} {:<15} {:<15}", "USERNAME", "ENABLED", "DAILY_QUOTA", "WEEKLY_QUOTA");
                    println!("{}", "-".repeat(60));
                    for u in &users {
                        println!("{:<20} {:<10} {:<15} {:<15}",
                            u.username,
                            if u.enabled { "yes" } else { "no" },
                            if u.daily_quota == 0 { "default".to_string() } else { format!("{}B", u.daily_quota) },
                            if u.weekly_quota == 0 { "default".to_string() } else { format!("{}B", u.weekly_quota) },
                        );
                    }
                }
                return Ok(());
            }
            UserCommand::UserSet { username, enabled, daily, weekly } => {
                if let Some(e) = enabled {
                    db.set_user_enabled(username, *e)?;
                    println!("User '{}' enabled={}", username, e);
                }
                // NOTE(review): setting only one of --daily/--weekly resets
                // the other to 0 (default) via unwrap_or(0) — confirm intended.
                if daily.is_some() || weekly.is_some() {
                    let d = daily.unwrap_or(0);
                    let w = weekly.unwrap_or(0);
                    db.set_user_quota(username, d, w, 0)?;
                    println!("User '{}' quota: daily={}, weekly={}", username, d, w);
                }
                return Ok(());
            }
        }
    }

    tracing::info!("User database: {} ({} users)", cli.users_db, db.user_count()?);

    // Initialize LDAP if configured
    if let Some(ref url) = cli.ldap_url {
        tracing::info!("LDAP configured: {}", url);
    }

    // Initialize quota manager
    // Directional flags override combined: --ip-daily-in > --ip-daily > unlimited
    let or_fallback = |specific: u64, combined: u64| if specific > 0 { specific } else { combined };
    let quota_mgr = quota::QuotaManager::new(
        db.clone(),
        cli.daily_quota,
        cli.weekly_quota,
        cli.monthly_quota,
        cli.ip_daily,
        cli.ip_weekly,
        cli.ip_monthly,
        or_fallback(cli.ip_daily_in, cli.ip_daily),
        or_fallback(cli.ip_daily_out, cli.ip_daily),
        or_fallback(cli.ip_weekly_in, cli.ip_weekly),
        or_fallback(cli.ip_weekly_out, cli.ip_weekly),
        or_fallback(cli.ip_monthly_in, cli.ip_monthly),
        or_fallback(cli.ip_monthly_out, cli.ip_monthly),
        cli.max_conn_per_ip,
        cli.max_duration,
    );

    // Log the effective limits (0 is displayed as "unlimited").
    let fmt_q = |v: u64| if v == 0 { "unlimited".to_string() } else { format!("{}B", v) };
    tracing::info!(
        "User quotas: daily={}, weekly={}, monthly={}",
        fmt_q(cli.daily_quota), fmt_q(cli.weekly_quota), fmt_q(cli.monthly_quota),
    );
    tracing::info!(
        "IP quotas: daily={}, weekly={}, monthly={}",
        fmt_q(cli.ip_daily), fmt_q(cli.ip_weekly), fmt_q(cli.ip_monthly),
    );
    tracing::info!(
        "Limits: max_conn_per_ip={}, max_duration={}s",
        cli.max_conn_per_ip, cli.max_duration,
    );

    // Start web dashboard if port > 0; runs as a detached background task.
    if cli.web_port > 0 {
        let web_db = db.clone();
        let web_port = cli.web_port;
        tokio::spawn(async move {
            tracing::info!("Web dashboard starting on http://0.0.0.0:{}", web_port);
            let app = web::create_router(web_db);
            let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", web_port))
                .await
                .expect("Failed to bind web dashboard port");
            if let Err(e) = axum::serve(listener, app).await {
                tracing::error!("Web dashboard error: {}", e);
            }
        });
    }

    tracing::info!("btest-server-pro starting on port {}", cli.port);

    // "--listen none" disables the IPv4 listener entirely.
    let v4 = if cli.listen_addr.eq_ignore_ascii_case("none") { None } else { Some(cli.listen_addr) };
    let v6 = cli.listen6_addr;

    // Hand off to the pro server accept loop; runs until error/shutdown.
    server_loop::run_pro_server(
        cli.port,
        cli.ecsrp5,
        v4, v6,
        db,
        quota_mgr,
        cli.quota_check_interval,
    ).await?;

    Ok(())
}
|
||||||
470
src/server_pro/quota.rs
Normal file
470
src/server_pro/quota.rs
Normal file
@@ -0,0 +1,470 @@
|
|||||||
|
//! Bandwidth quota management for btest-server-pro.
|
||||||
|
//!
|
||||||
|
//! Enforces per-user and per-IP bandwidth limits (daily/weekly/monthly),
|
||||||
|
//! with separate tracking for inbound (client-to-server) and outbound
|
||||||
|
//! (server-to-client) directions.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::IpAddr;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use super::user_db::UserDb;
|
||||||
|
|
||||||
|
/// Traffic direction for bandwidth tests.
///
/// From the **server's** perspective:
/// - `Inbound` = client sends data to us (client TX, server RX)
/// - `Outbound` = we send data to the client (server TX, client RX)
/// - `Both` = bidirectional test
///
/// Passed to [`QuotaManager::check_ip`] to select which directional
/// per-IP limits are enforced for a given test.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Direction {
    Inbound,
    Outbound,
    Both,
}
|
||||||
|
|
||||||
|
/// Central quota policy: holds the configured per-user and per-IP limits,
/// a handle to the user database for usage lookups, and a shared counter of
/// active connections per client IP. Cheap to clone (shared `Arc` state).
#[derive(Clone)]
pub struct QuotaManager {
    db: UserDb,
    /// Per-user defaults (0 = unlimited)
    default_daily: u64,
    default_weekly: u64,
    default_monthly: u64,
    /// Per-IP combined (inbound + outbound) limits (0 = unlimited) — for abuse prevention
    ip_daily: u64,
    ip_weekly: u64,
    ip_monthly: u64,
    /// Per-IP directional limits (0 = unlimited)
    ip_daily_inbound: u64,
    ip_daily_outbound: u64,
    ip_weekly_inbound: u64,
    ip_weekly_outbound: u64,
    ip_monthly_inbound: u64,
    ip_monthly_outbound: u64,
    /// Max simultaneous connections from one IP
    max_conn_per_ip: u32,
    /// Max test duration in seconds
    max_duration: u64,
    // Live connection counts per client IP, consulted by check_ip.
    active_connections: Arc<Mutex<HashMap<IpAddr, u32>>>,
}
|
||||||
|
|
||||||
|
/// Reasons a user or IP may be refused a bandwidth test.
///
/// Quota variants carry the observed usage (`used`) and the limit that was
/// hit (`limit`), both in bytes.
#[derive(Debug)]
pub enum QuotaError {
    /// Per-user daily quota exceeded.
    DailyExceeded { used: u64, limit: u64 },
    /// Per-user weekly quota exceeded.
    WeeklyExceeded { used: u64, limit: u64 },
    /// Per-user monthly quota exceeded.
    MonthlyExceeded { used: u64, limit: u64 },
    /// Combined (inbound + outbound) IP daily limit exceeded.
    IpDailyExceeded { used: u64, limit: u64 },
    /// Combined (inbound + outbound) IP weekly limit exceeded.
    IpWeeklyExceeded { used: u64, limit: u64 },
    /// Combined (inbound + outbound) IP monthly limit exceeded.
    IpMonthlyExceeded { used: u64, limit: u64 },
    /// Per-direction IP daily limits.
    IpInboundDailyExceeded { used: u64, limit: u64 },
    IpOutboundDailyExceeded { used: u64, limit: u64 },
    /// Per-direction IP weekly limits.
    IpInboundWeeklyExceeded { used: u64, limit: u64 },
    IpOutboundWeeklyExceeded { used: u64, limit: u64 },
    /// Per-direction IP monthly limits.
    IpInboundMonthlyExceeded { used: u64, limit: u64 },
    IpOutboundMonthlyExceeded { used: u64, limit: u64 },
    /// Too many simultaneous connections from one IP.
    TooManyConnections { current: u32, limit: u32 },
    /// The user record exists but is disabled.
    UserDisabled,
    /// No such user (or the lookup failed).
    UserNotFound,
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for QuotaError {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::DailyExceeded { used, limit } =>
|
||||||
|
write!(f, "User daily quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::WeeklyExceeded { used, limit } =>
|
||||||
|
write!(f, "User weekly quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::MonthlyExceeded { used, limit } =>
|
||||||
|
write!(f, "User monthly quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::IpDailyExceeded { used, limit } =>
|
||||||
|
write!(f, "IP daily quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::IpWeeklyExceeded { used, limit } =>
|
||||||
|
write!(f, "IP weekly quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::IpMonthlyExceeded { used, limit } =>
|
||||||
|
write!(f, "IP monthly quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::IpInboundDailyExceeded { used, limit } =>
|
||||||
|
write!(f, "IP inbound daily quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::IpOutboundDailyExceeded { used, limit } =>
|
||||||
|
write!(f, "IP outbound daily quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::IpInboundWeeklyExceeded { used, limit } =>
|
||||||
|
write!(f, "IP inbound weekly quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::IpOutboundWeeklyExceeded { used, limit } =>
|
||||||
|
write!(f, "IP outbound weekly quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::IpInboundMonthlyExceeded { used, limit } =>
|
||||||
|
write!(f, "IP inbound monthly quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::IpOutboundMonthlyExceeded { used, limit } =>
|
||||||
|
write!(f, "IP outbound monthly quota exceeded: {}/{} bytes", used, limit),
|
||||||
|
Self::TooManyConnections { current, limit } =>
|
||||||
|
write!(f, "Too many connections from this IP: {}/{}", current, limit),
|
||||||
|
Self::UserDisabled => write!(f, "User account is disabled"),
|
||||||
|
Self::UserNotFound => write!(f, "User not found"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl QuotaManager {
|
||||||
|
    /// Construct a QuotaManager from the full set of server-wide limits.
    ///
    /// All byte limits use 0 to mean "unlimited"; `max_conn_per_ip` of 0
    /// disables the connection cap. The argument order mirrors the CLI
    /// flags in `main` (user daily/weekly/monthly, IP combined
    /// daily/weekly/monthly, IP directional in/out for each period,
    /// connection cap, max test duration in seconds).
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        db: UserDb,
        default_daily: u64,
        default_weekly: u64,
        default_monthly: u64,
        ip_daily: u64,
        ip_weekly: u64,
        ip_monthly: u64,
        ip_daily_inbound: u64,
        ip_daily_outbound: u64,
        ip_weekly_inbound: u64,
        ip_weekly_outbound: u64,
        ip_monthly_inbound: u64,
        ip_monthly_outbound: u64,
        max_conn_per_ip: u32,
        max_duration: u64,
    ) -> Self {
        Self {
            db,
            default_daily,
            default_weekly,
            default_monthly,
            ip_daily,
            ip_weekly,
            ip_monthly,
            ip_daily_inbound,
            ip_daily_outbound,
            ip_weekly_inbound,
            ip_weekly_outbound,
            ip_monthly_inbound,
            ip_monthly_outbound,
            max_conn_per_ip,
            max_duration,
            // Starts empty; connection tracking is shared across clones.
            active_connections: Arc::new(Mutex::new(HashMap::new())),
        }
    }
|
||||||
|
|
||||||
|
    /// Check if a user is allowed to start a test.
    ///
    /// Verifies, in order: the user exists, the account is enabled, and
    /// daily/weekly/monthly usage (tx + rx combined) is under the limit.
    /// Per-user daily/weekly quotas from the DB override the server-wide
    /// defaults when non-zero; a limit of 0 means "unlimited".
    pub fn check_user(&self, username: &str) -> Result<(), QuotaError> {
        // DB errors are collapsed into UserNotFound — the caller only needs
        // an allow/deny answer.
        let user = self.db.get_user(username)
            .map_err(|_| QuotaError::UserNotFound)?
            .ok_or(QuotaError::UserNotFound)?;

        if !user.enabled {
            return Err(QuotaError::UserDisabled);
        }

        // Daily: per-user quota (when set) overrides the server default.
        let daily_limit = if user.daily_quota > 0 { user.daily_quota as u64 } else { self.default_daily };
        if daily_limit > 0 {
            // Usage reads fall back to (0, 0) on DB error (fail-open).
            let (tx, rx) = self.db.get_daily_usage(username).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= daily_limit {
                return Err(QuotaError::DailyExceeded { used, limit: daily_limit });
            }
        }

        // Weekly: same per-user override logic as daily.
        let weekly_limit = if user.weekly_quota > 0 { user.weekly_quota as u64 } else { self.default_weekly };
        if weekly_limit > 0 {
            let (tx, rx) = self.db.get_weekly_usage(username).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= weekly_limit {
                return Err(QuotaError::WeeklyExceeded { used, limit: weekly_limit });
            }
        }

        // Monthly: only the server-wide default is consulted.
        // NOTE(review): unlike daily/weekly, no per-user monthly quota is
        // read from the user record here — confirm that is intentional.
        if self.default_monthly > 0 {
            let (tx, rx) = self.db.get_monthly_usage(username).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= self.default_monthly {
                return Err(QuotaError::MonthlyExceeded { used, limit: self.default_monthly });
            }
        }

        Ok(())
    }
|
||||||
|
|
||||||
|
    /// Check if an IP is allowed to connect, considering both combined and
    /// directional bandwidth quotas.
    ///
    /// The `direction` parameter indicates which direction the test will use.
    /// For `Direction::Both`, both inbound and outbound directional limits are
    /// checked. Combined (total) limits are always checked regardless of
    /// direction.
    pub fn check_ip(&self, ip: &IpAddr, direction: Direction) -> Result<(), QuotaError> {
        // Connection limit (0 = unlimited). The lock guard is dropped at the
        // end of this block, before any DB work.
        if self.max_conn_per_ip > 0 {
            let conns = self.active_connections.lock().unwrap();
            let current = conns.get(ip).copied().unwrap_or(0);
            if current >= self.max_conn_per_ip {
                return Err(QuotaError::TooManyConnections {
                    current,
                    limit: self.max_conn_per_ip,
                });
            }
        }

        // The usage tables are keyed by the textual address form.
        let ip_str = ip.to_string();

        // --- Combined (inbound + outbound) limits ---
        self.check_ip_combined(&ip_str)?;

        // --- Directional limits ---
        let check_inbound = matches!(direction, Direction::Inbound | Direction::Both);
        let check_outbound = matches!(direction, Direction::Outbound | Direction::Both);

        if check_inbound {
            self.check_ip_inbound(&ip_str)?;
        }
        if check_outbound {
            self.check_ip_outbound(&ip_str)?;
        }

        Ok(())
    }
|
||||||
|
|
||||||
|
/// Check combined (total inbound + outbound) IP limits.
|
||||||
|
fn check_ip_combined(&self, ip_str: &str) -> Result<(), QuotaError> {
|
||||||
|
// IP daily (combined)
|
||||||
|
if self.ip_daily > 0 {
|
||||||
|
let (tx, rx) = self.db.get_ip_daily_usage(ip_str).unwrap_or((0, 0));
|
||||||
|
let used = tx + rx;
|
||||||
|
if used >= self.ip_daily {
|
||||||
|
return Err(QuotaError::IpDailyExceeded { used, limit: self.ip_daily });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IP weekly (combined)
|
||||||
|
if self.ip_weekly > 0 {
|
||||||
|
let (tx, rx) = self.db.get_ip_weekly_usage(ip_str).unwrap_or((0, 0));
|
||||||
|
let used = tx + rx;
|
||||||
|
if used >= self.ip_weekly {
|
||||||
|
return Err(QuotaError::IpWeeklyExceeded { used, limit: self.ip_weekly });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IP monthly (combined)
|
||||||
|
if self.ip_monthly > 0 {
|
||||||
|
let (tx, rx) = self.db.get_ip_monthly_usage(ip_str).unwrap_or((0, 0));
|
||||||
|
let used = tx + rx;
|
||||||
|
if used >= self.ip_monthly {
|
||||||
|
return Err(QuotaError::IpMonthlyExceeded { used, limit: self.ip_monthly });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check inbound-only (client sends to us) IP limits.
|
||||||
|
fn check_ip_inbound(&self, ip_str: &str) -> Result<(), QuotaError> {
|
||||||
|
// Daily inbound
|
||||||
|
if self.ip_daily_inbound > 0 {
|
||||||
|
let used = self.db.get_ip_daily_inbound(ip_str).unwrap_or(0);
|
||||||
|
if used >= self.ip_daily_inbound {
|
||||||
|
return Err(QuotaError::IpInboundDailyExceeded {
|
||||||
|
used,
|
||||||
|
limit: self.ip_daily_inbound,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Weekly inbound
|
||||||
|
if self.ip_weekly_inbound > 0 {
|
||||||
|
let used = self.db.get_ip_weekly_inbound(ip_str).unwrap_or(0);
|
||||||
|
if used >= self.ip_weekly_inbound {
|
||||||
|
return Err(QuotaError::IpInboundWeeklyExceeded {
|
||||||
|
used,
|
||||||
|
limit: self.ip_weekly_inbound,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Monthly inbound
|
||||||
|
if self.ip_monthly_inbound > 0 {
|
||||||
|
let used = self.db.get_ip_monthly_inbound(ip_str).unwrap_or(0);
|
||||||
|
if used >= self.ip_monthly_inbound {
|
||||||
|
return Err(QuotaError::IpInboundMonthlyExceeded {
|
||||||
|
used,
|
||||||
|
limit: self.ip_monthly_inbound,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check outbound-only (we send to client) IP limits.
|
||||||
|
fn check_ip_outbound(&self, ip_str: &str) -> Result<(), QuotaError> {
|
||||||
|
// Daily outbound
|
||||||
|
if self.ip_daily_outbound > 0 {
|
||||||
|
let used = self.db.get_ip_daily_outbound(ip_str).unwrap_or(0);
|
||||||
|
if used >= self.ip_daily_outbound {
|
||||||
|
return Err(QuotaError::IpOutboundDailyExceeded {
|
||||||
|
used,
|
||||||
|
limit: self.ip_daily_outbound,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Weekly outbound
|
||||||
|
if self.ip_weekly_outbound > 0 {
|
||||||
|
let used = self.db.get_ip_weekly_outbound(ip_str).unwrap_or(0);
|
||||||
|
if used >= self.ip_weekly_outbound {
|
||||||
|
return Err(QuotaError::IpOutboundWeeklyExceeded {
|
||||||
|
used,
|
||||||
|
limit: self.ip_weekly_outbound,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Monthly outbound
|
||||||
|
if self.ip_monthly_outbound > 0 {
|
||||||
|
let used = self.db.get_ip_monthly_outbound(ip_str).unwrap_or(0);
|
||||||
|
if used >= self.ip_monthly_outbound {
|
||||||
|
return Err(QuotaError::IpOutboundMonthlyExceeded {
|
||||||
|
used,
|
||||||
|
limit: self.ip_monthly_outbound,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn connect(&self, ip: &IpAddr) {
|
||||||
|
let mut conns = self.active_connections.lock().unwrap();
|
||||||
|
*conns.entry(*ip).or_insert(0) += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn disconnect(&self, ip: &IpAddr) {
|
||||||
|
let mut conns = self.active_connections.lock().unwrap();
|
||||||
|
if let Some(count) = conns.get_mut(ip) {
|
||||||
|
*count = count.saturating_sub(1);
|
||||||
|
if *count == 0 {
|
||||||
|
conns.remove(ip);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
    /// Record usage after a test completes (both user and IP), with separate
    /// inbound and outbound byte counts.
    ///
    /// - `inbound_bytes`: bytes the client sent to us (server RX).
    /// - `outbound_bytes`: bytes we sent to the client (server TX).
    ///
    /// Both the combined user/IP usage and directional IP usage are recorded.
    /// Write failures are logged and otherwise ignored — accounting is best
    /// effort and never fails the caller.
    pub fn record_usage(
        &self,
        username: &str,
        ip: &str,
        inbound_bytes: u64,
        outbound_bytes: u64,
    ) {
        // Record combined user usage (tx/rx from the server's perspective:
        // tx = outbound, rx = inbound).
        if let Err(e) = self.db.record_usage(username, outbound_bytes, inbound_bytes) {
            tracing::error!("Failed to record user usage for {}: {}", username, e);
        }

        // Record IP usage — record_ip_usage already writes both the
        // inbound_bytes and outbound_bytes columns in one operation.
        // Do NOT also call record_ip_inbound_usage/record_ip_outbound_usage
        // as they update the same columns and would double-count.
        if let Err(e) = self.db.record_ip_usage(ip, outbound_bytes, inbound_bytes) {
            tracing::error!("Failed to record IP usage for {}: {}", ip, e);
        }
    }
|
||||||
|
|
||||||
|
    /// Calculate the remaining byte budget for a user+IP combination.
    /// Returns the minimum remaining quota across all applicable limits.
    /// Used to set `BandwidthState::byte_budget` before a test starts,
    /// preventing overshoot beyond quota boundaries.
    ///
    /// Returns `u64::MAX` when no configured (non-zero) limit applies,
    /// meaning "unlimited". DB read failures are treated as zero usage.
    pub fn remaining_budget(&self, username: &str, ip: &IpAddr) -> u64 {
        let mut budget = u64::MAX;
        let ip_str = ip.to_string();

        // Helper: min that ignores 0 (unlimited)
        let cap = |budget: &mut u64, limit: u64, used: u64| {
            if limit > 0 {
                let remaining = limit.saturating_sub(used);
                *budget = (*budget).min(remaining);
            }
        };

        // User quotas (combined tx+rx). If the user lookup fails, user-level
        // quotas simply don't constrain the budget.
        if let Ok(Some(user)) = self.db.get_user(username) {
            // Per-user quota (> 0) overrides the server default.
            let daily_limit = if user.daily_quota > 0 { user.daily_quota as u64 } else { self.default_daily };
            if daily_limit > 0 {
                let (tx, rx) = self.db.get_daily_usage(username).unwrap_or((0, 0));
                cap(&mut budget, daily_limit, tx + rx);
            }

            let weekly_limit = if user.weekly_quota > 0 { user.weekly_quota as u64 } else { self.default_weekly };
            if weekly_limit > 0 {
                let (tx, rx) = self.db.get_weekly_usage(username).unwrap_or((0, 0));
                cap(&mut budget, weekly_limit, tx + rx);
            }

            if self.default_monthly > 0 {
                let (tx, rx) = self.db.get_monthly_usage(username).unwrap_or((0, 0));
                cap(&mut budget, self.default_monthly, tx + rx);
            }
        }

        // IP combined quotas
        if self.ip_daily > 0 {
            let (tx, rx) = self.db.get_ip_daily_usage(&ip_str).unwrap_or((0, 0));
            cap(&mut budget, self.ip_daily, tx + rx);
        }
        if self.ip_weekly > 0 {
            let (tx, rx) = self.db.get_ip_weekly_usage(&ip_str).unwrap_or((0, 0));
            cap(&mut budget, self.ip_weekly, tx + rx);
        }
        if self.ip_monthly > 0 {
            let (tx, rx) = self.db.get_ip_monthly_usage(&ip_str).unwrap_or((0, 0));
            cap(&mut budget, self.ip_monthly, tx + rx);
        }

        // IP directional quotas — use inbound + outbound as combined ceiling.
        // This is conservative: a directional remaining caps the whole-test
        // budget even though the test may split traffic across directions.
        if self.ip_daily_inbound > 0 {
            let used = self.db.get_ip_daily_inbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_daily_inbound, used);
        }
        if self.ip_daily_outbound > 0 {
            let used = self.db.get_ip_daily_outbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_daily_outbound, used);
        }
        if self.ip_weekly_inbound > 0 {
            let used = self.db.get_ip_weekly_inbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_weekly_inbound, used);
        }
        if self.ip_weekly_outbound > 0 {
            let used = self.db.get_ip_weekly_outbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_weekly_outbound, used);
        }
        if self.ip_monthly_inbound > 0 {
            let used = self.db.get_ip_monthly_inbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_monthly_inbound, used);
        }
        if self.ip_monthly_outbound > 0 {
            let used = self.db.get_ip_monthly_outbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_monthly_outbound, used);
        }

        budget
    }
|
||||||
|
|
||||||
|
pub fn max_duration(&self) -> u64 {
|
||||||
|
self.max_duration
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn active_connections_count(&self, ip: &IpAddr) -> u32 {
|
||||||
|
let conns = self.active_connections.lock().unwrap();
|
||||||
|
conns.get(ip).copied().unwrap_or(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
449
src/server_pro/server_loop.rs
Normal file
449
src/server_pro/server_loop.rs
Normal file
@@ -0,0 +1,449 @@
|
|||||||
|
//! Enhanced server loop with quota enforcement.
|
||||||
|
//!
|
||||||
|
//! Wraps the standard btest server connection handler with:
|
||||||
|
//! - Pre-connection IP/user quota checks
|
||||||
|
//! - MD5 challenge-response auth against user DB
|
||||||
|
//! - TCP multi-connection session support
|
||||||
|
//! - Mid-session quota enforcement via QuotaEnforcer
|
||||||
|
//! - Post-session usage recording
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
use tokio::sync::Mutex;
|
||||||
|
|
||||||
|
use btest_rs::protocol::*;
|
||||||
|
use btest_rs::bandwidth::BandwidthState;
|
||||||
|
|
||||||
|
use super::enforcer::{QuotaEnforcer, StopReason};
|
||||||
|
use super::quota::{Direction, QuotaManager};
|
||||||
|
use super::user_db::UserDb;
|
||||||
|
|
||||||
|
/// Pending TCP multi-connection session.
struct TcpSession {
    // IP the primary connection came from; secondary joins must match it.
    peer_ip: std::net::IpAddr,
    // Authenticated user that owns the session.
    username: String,
    // The command received on the primary connection.
    cmd: Command,
    // Streams collected so far (primary first, then secondaries).
    streams: Vec<TcpStream>,
    // Total connections expected before the test starts
    // (tcp_conn_count, which includes the primary).
    expected: u8,
}
|
||||||
|
|
||||||
|
type SessionMap = Arc<Mutex<HashMap<u16, TcpSession>>>;
|
||||||
|
|
||||||
|
/// Run the pro server with quota enforcement.
///
/// Binds optional IPv4/IPv6 TCP listeners on `port`, then accepts
/// connections forever, spawning one task per connection. Bails if neither
/// listener address was provided.
pub async fn run_pro_server(
    port: u16,
    _ecsrp5: bool,
    listen_v4: Option<String>,
    listen_v6: Option<String>,
    db: UserDb,
    quota_mgr: QuotaManager,
    quota_check_interval: u64,
) -> anyhow::Result<()> {
    let v4_listener = if let Some(ref addr) = listen_v4 {
        let bind_addr = format!("{}:{}", addr, port);
        Some(TcpListener::bind(&bind_addr).await?)
    } else {
        None
    };

    let v6_listener = if let Some(ref addr) = listen_v6 {
        // IPv6 literals must be bracketed in "addr:port" form.
        let bind_addr = format!("[{}]:{}", addr, port);
        Some(TcpListener::bind(&bind_addr).await?)
    } else {
        None
    };

    if v4_listener.is_none() && v6_listener.is_none() {
        anyhow::bail!("No listeners bound");
    }

    // Shared registry of pending TCP multi-connection sessions.
    let sessions: SessionMap = Arc::new(Mutex::new(HashMap::new()));

    tracing::info!("btest-server-pro ready, accepting connections");

    loop {
        // Accept from whichever listener(s) exist; select! races both.
        let (stream, peer) = match (&v4_listener, &v6_listener) {
            (Some(v4), Some(v6)) => {
                tokio::select! {
                    r = v4.accept() => r?,
                    r = v6.accept() => r?,
                }
            }
            (Some(v4), None) => v4.accept().await?,
            (None, Some(v6)) => v6.accept().await?,
            // Unreachable: the None/None case bailed out above.
            _ => unreachable!(),
        };

        tracing::info!("New connection from {}", peer);

        let db = db.clone();
        let qm = quota_mgr.clone();
        let interval = quota_check_interval;
        let sess = sessions.clone();

        tokio::spawn(async move {
            let is_primary = match handle_pro_connection(stream, peer, db, qm.clone(), interval, sess).await {
                Ok(Some((username, stop_reason, tx, rx))) => {
                    tracing::info!(
                        "Client {} (user '{}') finished: {} (tx={}, rx={})",
                        peer, username, stop_reason, tx, rx,
                    );
                    btest_rs::syslog_logger::test_end(
                        &peer.to_string(), "btest", &format!("{}", stop_reason),
                        tx, rx, 0, 0,
                    );
                    true
                }
                Ok(None) => false, // secondary connection or pending multi-conn
                Err(e) => {
                    tracing::error!("Client {} error: {}", peer, e);
                    true
                }
            };
            // Only decrement connection count for primary connections
            // NOTE(review): a TCP multi-connection primary also returns
            // Ok(None) after quota_mgr.connect() was called inside
            // handle_pro_connection, so its per-IP counter is never
            // decremented here or in the multi-conn task — confirm and fix
            // the release path for multi-conn sessions.
            if is_primary {
                qm.disconnect(&peer.ip());
            }
        });
    }
}
|
||||||
|
|
||||||
|
/// Handle a single TCP connection. Returns None for secondary multi-conn joins.
///
/// Flow: send HELLO, read the 16-byte command buffer, and either
/// (a) treat it as a secondary join of a pending multi-connection session,
/// (b) reject on IP quota, or (c) authenticate (MD5 challenge-response),
/// check the user quota, then register a multi-conn session or run a
/// single-connection test.
///
/// Returns `Ok(Some((username, stop_reason, tx, rx)))` for a completed
/// primary test, `Ok(None)` for secondary joins, rejected IPs, and pending
/// multi-conn primaries, and `Err` on protocol/auth failures.
async fn handle_pro_connection(
    mut stream: TcpStream,
    peer: SocketAddr,
    db: UserDb,
    quota_mgr: QuotaManager,
    quota_check_interval: u64,
    sessions: SessionMap,
) -> anyhow::Result<Option<(String, StopReason, u64, u64)>> {
    stream.set_nodelay(true)?;

    // HELLO
    stream.write_all(&HELLO).await?;

    // Read command (or session token for secondary connections)
    let mut cmd_buf = [0u8; 16];
    stream.read_exact(&mut cmd_buf).await?;

    // Check if this is a secondary connection joining an existing TCP session
    // Secondary connections send [HI, LO, ...] matching an existing session token
    // NOTE(review): a genuine primary command whose first two bytes happen to
    // equal a pending token from the same IP would be misread as a join —
    // confirm the command framing makes this collision impossible.
    {
        let potential_token = u16::from_be_bytes([cmd_buf[0], cmd_buf[1]]);
        let mut map = sessions.lock().await;
        if let Some(session) = map.get_mut(&potential_token) {
            if session.peer_ip == peer.ip()
                && session.streams.len() < session.expected as usize
            {
                tracing::info!(
                    "Secondary connection from {} joining session (token={:04x}, {}/{})",
                    peer, potential_token,
                    session.streams.len() + 1, session.expected,
                );

                // Auth the secondary connection with same token response
                let ok = [0x01, cmd_buf[0], cmd_buf[1], 0x00];
                stream.write_all(&ok).await?;
                stream.flush().await?;

                session.streams.push(stream);

                // If all connections have joined, start the test
                if session.streams.len() >= session.expected as usize {
                    let session = map.remove(&potential_token).unwrap();
                    let db2 = db.clone();
                    let qm2 = quota_mgr.clone();
                    tokio::spawn(async move {
                        match run_pro_multiconn_test(
                            session.streams, session.cmd, peer,
                            &session.username, db2, qm2, quota_check_interval,
                        ).await {
                            Ok((stop, tx, rx)) => {
                                tracing::info!(
                                    "Multi-conn {} (user '{}') finished: {} (tx={}, rx={})",
                                    peer, session.username, stop, tx, rx,
                                );
                            }
                            Err(e) => {
                                tracing::error!("Multi-conn {} error: {}", peer, e);
                            }
                        }
                    });
                }

                return Ok(None);
            }
        }
    }

    // Primary connection — check IP quota/connection limit now
    if let Err(e) = quota_mgr.check_ip(&peer.ip(), Direction::Both) {
        tracing::warn!("Rejected {} — {}", peer, e);
        btest_rs::syslog_logger::auth_failure(
            &peer.to_string(), "-", "-", &format!("{}", e),
        );
        return Ok(None);
    }
    quota_mgr.connect(&peer.ip());

    let cmd = Command::deserialize(&cmd_buf);

    tracing::info!(
        "Client {} command: proto={} dir={} conn_count={} tx_size={}",
        peer,
        if cmd.is_udp() { "UDP" } else { "TCP" },
        match cmd.direction { CMD_DIR_RX => "RX", CMD_DIR_TX => "TX", _ => "BOTH" },
        cmd.tcp_conn_count,
        cmd.tx_size,
    );

    // Build auth OK response with session token for multi-connection
    let is_tcp_multi = !cmd.is_udp() && cmd.tcp_conn_count > 0;
    let session_token: u16 = if is_tcp_multi {
        rand::random::<u16>() | 0x0101 // ensure both bytes non-zero
    } else {
        0
    };
    let ok_response: [u8; 4] = if is_tcp_multi {
        // [status, token-hi, token-lo, 0] — secondaries echo the token back.
        [0x01, (session_token >> 8) as u8, (session_token & 0xFF) as u8, 0x00]
    } else {
        AUTH_OK
    };

    // Authenticate — MD5 challenge-response against DB
    stream.write_all(&AUTH_REQUIRED).await?;
    let challenge = btest_rs::auth::generate_challenge();
    stream.write_all(&challenge).await?;
    stream.flush().await?;

    // Response layout: 16-byte hash followed by a 32-byte NUL-padded username.
    let mut response = [0u8; 48];
    stream.read_exact(&mut response).await?;

    let received_hash = &response[0..16];
    let received_user = &response[16..48];

    let user_end = received_user.iter().position(|&b| b == 0).unwrap_or(32);
    let username = std::str::from_utf8(&received_user[..user_end])
        .unwrap_or("")
        .to_string();

    // Verify against DB
    let user = db.get_user(&username)?;
    match user {
        None => {
            tracing::warn!("Auth failed: user '{}' not found", username);
            stream.write_all(&AUTH_FAILED).await?;
            btest_rs::syslog_logger::auth_failure(
                &peer.to_string(), &username, "md5", "user not found",
            );
            anyhow::bail!("User not found");
        }
        Some(u) => {
            if !u.enabled {
                tracing::warn!("Auth failed: user '{}' is disabled", username);
                stream.write_all(&AUTH_FAILED).await?;
                btest_rs::syslog_logger::auth_failure(
                    &peer.to_string(), &username, "md5", "user disabled",
                );
                anyhow::bail!("User disabled");
            }

            // Verify MD5 hash against stored raw password
            if let Ok(Some(raw_pass)) = db.get_password(&username) {
                let expected_hash = btest_rs::auth::compute_auth_hash(&raw_pass, &challenge);
                if received_hash != expected_hash {
                    tracing::warn!("Auth failed: password mismatch for user '{}'", username);
                    stream.write_all(&AUTH_FAILED).await?;
                    btest_rs::syslog_logger::auth_failure(
                        &peer.to_string(), &username, "md5", "password mismatch",
                    );
                    anyhow::bail!("Auth failed");
                }
            }
            // If no raw password stored, accept (backwards compat with old DB entries)

            stream.write_all(&ok_response).await?;
            stream.flush().await?;

            tracing::info!("Auth successful for user '{}'", username);
            btest_rs::syslog_logger::auth_success(
                &peer.to_string(), &username, "md5",
            );
        }
    }

    // Check user quota before starting test
    // NOTE(review): the stop reason is always UserDailyQuota here even when a
    // weekly/monthly quota or disabled flag triggered — consider mapping
    // QuotaError variants to matching StopReason values.
    if let Err(e) = quota_mgr.check_user(&username) {
        tracing::warn!("Quota check failed for '{}': {}", username, e);
        btest_rs::syslog_logger::auth_failure(
            &peer.to_string(), &username, "quota", &format!("{}", e),
        );
        return Ok(Some((username, StopReason::UserDailyQuota, 0, 0)));
    }

    // TCP multi-connection: register session and wait for secondary connections
    // NOTE(review): sessions whose secondaries never arrive stay in the map
    // forever (no expiry) — confirm whether a timeout/cleanup is needed.
    if is_tcp_multi {
        tracing::info!(
            "TCP multi-connection: waiting for {} connections (token={:04x})",
            cmd.tcp_conn_count, session_token,
        );
        let mut map = sessions.lock().await;
        map.insert(session_token, TcpSession {
            peer_ip: peer.ip(),
            username: username.clone(),
            cmd: cmd.clone(),
            streams: vec![stream],
            expected: cmd.tcp_conn_count, // tcp_conn_count includes the primary
        });
        // The test will be started when all connections join (in the secondary handler above)
        return Ok(None);
    }

    // Single-connection test
    run_pro_single_test(stream, cmd, peer, &username, db, quota_mgr, quota_check_interval).await
        .map(|(stop, tx, rx)| Some((username, stop, tx, rx)))
}
|
||||||
|
|
||||||
|
/// Run a single-connection bandwidth test with quota enforcement.
///
/// Records a DB session, applies the remaining byte budget to the shared
/// `BandwidthState`, runs a background `QuotaEnforcer`, executes the UDP or
/// TCP test, then records usage and closes the DB session.
async fn run_pro_single_test(
    stream: TcpStream,
    cmd: Command,
    peer: SocketAddr,
    username: &str,
    db: UserDb,
    quota_mgr: QuotaManager,
    quota_check_interval: u64,
) -> anyhow::Result<(StopReason, u64, u64)> {
    let proto_str = if cmd.is_udp() { "UDP" } else { "TCP" };
    let dir_str = match cmd.direction {
        CMD_DIR_RX => "RX", CMD_DIR_TX => "TX", _ => "BOTH"
    };
    let session_id = db.start_session(
        username, &peer.ip().to_string(), proto_str, dir_str,
    )?;

    btest_rs::syslog_logger::test_start(
        &peer.to_string(), proto_str, dir_str, cmd.tcp_conn_count,
    );

    let state = BandwidthState::new();

    // Set byte budget (u64::MAX from remaining_budget means "unlimited",
    // in which case no budget is installed).
    let budget = quota_mgr.remaining_budget(username, &peer.ip());
    if budget < u64::MAX {
        state.set_budget(budget);
        tracing::info!("Byte budget for '{}' from {}: {} bytes", username, peer.ip(), budget);
    }

    let enforcer = QuotaEnforcer::new(
        quota_mgr.clone(),
        username.to_string(),
        peer.ip(),
        state.clone(),
        quota_check_interval,
        quota_mgr.max_duration(),
    );

    let enforcer_state = state.clone();
    let enforcer_handle = tokio::spawn(async move {
        enforcer.run().await
    });

    // Process-wide counter so concurrent UDP tests get distinct ports.
    // NOTE(review): the AtomicU16 wraps, so BTEST_UDP_PORT_START + offset can
    // overflow u16 after many tests (panic in debug builds) — confirm the
    // offset is bounded or wrapped into the valid port range.
    static UDP_PORT_OFFSET: std::sync::atomic::AtomicU16 = std::sync::atomic::AtomicU16::new(0);

    let mut stream_mut = stream;
    let test_result = if cmd.is_udp() {
        let offset = UDP_PORT_OFFSET.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        let udp_port = btest_rs::protocol::BTEST_UDP_PORT_START + offset;
        btest_rs::server::run_udp_test(
            &mut stream_mut, peer, &cmd, state.clone(), udp_port,
        ).await
    } else {
        btest_rs::server::run_tcp_test(stream_mut, cmd.clone(), state.clone()).await
    };

    // Signal the enforcer task to exit and collect its verdict.
    enforcer_state.running.store(false, std::sync::atomic::Ordering::SeqCst);
    let stop_reason = enforcer_handle.await.unwrap_or(StopReason::ClientDisconnected);

    // NOTE(review): the Ok arm below yields stop_reason in both branches —
    // this match is equivalent to "Err => ClientDisconnected, Ok => stop_reason".
    let final_reason = match &test_result {
        Ok(_) => {
            if stop_reason == StopReason::ClientDisconnected {
                StopReason::ClientDisconnected
            } else {
                stop_reason
            }
        }
        Err(_) => StopReason::ClientDisconnected,
    };

    // Persist totals regardless of how the test ended.
    let (total_tx, total_rx, _, _) = state.summary();
    quota_mgr.record_usage(username, &peer.ip().to_string(), total_tx, total_rx);
    db.end_session(session_id, total_tx, total_rx)?;

    Ok((final_reason, total_tx, total_rx))
}
|
||||||
|
|
||||||
|
/// Run a TCP multi-connection test with all streams collected.
/// Delegates to the standard multi-conn handler which correctly manages
/// TX+status injection for bidirectional mode.
///
/// Mirrors `run_pro_single_test`: DB session bookkeeping, byte budget,
/// background QuotaEnforcer, then usage recording. The underlying test
/// result is discarded; the enforcer's stop reason is returned.
async fn run_pro_multiconn_test(
    streams: Vec<TcpStream>,
    cmd: Command,
    peer: SocketAddr,
    username: &str,
    db: UserDb,
    quota_mgr: QuotaManager,
    quota_check_interval: u64,
) -> anyhow::Result<(StopReason, u64, u64)> {
    let dir_str = match cmd.direction {
        CMD_DIR_RX => "RX", CMD_DIR_TX => "TX", _ => "BOTH"
    };
    let session_id = db.start_session(
        username, &peer.ip().to_string(), "TCP", dir_str,
    )?;

    tracing::info!(
        "Starting TCP multi-conn test: {} streams, dir={}",
        streams.len(), dir_str,
    );

    let state = BandwidthState::new();

    // u64::MAX from remaining_budget means "unlimited" — skip the budget.
    let budget = quota_mgr.remaining_budget(username, &peer.ip());
    if budget < u64::MAX {
        state.set_budget(budget);
    }

    let enforcer = QuotaEnforcer::new(
        quota_mgr.clone(),
        username.to_string(),
        peer.ip(),
        state.clone(),
        quota_check_interval,
        quota_mgr.max_duration(),
    );

    let enforcer_state = state.clone();
    let enforcer_handle = tokio::spawn(async move {
        enforcer.run().await
    });

    // Use the standard multi-connection handler which correctly handles
    // all direction modes (TX, RX, BOTH with status injection)
    let _test_result = btest_rs::server::run_tcp_multiconn_test(
        streams, cmd, state.clone(),
    ).await;

    // Stop the enforcer and take its verdict; a join failure is reported
    // as a client disconnect.
    enforcer_state.running.store(false, std::sync::atomic::Ordering::SeqCst);
    let stop_reason = enforcer_handle.await.unwrap_or(StopReason::ClientDisconnected);

    let (total_tx, total_rx, _, _) = state.summary();
    quota_mgr.record_usage(username, &peer.ip().to_string(), total_tx, total_rx);
    db.end_session(session_id, total_tx, total_rx)?;

    Ok((stop_reason, total_tx, total_rx))
}
|
||||||
629
src/server_pro/user_db.rs
Normal file
629
src/server_pro/user_db.rs
Normal file
@@ -0,0 +1,629 @@
|
|||||||
|
//! SQLite-based user database for btest-server-pro.
|
||||||
|
//!
|
||||||
|
//! Stores users with credentials, quotas, and usage tracking.
|
||||||
|
|
||||||
|
use rusqlite::{Connection, params};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
/// Handle to the SQLite user database. Cloning is cheap: all clones share
/// the same connection behind an `Arc<Mutex<_>>`.
#[derive(Clone)]
pub struct UserDb {
    // Single shared connection; every query serializes on this mutex.
    conn: Arc<Mutex<Connection>>,
    // Filesystem path the connection was opened from (see `path()`).
    path: Arc<String>,
}
|
||||||
|
|
||||||
|
/// A user account row from the `users` table.
#[derive(Debug, Clone)]
pub struct User {
    pub id: i64,
    pub username: String,
    // NOTE(review): the comment below says SHA256(username:password), but the
    // auth path in server_loop verifies against a raw password obtained via
    // get_password() — confirm how the two relate.
    pub password_hash: String, // stored as hex of SHA256(username:password)
    pub daily_quota: i64, // 0 = use default
    pub weekly_quota: i64, // 0 = use default
    pub enabled: bool,
}
|
||||||
|
|
||||||
|
/// One row of per-user, per-day usage from the `usage` table.
#[derive(Debug)]
pub struct UsageRecord {
    pub username: String,
    pub date: String, // YYYY-MM-DD
    // Bytes transmitted by the server during that day.
    pub tx_bytes: u64,
    // Bytes received by the server during that day.
    pub rx_bytes: u64,
    // Number of tests run that day.
    pub test_count: u32,
}
|
||||||
|
|
||||||
|
/// Per-second bandwidth interval data for graphing.
#[derive(Debug, Clone)]
pub struct IntervalData {
    // Ordinal of the interval within its session.
    pub interval_num: i32,
    pub tx_mbps: f64,
    pub rx_mbps: f64,
    pub local_cpu: i32,
    pub remote_cpu: i32,
    // Lost packet count for the interval (UDP tests).
    pub lost: i64,
}
|
||||||
|
|
||||||
|
/// Summary of a single test session.
#[derive(Debug, Clone)]
pub struct SessionSummary {
    pub id: i64,
    pub started_at: String,
    // None while the session is still running (no ended_at recorded).
    pub ended_at: Option<String>,
    pub protocol: String,
    pub direction: String,
    pub tx_bytes: u64,
    pub rx_bytes: u64,
}
|
||||||
|
|
||||||
|
/// Aggregate statistics for an IP address.
#[derive(Debug, Clone)]
pub struct IpStats {
    pub total_tests: u64,
    // Total bytes the clients at this IP sent to the server.
    pub total_inbound: u64,
    // Total bytes the server sent to clients at this IP.
    pub total_outbound: u64,
    pub avg_tx_mbps: f64,
    pub avg_rx_mbps: f64,
}
|
||||||
|
|
||||||
|
impl UserDb {
|
||||||
|
pub fn open(path: &str) -> anyhow::Result<Self> {
|
||||||
|
let conn = Connection::open(path)?;
|
||||||
|
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA busy_timeout=5000;")?;
|
||||||
|
Ok(Self {
|
||||||
|
conn: Arc::new(Mutex::new(conn)),
|
||||||
|
path: Arc::new(path.to_string()),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the database file path.
|
||||||
|
pub fn path(&self) -> &str {
|
||||||
|
&self.path
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create every table and index the server needs, if not already present.
///
/// All DDL is `IF NOT EXISTS`, so this is safe to run on every startup.
/// Schema overview:
/// - `users`          accounts, password hash, quotas, enabled flag
/// - `usage`          per-user daily traffic rollup (one row per user+day)
/// - `ip_usage`       per-IP daily traffic rollup (one row per ip+day)
/// - `sessions`       one row per bandwidth-test session
/// - `test_intervals` per-second samples of a session, for charting
pub fn ensure_tables(&self) -> anyhow::Result<()> {
    let conn = self.conn.lock().unwrap();
    conn.execute_batch("
        CREATE TABLE IF NOT EXISTS users (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT UNIQUE NOT NULL,
            password_hash TEXT NOT NULL,
            daily_quota INTEGER DEFAULT 0,
            weekly_quota INTEGER DEFAULT 0,
            enabled INTEGER DEFAULT 1,
            created_at TEXT DEFAULT (datetime('now'))
        );

        CREATE TABLE IF NOT EXISTS usage (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT NOT NULL,
            date TEXT NOT NULL,
            tx_bytes INTEGER DEFAULT 0,
            rx_bytes INTEGER DEFAULT 0,
            test_count INTEGER DEFAULT 0,
            UNIQUE(username, date)
        );

        CREATE TABLE IF NOT EXISTS ip_usage (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            ip TEXT NOT NULL,
            date TEXT NOT NULL,
            inbound_bytes INTEGER DEFAULT 0,
            outbound_bytes INTEGER DEFAULT 0,
            test_count INTEGER DEFAULT 0,
            UNIQUE(ip, date)
        );

        CREATE TABLE IF NOT EXISTS sessions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT NOT NULL,
            peer_ip TEXT NOT NULL,
            started_at TEXT DEFAULT (datetime('now')),
            ended_at TEXT,
            tx_bytes INTEGER DEFAULT 0,
            rx_bytes INTEGER DEFAULT 0,
            protocol TEXT,
            direction TEXT
        );

        CREATE TABLE IF NOT EXISTS test_intervals (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            session_id INTEGER NOT NULL,
            interval_num INTEGER NOT NULL,
            tx_bytes INTEGER DEFAULT 0,
            rx_bytes INTEGER DEFAULT 0,
            tx_mbps REAL DEFAULT 0,
            rx_mbps REAL DEFAULT 0,
            local_cpu INTEGER DEFAULT 0,
            remote_cpu INTEGER DEFAULT 0,
            lost_packets INTEGER DEFAULT 0,
            FOREIGN KEY(session_id) REFERENCES sessions(id)
        );

        CREATE INDEX IF NOT EXISTS idx_usage_user_date ON usage(username, date);
        CREATE INDEX IF NOT EXISTS idx_ip_usage_date ON ip_usage(ip, date);
        CREATE INDEX IF NOT EXISTS idx_sessions_peer ON sessions(peer_ip, started_at);
        CREATE INDEX IF NOT EXISTS idx_intervals_session ON test_intervals(session_id);
    ")?;
    Ok(())
}
|
||||||
|
|
||||||
|
/// Number of rows in the `users` table (enabled or not).
pub fn user_count(&self) -> anyhow::Result<u64> {
    let guard = self.conn.lock().unwrap();
    let total = guard.query_row("SELECT COUNT(*) FROM users", [], |r| r.get::<_, i64>(0))?;
    Ok(total as u64)
}
|
||||||
|
|
||||||
|
pub fn add_user(&self, username: &str, password: &str) -> anyhow::Result<()> {
|
||||||
|
let hash = hash_password(username, password);
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
// Ensure password_raw column exists (migration for older databases)
|
||||||
|
let _ = conn.execute("ALTER TABLE users ADD COLUMN password_raw TEXT DEFAULT ''", []);
|
||||||
|
conn.execute(
|
||||||
|
"INSERT OR REPLACE INTO users (username, password_hash, password_raw) VALUES (?1, ?2, ?3)",
|
||||||
|
params![username, hash, password],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the raw password for MD5 challenge-response auth.
|
||||||
|
pub fn get_password(&self, username: &str) -> anyhow::Result<Option<String>> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result = conn.query_row(
|
||||||
|
"SELECT password_raw FROM users WHERE username = ?1 AND enabled = 1",
|
||||||
|
params![username],
|
||||||
|
|row| row.get::<_, String>(0),
|
||||||
|
).optional()?;
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_user(&self, username: &str) -> anyhow::Result<Option<User>> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let mut stmt = conn.prepare(
|
||||||
|
"SELECT id, username, password_hash, daily_quota, weekly_quota, enabled FROM users WHERE username = ?1"
|
||||||
|
)?;
|
||||||
|
let user = stmt.query_row(params![username], |row| {
|
||||||
|
Ok(User {
|
||||||
|
id: row.get(0)?,
|
||||||
|
username: row.get(1)?,
|
||||||
|
password_hash: row.get(2)?,
|
||||||
|
daily_quota: row.get(3)?,
|
||||||
|
weekly_quota: row.get(4)?,
|
||||||
|
enabled: row.get::<_, i32>(5)? != 0,
|
||||||
|
})
|
||||||
|
}).optional()?;
|
||||||
|
Ok(user)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn verify_password(&self, username: &str, password: &str) -> anyhow::Result<bool> {
|
||||||
|
let expected = hash_password(username, password);
|
||||||
|
match self.get_user(username)? {
|
||||||
|
Some(user) => Ok(user.enabled && user.password_hash == expected),
|
||||||
|
None => Ok(false),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn record_usage(&self, username: &str, tx_bytes: u64, rx_bytes: u64) -> anyhow::Result<()> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let today = chrono_date_today();
|
||||||
|
conn.execute(
|
||||||
|
"INSERT INTO usage (username, date, tx_bytes, rx_bytes, test_count)
|
||||||
|
VALUES (?1, ?2, ?3, ?4, 1)
|
||||||
|
ON CONFLICT(username, date) DO UPDATE SET
|
||||||
|
tx_bytes = tx_bytes + ?3,
|
||||||
|
rx_bytes = rx_bytes + ?4,
|
||||||
|
test_count = test_count + 1",
|
||||||
|
params![username, today, tx_bytes as i64, rx_bytes as i64],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_daily_usage(&self, username: &str) -> anyhow::Result<(u64, u64)> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let today = chrono_date_today();
|
||||||
|
let result = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0) FROM usage WHERE username = ?1 AND date = ?2",
|
||||||
|
params![username, today],
|
||||||
|
|row| {
|
||||||
|
let a: i64 = row.get(0)?;
|
||||||
|
let b: i64 = row.get(1)?;
|
||||||
|
Ok((a as u64, b as u64))
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_weekly_usage(&self, username: &str) -> anyhow::Result<(u64, u64)> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0) FROM usage
|
||||||
|
WHERE username = ?1 AND date >= date('now', '-7 days')",
|
||||||
|
params![username],
|
||||||
|
|row| {
|
||||||
|
let a: i64 = row.get(0)?;
|
||||||
|
let b: i64 = row.get(1)?;
|
||||||
|
Ok((a as u64, b as u64))
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_monthly_usage(&self, username: &str) -> anyhow::Result<(u64, u64)> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0) FROM usage
|
||||||
|
WHERE username = ?1 AND date >= date('now', '-30 days')",
|
||||||
|
params![username],
|
||||||
|
|row| {
|
||||||
|
let a: i64 = row.get(0)?;
|
||||||
|
let b: i64 = row.get(1)?;
|
||||||
|
Ok((a as u64, b as u64))
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Per-IP usage tracking ---
|
||||||
|
|
||||||
|
pub fn record_ip_usage(&self, ip: &str, tx_bytes: u64, rx_bytes: u64) -> anyhow::Result<()> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let today = chrono_date_today();
|
||||||
|
// From the server's perspective: inbound = data coming FROM the client (rx),
|
||||||
|
// outbound = data going TO the client (tx).
|
||||||
|
let inbound = rx_bytes;
|
||||||
|
let outbound = tx_bytes;
|
||||||
|
conn.execute(
|
||||||
|
"INSERT INTO ip_usage (ip, date, inbound_bytes, outbound_bytes, test_count)
|
||||||
|
VALUES (?1, ?2, ?3, ?4, 1)
|
||||||
|
ON CONFLICT(ip, date) DO UPDATE SET
|
||||||
|
inbound_bytes = inbound_bytes + ?3,
|
||||||
|
outbound_bytes = outbound_bytes + ?4,
|
||||||
|
test_count = test_count + 1",
|
||||||
|
params![ip, today, inbound as i64, outbound as i64],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_ip_daily_usage(&self, ip: &str) -> anyhow::Result<(u64, u64)> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let today = chrono_date_today();
|
||||||
|
let result = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(inbound_bytes),0), COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date = ?2",
|
||||||
|
params![ip, today],
|
||||||
|
|row| {
|
||||||
|
let inbound: i64 = row.get(0)?;
|
||||||
|
let outbound: i64 = row.get(1)?;
|
||||||
|
Ok((inbound as u64, outbound as u64))
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_ip_weekly_usage(&self, ip: &str) -> anyhow::Result<(u64, u64)> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(inbound_bytes),0), COALESCE(SUM(outbound_bytes),0) FROM ip_usage
|
||||||
|
WHERE ip = ?1 AND date >= date('now', '-7 days')",
|
||||||
|
params![ip],
|
||||||
|
|row| {
|
||||||
|
let inbound: i64 = row.get(0)?;
|
||||||
|
let outbound: i64 = row.get(1)?;
|
||||||
|
Ok((inbound as u64, outbound as u64))
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_ip_monthly_usage(&self, ip: &str) -> anyhow::Result<(u64, u64)> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(inbound_bytes),0), COALESCE(SUM(outbound_bytes),0) FROM ip_usage
|
||||||
|
WHERE ip = ?1 AND date >= date('now', '-30 days')",
|
||||||
|
params![ip],
|
||||||
|
|row| {
|
||||||
|
let inbound: i64 = row.get(0)?;
|
||||||
|
let outbound: i64 = row.get(1)?;
|
||||||
|
Ok((inbound as u64, outbound as u64))
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Per-IP directional usage (single-column queries) ---
|
||||||
|
|
||||||
|
/// Record inbound-only IP usage (data coming FROM the client).
|
||||||
|
pub fn record_ip_inbound_usage(&self, ip: &str, bytes: u64) -> anyhow::Result<()> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let today = chrono_date_today();
|
||||||
|
conn.execute(
|
||||||
|
"INSERT INTO ip_usage (ip, date, inbound_bytes, test_count)
|
||||||
|
VALUES (?1, ?2, ?3, 0)
|
||||||
|
ON CONFLICT(ip, date) DO UPDATE SET
|
||||||
|
inbound_bytes = inbound_bytes + ?3",
|
||||||
|
params![ip, today, bytes as i64],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record outbound-only IP usage (data going TO the client).
|
||||||
|
pub fn record_ip_outbound_usage(&self, ip: &str, bytes: u64) -> anyhow::Result<()> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let today = chrono_date_today();
|
||||||
|
conn.execute(
|
||||||
|
"INSERT INTO ip_usage (ip, date, outbound_bytes, test_count)
|
||||||
|
VALUES (?1, ?2, ?3, 0)
|
||||||
|
ON CONFLICT(ip, date) DO UPDATE SET
|
||||||
|
outbound_bytes = outbound_bytes + ?3",
|
||||||
|
params![ip, today, bytes as i64],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get daily inbound bytes for an IP.
|
||||||
|
pub fn get_ip_daily_inbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let today = chrono_date_today();
|
||||||
|
let result: i64 = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(inbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date = ?2",
|
||||||
|
params![ip, today],
|
||||||
|
|row| row.get(0),
|
||||||
|
)?;
|
||||||
|
Ok(result as u64)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get weekly inbound bytes for an IP.
|
||||||
|
pub fn get_ip_weekly_inbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result: i64 = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(inbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-7 days')",
|
||||||
|
params![ip],
|
||||||
|
|row| row.get(0),
|
||||||
|
)?;
|
||||||
|
Ok(result as u64)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get monthly inbound bytes for an IP.
|
||||||
|
pub fn get_ip_monthly_inbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result: i64 = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(inbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-30 days')",
|
||||||
|
params![ip],
|
||||||
|
|row| row.get(0),
|
||||||
|
)?;
|
||||||
|
Ok(result as u64)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get daily outbound bytes for an IP.
|
||||||
|
pub fn get_ip_daily_outbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let today = chrono_date_today();
|
||||||
|
let result: i64 = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date = ?2",
|
||||||
|
params![ip, today],
|
||||||
|
|row| row.get(0),
|
||||||
|
)?;
|
||||||
|
Ok(result as u64)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get weekly outbound bytes for an IP.
|
||||||
|
pub fn get_ip_weekly_outbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result: i64 = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-7 days')",
|
||||||
|
params![ip],
|
||||||
|
|row| row.get(0),
|
||||||
|
)?;
|
||||||
|
Ok(result as u64)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get monthly outbound bytes for an IP.
|
||||||
|
pub fn get_ip_monthly_outbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result: i64 = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-30 days')",
|
||||||
|
params![ip],
|
||||||
|
|row| row.get(0),
|
||||||
|
)?;
|
||||||
|
Ok(result as u64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Session tracking ---
|
||||||
|
|
||||||
|
pub fn start_session(&self, username: &str, peer_ip: &str, protocol: &str, direction: &str) -> anyhow::Result<i64> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
conn.execute(
|
||||||
|
"INSERT INTO sessions (username, peer_ip, protocol, direction) VALUES (?1, ?2, ?3, ?4)",
|
||||||
|
params![username, peer_ip, protocol, direction],
|
||||||
|
)?;
|
||||||
|
Ok(conn.last_insert_rowid())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn end_session(&self, session_id: i64, tx_bytes: u64, rx_bytes: u64) -> anyhow::Result<()> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
conn.execute(
|
||||||
|
"UPDATE sessions SET ended_at = datetime('now'), tx_bytes = ?1, rx_bytes = ?2 WHERE id = ?3",
|
||||||
|
params![tx_bytes as i64, rx_bytes as i64, session_id],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Per-second interval tracking ---
|
||||||
|
|
||||||
|
/// Record a single per-second interval data point for a session.
///
/// `session_id` must be a rowid returned by `start_session`. `tx_bytes` /
/// `rx_bytes` are the interval's raw byte counts; `tx_mbps` / `rx_mbps`
/// the derived rates; `local_cpu` / `remote_cpu` the CPU-load figures
/// reported by each side (units assumed percent — TODO confirm at the
/// call site); `lost` is the lost-packet count.
#[allow(clippy::too_many_arguments)]
pub fn record_test_interval(
    &self,
    session_id: i64,
    interval_num: i32,
    tx_bytes: u64,
    rx_bytes: u64,
    tx_mbps: f64,
    rx_mbps: f64,
    local_cpu: i32,
    remote_cpu: i32,
    lost: i64,
) -> anyhow::Result<()> {
    let conn = self.conn.lock().unwrap();
    // u64 counters are cast to i64 because SQLite integers are signed.
    conn.execute(
        "INSERT INTO test_intervals (session_id, interval_num, tx_bytes, rx_bytes, tx_mbps, rx_mbps, local_cpu, remote_cpu, lost_packets)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
        params![
            session_id,
            interval_num,
            tx_bytes as i64,
            rx_bytes as i64,
            tx_mbps,
            rx_mbps,
            local_cpu,
            remote_cpu,
            lost,
        ],
    )?;
    Ok(())
}
|
||||||
|
|
||||||
|
/// Retrieve all interval data points for a given session, ordered by interval number.
|
||||||
|
pub fn get_session_intervals(&self, session_id: i64) -> anyhow::Result<Vec<IntervalData>> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let mut stmt = conn.prepare(
|
||||||
|
"SELECT interval_num, tx_mbps, rx_mbps, local_cpu, remote_cpu, lost_packets
|
||||||
|
FROM test_intervals WHERE session_id = ?1 ORDER BY interval_num"
|
||||||
|
)?;
|
||||||
|
let rows = stmt.query_map(params![session_id], |row| {
|
||||||
|
Ok(IntervalData {
|
||||||
|
interval_num: row.get(0)?,
|
||||||
|
tx_mbps: row.get(1)?,
|
||||||
|
rx_mbps: row.get(2)?,
|
||||||
|
local_cpu: row.get(3)?,
|
||||||
|
remote_cpu: row.get(4)?,
|
||||||
|
lost: row.get(5)?,
|
||||||
|
})
|
||||||
|
})?.filter_map(|r| r.ok()).collect();
|
||||||
|
Ok(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the last N sessions for a given IP address, most recent first.
|
||||||
|
pub fn get_ip_sessions(&self, ip: &str, limit: u32) -> anyhow::Result<Vec<SessionSummary>> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let mut stmt = conn.prepare(
|
||||||
|
"SELECT id, started_at, ended_at, protocol, direction, tx_bytes, rx_bytes
|
||||||
|
FROM sessions WHERE peer_ip = ?1 ORDER BY started_at DESC LIMIT ?2"
|
||||||
|
)?;
|
||||||
|
let rows = stmt.query_map(params![ip, limit], |row| {
|
||||||
|
Ok(SessionSummary {
|
||||||
|
id: row.get(0)?,
|
||||||
|
started_at: row.get(1)?,
|
||||||
|
ended_at: row.get(2)?,
|
||||||
|
protocol: row.get::<_, Option<String>>(3)?.unwrap_or_default(),
|
||||||
|
direction: row.get::<_, Option<String>>(4)?.unwrap_or_default(),
|
||||||
|
tx_bytes: row.get::<_, i64>(5).map(|v| v as u64)?,
|
||||||
|
rx_bytes: row.get::<_, i64>(6).map(|v| v as u64)?,
|
||||||
|
})
|
||||||
|
})?.filter_map(|r| r.ok()).collect();
|
||||||
|
Ok(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return aggregate statistics for an IP address across all sessions.
|
||||||
|
pub fn get_ip_stats(&self, ip: &str) -> anyhow::Result<IpStats> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let result = conn.query_row(
|
||||||
|
"SELECT
|
||||||
|
COUNT(*) as total_tests,
|
||||||
|
COALESCE(SUM(inbound_bytes), 0) as total_inbound,
|
||||||
|
COALESCE(SUM(outbound_bytes), 0) as total_outbound
|
||||||
|
FROM ip_usage WHERE ip = ?1",
|
||||||
|
params![ip],
|
||||||
|
|row| {
|
||||||
|
let total_tests: i64 = row.get(0)?;
|
||||||
|
let total_inbound: i64 = row.get(1)?;
|
||||||
|
let total_outbound: i64 = row.get(2)?;
|
||||||
|
Ok((total_tests as u64, total_inbound as u64, total_outbound as u64))
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// Compute average Mbps from test_intervals joined through sessions
|
||||||
|
let (avg_tx, avg_rx) = conn.query_row(
|
||||||
|
"SELECT
|
||||||
|
COALESCE(AVG(ti.tx_mbps), 0.0),
|
||||||
|
COALESCE(AVG(ti.rx_mbps), 0.0)
|
||||||
|
FROM test_intervals ti
|
||||||
|
INNER JOIN sessions s ON ti.session_id = s.id
|
||||||
|
WHERE s.peer_ip = ?1",
|
||||||
|
params![ip],
|
||||||
|
|row| {
|
||||||
|
let avg_tx: f64 = row.get(0)?;
|
||||||
|
let avg_rx: f64 = row.get(1)?;
|
||||||
|
Ok((avg_tx, avg_rx))
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(IpStats {
|
||||||
|
total_tests: result.0,
|
||||||
|
total_inbound: result.1,
|
||||||
|
total_outbound: result.2,
|
||||||
|
avg_tx_mbps: avg_tx,
|
||||||
|
avg_rx_mbps: avg_rx,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn delete_user(&self, username: &str) -> anyhow::Result<bool> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let rows = conn.execute("DELETE FROM users WHERE username = ?1", params![username])?;
|
||||||
|
Ok(rows > 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set_user_enabled(&self, username: &str, enabled: bool) -> anyhow::Result<()> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
conn.execute(
|
||||||
|
"UPDATE users SET enabled = ?1 WHERE username = ?2",
|
||||||
|
params![enabled as i32, username],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set_user_quota(&self, username: &str, daily: i64, weekly: i64, monthly: i64) -> anyhow::Result<()> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
conn.execute(
|
||||||
|
"UPDATE users SET daily_quota = ?1, weekly_quota = ?2 WHERE username = ?3",
|
||||||
|
params![daily, weekly, username],
|
||||||
|
)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn list_users(&self) -> anyhow::Result<Vec<User>> {
|
||||||
|
let conn = self.conn.lock().unwrap();
|
||||||
|
let mut stmt = conn.prepare(
|
||||||
|
"SELECT id, username, password_hash, daily_quota, weekly_quota, enabled FROM users ORDER BY username"
|
||||||
|
)?;
|
||||||
|
let users = stmt.query_map([], |row| {
|
||||||
|
Ok(User {
|
||||||
|
id: row.get(0)?,
|
||||||
|
username: row.get(1)?,
|
||||||
|
password_hash: row.get(2)?,
|
||||||
|
daily_quota: row.get(3)?,
|
||||||
|
weekly_quota: row.get(4)?,
|
||||||
|
enabled: row.get::<_, i32>(5)? != 0,
|
||||||
|
})
|
||||||
|
})?.filter_map(|r| r.ok()).collect();
|
||||||
|
Ok(users)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hash_password(username: &str, password: &str) -> String {
|
||||||
|
use sha2::{Sha256, Digest};
|
||||||
|
let mut hasher = Sha256::new();
|
||||||
|
hasher.update(username.as_bytes());
|
||||||
|
hasher.update(b":");
|
||||||
|
hasher.update(password.as_bytes());
|
||||||
|
let result = hasher.finalize();
|
||||||
|
let mut hex = String::with_capacity(64);
|
||||||
|
for b in result {
|
||||||
|
use std::fmt::Write;
|
||||||
|
let _ = write!(hex, "{:02x}", b);
|
||||||
|
}
|
||||||
|
hex
|
||||||
|
}
|
||||||
|
|
||||||
|
fn chrono_date_today() -> String {
|
||||||
|
chrono::Local::now().format("%Y-%m-%d").to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Re-export for use by rusqlite
|
||||||
|
use rusqlite::OptionalExtension;
|
||||||
811
src/server_pro/web/mod.rs
Normal file
811
src/server_pro/web/mod.rs
Normal file
@@ -0,0 +1,811 @@
|
|||||||
|
//! Web dashboard module for btest-server-pro.
|
||||||
|
//!
|
||||||
|
//! Provides an axum-based HTTP dashboard with:
|
||||||
|
//! - Landing page with IP lookup
|
||||||
|
//! - Per-IP session history and statistics
|
||||||
|
//! - Chart.js throughput graphs
|
||||||
|
//!
|
||||||
|
//! # Feature gate
|
||||||
|
//!
|
||||||
|
//! This entire module is compiled only when the `pro` feature is active
|
||||||
|
//! (it lives inside the `btest-server-pro` binary crate which already
|
||||||
|
//! requires `--features pro`).
|
||||||
|
//!
|
||||||
|
//! # Template files
|
||||||
|
//!
|
||||||
|
//! The HTML source lives in `src/server_pro/web/templates/` as standalone
|
||||||
|
//! `.html` files for easy editing. The Rust code embeds them via the askama
|
||||||
|
//! `source` attribute so no `askama.toml` configuration is needed. If you
|
||||||
|
//! prefer external template files, create `askama.toml` at the crate root:
|
||||||
|
//!
|
||||||
|
//! ```toml
|
||||||
|
//! [[dirs]]
|
||||||
|
//! path = "src/server_pro/web/templates"
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Then change `source = "..."` to `path = "index.html"` (etc.) in the
|
||||||
|
//! template structs below.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use askama::Template;
|
||||||
|
use axum::extract::{Path, State};
|
||||||
|
use axum::http::StatusCode;
|
||||||
|
use axum::response::{Html, IntoResponse, Response};
|
||||||
|
use axum::routing::get;
|
||||||
|
use axum::Router;
|
||||||
|
use rusqlite::{params, Connection};
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use super::user_db::UserDb;
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Shared state
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/// Shared application state passed to all handlers via axum's `State`.
pub struct WebState {
    /// Reference to the main user/session database.
    pub db: UserDb,
    /// Separate read-only connection for dashboard queries that are not
    /// exposed by [`UserDb`] (e.g. listing sessions, aggregate stats).
    /// Wrapped in a [`std::sync::Mutex`] because [`rusqlite::Connection`]
    /// is not `Send + Sync` on its own.
    ///
    /// NOTE: handlers must not hold this std mutex guard across an
    /// `.await` point — lock, query synchronously, drop the guard.
    pub query_conn: std::sync::Mutex<Connection>,
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Router constructor
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/// Default database filename used when `BTEST_DB_PATH` is not set.
// NOTE(review): nothing visible in this module reads this constant or the
// BTEST_DB_PATH variable — create_router takes the path from `db.path()`.
// Confirm the env-var resolution happens where `UserDb` is constructed,
// or remove this constant.
const DEFAULT_DB_PATH: &str = "btest-users.db";
|
||||||
|
|
||||||
|
/// Build the axum [`Router`] for the web dashboard.
///
/// The read-only query connection is opened against the same database
/// file the supplied [`UserDb`] uses (via `db.path()`). This function
/// does NOT consult `BTEST_DB_PATH` or `DEFAULT_DB_PATH` itself — any
/// such resolution is assumed to happen where `UserDb` is constructed
/// (TODO confirm at the call site).
///
/// # Panics
///
/// Panics if the read-only database connection, its PRAGMA setup, or the
/// DDL for the `session_intervals` table cannot be established. This is
/// intentional: the web module is optional and failure during startup
/// should surface loudly rather than silently serving broken pages.
pub fn create_router(db: UserDb) -> Router {
    let db_path = db.path().to_string();

    // Read-only + NO_MUTEX: we serialize access ourselves via WebState's
    // std::sync::Mutex, so SQLite's internal mutex is unnecessary.
    let query_conn = Connection::open_with_flags(
        &db_path,
        rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
            | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX,
    )
    .expect("web: failed to open read-only database connection");
    query_conn
        .execute_batch("PRAGMA busy_timeout=5000;")
        .expect("web: failed to set PRAGMA on query connection");

    // Ensure the `session_intervals` table exists. The server loop must
    // INSERT rows for the chart to have data; the table is created here so
    // the schema is ready.
    ensure_web_tables(&db_path).expect("web: failed to create session_intervals table");

    let state = Arc::new(WebState {
        db,
        query_conn: std::sync::Mutex::new(query_conn),
    });

    // axum 0.8 uses `{param}` syntax for path parameters.
    Router::new()
        .route("/", get(index_page))
        .route("/dashboard/{ip}", get(dashboard_page))
        .route("/api/ip/{ip}/sessions", get(api_sessions))
        .route("/api/ip/{ip}/stats", get(api_stats))
        .route("/api/ip/{ip}/export", get(api_export))
        .route("/api/ip/{ip}/quota", get(api_quota))
        .route("/api/session/{id}/intervals", get(api_intervals))
        .with_state(state)
}
|
||||||
|
|
||||||
|
/// Create additional tables the web dashboard depends on.
///
/// Opens a short-lived writable connection solely for DDL so it does not
/// interfere with the main [`UserDb`] connection.
///
/// NOTE(review): this creates `session_intervals`, while `UserDb`'s own
/// schema uses a `test_intervals` table for per-second samples — confirm
/// which table the server loop actually writes and the chart reads; the
/// index name `idx_intervals_session` also collides with the one created
/// by `ensure_tables` (harmless only because both are IF NOT EXISTS on
/// different tables in practice — verify).
fn ensure_web_tables(db_path: &str) -> anyhow::Result<()> {
    let conn = Connection::open(db_path)?;
    conn.execute_batch("PRAGMA busy_timeout=5000;")?;
    conn.execute_batch(
        "CREATE TABLE IF NOT EXISTS session_intervals (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            session_id INTEGER NOT NULL,
            second INTEGER NOT NULL,
            tx_bytes INTEGER NOT NULL DEFAULT 0,
            rx_bytes INTEGER NOT NULL DEFAULT 0,
            UNIQUE(session_id, second)
        );
        CREATE INDEX IF NOT EXISTS idx_intervals_session
            ON session_intervals(session_id, second);",
    )?;
    Ok(())
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Askama templates (embedded via `source`)
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/// Landing / index page template.
///
/// Field-less struct: the page is fully static, so askama renders the
/// embedded HTML verbatim (no `{{ … }}` placeholders). The client-side
/// script auto-detects the visitor's public IP via api.ipify.org and
/// links to `/dashboard/{ip}`.
#[derive(Template)]
#[template(
    source = r##"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>btest-rs — Free Public Bandwidth Test Server</title>
<style>
*{margin:0;padding:0;box-sizing:border-box}
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Helvetica,Arial,sans-serif;background:#0f1117;color:#e1e4e8;min-height:100vh;display:flex;flex-direction:column;align-items:center;padding:2rem 1rem}
.container{max-width:720px;width:100%;padding:1rem 0}
h1{font-size:2.2rem;margin-bottom:.25rem;color:#58a6ff;text-align:center}
.subtitle{color:#8b949e;margin-bottom:2.5rem;line-height:1.6;text-align:center;font-size:1.05rem}
.section{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1.5rem;margin-bottom:1.5rem;text-align:left;line-height:1.7;color:#c9d1d9}
.section h2{color:#e1e4e8;font-size:1.15rem;margin-bottom:.75rem}
.section h3{color:#e1e4e8;font-size:1rem;margin-bottom:.5rem;margin-top:1rem}
.section h3:first-child{margin-top:0}
.section p{margin-bottom:.5rem}
.section ul{margin:.5rem 0 .5rem 1.5rem;color:#8b949e}
.section li{margin-bottom:.35rem}
code{background:#0d1117;padding:.2rem .5rem;border-radius:4px;font-size:.85em;color:#58a6ff;word-break:break-all}
pre{background:#0d1117;border:1px solid #30363d;border-radius:6px;padding:1rem;overflow-x:auto;margin:.75rem 0;line-height:1.5}
pre code{padding:0;background:none;font-size:.85em}
.label-tag{display:inline-block;padding:.15rem .5rem;border-radius:4px;font-size:.75rem;font-weight:600;text-transform:uppercase;letter-spacing:.03em;margin-right:.5rem;vertical-align:middle}
.tag-tcp{background:rgba(63,185,80,0.15);color:#3fb950}
.tag-udp{background:rgba(210,153,34,0.15);color:#d29922}
.note{background:#1c1e26;border-left:3px solid #d29922;padding:.75rem 1rem;border-radius:0 6px 6px 0;margin:.75rem 0;font-size:.92rem;color:#8b949e}
.note strong{color:#d29922}
.search-section{text-align:center}
.search-section h2{text-align:center}
.search-box{display:flex;gap:.5rem;margin-bottom:1rem}
.search-box input{flex:1;padding:.75rem 1rem;border:1px solid #30363d;border-radius:6px;background:#161b22;color:#e1e4e8;font-size:1rem;outline:none}
.search-box input:focus{border-color:#58a6ff}
.search-box input::placeholder{color:#484f58}
.search-box button{padding:.75rem 1.5rem;background:#238636;color:#fff;border:none;border-radius:6px;font-size:1rem;cursor:pointer;white-space:nowrap}
.search-box button:hover{background:#2ea043}
.auto-link{font-size:.9rem;color:#8b949e}
.auto-link a{color:#58a6ff;text-decoration:none}
.auto-link a:hover{text-decoration:underline}
.footer{margin-top:2rem;color:#484f58;font-size:.8rem;text-align:center}
.footer a{color:#58a6ff;text-decoration:none}
.footer a:hover{text-decoration:underline}
</style>
</head>
<body>
<div class="container">
<h1>btest-rs</h1>
<p class="subtitle">Free public MikroTik-compatible bandwidth test server.<br>Test your link speed from any RouterOS device — no registration required.</p>

<div class="section">
<h2>Quick Start</h2>
<p>Open a terminal on your MikroTik router and run one of the following commands:</p>
<h3><span class="label-tag tag-tcp">TCP</span> Recommended</h3>
<pre><code>/tool bandwidth-test address=104.225.217.60 user=btest password=btest protocol=tcp direction=both</code></pre>
<h3><span class="label-tag tag-udp">UDP</span></h3>
<pre><code>/tool bandwidth-test address=104.225.217.60 user=btest password=btest protocol=udp direction=both</code></pre>
</div>

<div class="section">
<h2>Important Notes</h2>
<ul>
<li><strong style="color:#e1e4e8">Credentials:</strong> <code>user=btest</code> <code>password=btest</code></li>
<li><strong style="color:#e1e4e8">TCP is recommended</strong> for remote testing — it works reliably through any NAT or firewall</li>
<li><strong style="color:#e1e4e8">Per-IP daily quotas</strong> apply to keep the service fair for everyone</li>
<li><strong style="color:#e1e4e8">Maximum test duration:</strong> 120 seconds</li>
<li><strong style="color:#e1e4e8">Connection limit:</strong> 3 concurrent tests per IP</li>
</ul>
<div class="note">
<strong>UDP bidirectional may not work through NAT/firewall.</strong>
UDP <code>direction=both</code> requires the server to send packets to a pre-calculated client port, which NAT routers typically block. If you need UDP testing:<br>
• Forward UDP ports 2001–2100 on your router, or<br>
• Use <code>direction=send</code> or <code>direction=receive</code> (one-way works fine), or<br>
• Test from a device with a public IP
</div>
</div>

<div class="section search-section">
<h2>Check Your Results</h2>
<p style="margin-bottom:1rem;color:#8b949e">After running a test, enter your public IP to view throughput charts, session history, and statistics.</p>
<form class="search-box" id="ip-form" onsubmit="return goToDashboard()">
<input type="text" id="ip-input" placeholder="Enter your IP address (e.g. 203.0.113.5)" autocomplete="off">
<button type="submit">View Results</button>
</form>
<div class="auto-link" id="auto-detect">Detecting your IP...</div>
</div>

<div class="footer">Powered by <a href="https://github.com/manawenuz/btest-rs">btest-rs</a> — open source MikroTik bandwidth test server</div>
</div>
<script>
function goToDashboard(){var ip=document.getElementById('ip-input').value.trim();if(ip){window.location.href='/dashboard/'+encodeURIComponent(ip);}return false;}
fetch('https://api.ipify.org?format=json')
.then(function(r){return r.json();})
.then(function(d){if(d.ip){document.getElementById('ip-input').value=d.ip;document.getElementById('auto-detect').innerHTML='Detected IP: <a href="/dashboard/'+encodeURIComponent(d.ip)+'">'+d.ip+'</a> — click to view your dashboard';}})
.catch(function(){document.getElementById('auto-detect').textContent='';});
</script>
</body>
</html>"##,
    ext = "html"
)]
struct IndexTemplate;
|
||||||
|
|
||||||
|
/// Per-IP dashboard page template.
///
/// Rendered by `dashboard_page` at `GET /dashboard/{ip}`. The page is
/// self-contained: after load it fetches `/api/ip/{ip}/stats`,
/// `/api/ip/{ip}/quota`, `/api/ip/{ip}/sessions` and
/// `/api/session/{id}/intervals` client-side, and draws the throughput
/// chart with Chart.js loaded from the jsdelivr CDN.
#[derive(Template)]
#[template(
    source = r##"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Dashboard — {{ ip }} — btest-rs</title>
<style>
*{margin:0;padding:0;box-sizing:border-box}
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Helvetica,Arial,sans-serif;background:#0f1117;color:#e1e4e8;min-height:100vh;padding:1.5rem}
a{color:#58a6ff;text-decoration:none}a:hover{text-decoration:underline}
.header{display:flex;align-items:center;gap:1rem;margin-bottom:1.5rem;flex-wrap:wrap}
.header h1{font-size:1.5rem;color:#58a6ff}
.header .ip-label{font-size:1.1rem;color:#8b949e;font-family:monospace}
.header .home-link{margin-left:auto}
.btn{display:inline-block;padding:.5rem 1rem;border-radius:6px;font-size:.85rem;font-weight:500;cursor:pointer;border:1px solid #30363d;text-decoration:none}
.btn-json{background:#161b22;color:#3fb950}.btn-json:hover{background:#1c2128;text-decoration:none}
.stats{display:grid;grid-template-columns:repeat(auto-fit,minmax(160px,1fr));gap:1rem;margin-bottom:1.5rem}
.stat-card{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1rem}
.stat-card .label{color:#8b949e;font-size:.8rem;text-transform:uppercase;letter-spacing:.05em}
.stat-card .value{font-size:1.4rem;font-weight:600;margin-top:.25rem}
.table-wrap{overflow-x:auto;margin-bottom:1.5rem}
table{width:100%;border-collapse:collapse;background:#161b22;border-radius:8px;overflow:hidden}
th,td{padding:.6rem 1rem;text-align:left;border-bottom:1px solid #21262d;white-space:nowrap}
th{background:#0d1117;color:#8b949e;font-size:.8rem;text-transform:uppercase;letter-spacing:.04em}
tr{cursor:pointer}tr:hover td{background:#1c2128}tr.selected td{background:#1f3a5f}
.proto-tcp{color:#3fb950}.proto-udp{color:#d29922}
.dir-tx{color:#f78166}.dir-rx{color:#58a6ff}.dir-both{color:#bc8cff}
.chart-section{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1.5rem;margin-bottom:1.5rem}
.chart-section h2{font-size:1rem;color:#8b949e;margin-bottom:1rem}
.chart-container{position:relative;width:100%;max-height:360px}
.chart-placeholder{text-align:center;color:#484f58;padding:3rem 0}
.footer{text-align:center;color:#484f58;font-size:.8rem;margin-top:2rem}
.no-data{text-align:center;padding:3rem;color:#484f58}
.quota-section{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1.25rem;margin-bottom:1.5rem}
.quota-section h2{font-size:1rem;color:#8b949e;margin-bottom:1rem}
.quota-row{display:flex;align-items:center;gap:1rem;margin-bottom:.75rem}
.quota-row:last-child{margin-bottom:0}
.quota-label{min-width:70px;font-size:.85rem;color:#8b949e;text-transform:uppercase;letter-spacing:.04em}
.quota-bar-wrap{flex:1;background:#21262d;border-radius:4px;height:22px;position:relative;overflow:hidden}
.quota-bar{height:100%;border-radius:4px;transition:width .5s ease}
.quota-bar.low{background:#238636}.quota-bar.mid{background:#d29922}.quota-bar.high{background:#da3633}
.quota-text{min-width:180px;font-size:.85rem;color:#e1e4e8;text-align:right;font-family:monospace}
</style>
</head>
<body>
<div class="header">
<h1>btest-rs</h1>
<span class="ip-label">{{ ip }}</span>
<a class="btn btn-json" href="/api/ip/{{ ip }}/export" download>Export JSON</a>
<span class="home-link"><a href="/">Home</a></span>
</div>
<div class="stats" id="stats-grid">
<div class="stat-card"><div class="label">Total Tests</div><div class="value" id="stat-total-tests">—</div></div>
<div class="stat-card"><div class="label">Total TX</div><div class="value" id="stat-total-tx">—</div></div>
<div class="stat-card"><div class="label">Total RX</div><div class="value" id="stat-total-rx">—</div></div>
<div class="stat-card"><div class="label">Avg TX Mbps</div><div class="value" id="stat-avg-tx">—</div></div>
<div class="stat-card"><div class="label">Avg RX Mbps</div><div class="value" id="stat-avg-rx">—</div></div>
</div>
<div class="quota-section" id="quota-section">
<h2>Quota Usage</h2>
<div class="quota-row"><span class="quota-label">Daily</span><div class="quota-bar-wrap"><div class="quota-bar low" id="bar-daily" style="width:0%"></div></div><span class="quota-text" id="text-daily">—</span></div>
<div class="quota-row"><span class="quota-label">Weekly</span><div class="quota-bar-wrap"><div class="quota-bar low" id="bar-weekly" style="width:0%"></div></div><span class="quota-text" id="text-weekly">—</span></div>
<div class="quota-row"><span class="quota-label">Monthly</span><div class="quota-bar-wrap"><div class="quota-bar low" id="bar-monthly" style="width:0%"></div></div><span class="quota-text" id="text-monthly">—</span></div>
</div>
<div class="chart-section">
<h2 id="chart-title">Select a test below to view its throughput chart</h2>
<div class="chart-container">
<canvas id="throughput-chart"></canvas>
<div class="chart-placeholder" id="chart-placeholder">Click a row in the table to load the throughput graph for that session.</div>
</div>
</div>
<div class="table-wrap">
<table>
<thead><tr><th>#</th><th>Date</th><th>Protocol</th><th>Direction</th><th>TX Bytes</th><th>RX Bytes</th><th>Duration</th><th>Avg TX Mbps</th><th>Avg RX Mbps</th></tr></thead>
<tbody id="sessions-body"><tr><td colspan="9" class="no-data">Loading sessions...</td></tr></tbody>
</table>
</div>
<div class="footer">Powered by btest-rs</div>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
var currentIp="{{ ip }}";
var throughputChart=null;
function formatBytes(b){if(b===0)return'0 B';var u=['B','KB','MB','GB','TB'];var i=Math.floor(Math.log(b)/Math.log(1024));if(i>=u.length)i=u.length-1;return(b/Math.pow(1024,i)).toFixed(1)+' '+u[i];}
function formatMbps(bps){return(bps*8/1e6).toFixed(2);}
fetch('/api/ip/'+encodeURIComponent(currentIp)+'/quota').then(function(r){return r.json();}).then(function(q){
function upd(id,used,limit){
var pct=limit>0?Math.min(used/limit*100,100):0;
var bar=document.getElementById('bar-'+id);
var txt=document.getElementById('text-'+id);
bar.style.width=pct.toFixed(1)+'%';
bar.className='quota-bar '+(pct<50?'low':pct<80?'mid':'high');
txt.textContent=formatBytes(used)+' / '+formatBytes(limit)+' ('+pct.toFixed(1)+'%)';
}
upd('daily',q.daily_used,q.daily_limit);
upd('weekly',q.weekly_used,q.weekly_limit);
upd('monthly',q.monthly_used,q.monthly_limit);
}).catch(function(){});
function durationStr(s,e){if(!s||!e)return'--';var ms=new Date(e)-new Date(s);if(ms<0)return'--';var sec=Math.round(ms/1000);if(sec<60)return sec+'s';return Math.floor(sec/60)+'m '+(sec%60)+'s';}
function durationSec(s,e){if(!s||!e)return 0;return Math.max((new Date(e)-new Date(s))/1000,0.001);}
fetch('/api/ip/'+encodeURIComponent(currentIp)+'/stats').then(function(r){return r.json();}).then(function(d){
document.getElementById('stat-total-tests').textContent=d.total_sessions||0;
document.getElementById('stat-total-tx').textContent=formatBytes(d.total_tx_bytes||0);
document.getElementById('stat-total-rx').textContent=formatBytes(d.total_rx_bytes||0);
document.getElementById('stat-avg-tx').textContent=d.avg_tx_mbps?d.avg_tx_mbps.toFixed(2):'0.00';
document.getElementById('stat-avg-rx').textContent=d.avg_rx_mbps?d.avg_rx_mbps.toFixed(2):'0.00';
}).catch(function(){});
fetch('/api/ip/'+encodeURIComponent(currentIp)+'/sessions').then(function(r){return r.json();}).then(function(sessions){
var tbody=document.getElementById('sessions-body');
if(!sessions||sessions.length===0){tbody.innerHTML='<tr><td colspan="9" class="no-data">No test sessions found for this IP.</td></tr>';return;}
tbody.innerHTML='';
sessions.forEach(function(s,i){
var tr=document.createElement('tr');tr.dataset.sessionId=s.id;tr.onclick=function(){selectSession(s.id,tr);};
var dur=durationSec(s.started_at,s.ended_at);var avgTx=dur>0?formatMbps(s.tx_bytes/dur):'0.00';var avgRx=dur>0?formatMbps(s.rx_bytes/dur):'0.00';
var proto=(s.protocol||'TCP').toUpperCase();var dir=(s.direction||'BOTH').toUpperCase();
var pc=proto==='UDP'?'proto-udp':'proto-tcp';var dc=dir==='TX'?'dir-tx':dir==='RX'?'dir-rx':'dir-both';
tr.innerHTML='<td>'+(i+1)+'</td><td>'+(s.started_at||'--')+'</td><td class="'+pc+'">'+proto+'</td><td class="'+dc+'">'+dir+'</td><td>'+formatBytes(s.tx_bytes||0)+'</td><td>'+formatBytes(s.rx_bytes||0)+'</td><td>'+durationStr(s.started_at,s.ended_at)+'</td><td>'+avgTx+'</td><td>'+avgRx+'</td>';
tbody.appendChild(tr);
});
if(sessions.length>0){var fr=tbody.querySelector('tr');if(fr)selectSession(sessions[0].id,fr);}
}).catch(function(){document.getElementById('sessions-body').innerHTML='<tr><td colspan="9" class="no-data">Failed to load sessions.</td></tr>';});
function selectSession(sid,row){
document.querySelectorAll('#sessions-body tr').forEach(function(r){r.classList.remove('selected');});
row.classList.add('selected');
document.getElementById('chart-title').textContent='Throughput for session #'+sid;
document.getElementById('chart-placeholder').style.display='none';
fetch('/api/session/'+sid+'/intervals').then(function(r){return r.json();}).then(function(iv){renderChart(iv);}).catch(function(){
document.getElementById('chart-placeholder').style.display='block';
document.getElementById('chart-placeholder').textContent='Failed to load interval data.';
});
}
function renderChart(iv){
var canvas=document.getElementById('throughput-chart');
if(throughputChart)throughputChart.destroy();
if(!iv||iv.length===0){document.getElementById('chart-placeholder').style.display='block';document.getElementById('chart-placeholder').textContent='No interval data available for this session.';return;}
var labels=iv.map(function(d){return d.second+'s';});
var tx=iv.map(function(d){return(d.tx_bytes*8/1e6).toFixed(2);});
var rx=iv.map(function(d){return(d.rx_bytes*8/1e6).toFixed(2);});
throughputChart=new Chart(canvas,{type:'line',data:{labels:labels,datasets:[
{label:'TX Mbps',data:tx,borderColor:'#f78166',backgroundColor:'rgba(247,129,102,0.1)',borderWidth:2,fill:true,tension:0.3,pointRadius:1},
{label:'RX Mbps',data:rx,borderColor:'#58a6ff',backgroundColor:'rgba(88,166,255,0.1)',borderWidth:2,fill:true,tension:0.3,pointRadius:1}
]},options:{responsive:true,maintainAspectRatio:false,interaction:{intersect:false,mode:'index'},
scales:{x:{title:{display:true,text:'Time',color:'#8b949e'},ticks:{color:'#8b949e'},grid:{color:'#21262d'}},
y:{title:{display:true,text:'Mbps',color:'#8b949e'},ticks:{color:'#8b949e'},grid:{color:'#21262d'},beginAtZero:true}},
plugins:{legend:{labels:{color:'#e1e4e8'}},tooltip:{backgroundColor:'#161b22',borderColor:'#30363d',borderWidth:1,titleColor:'#e1e4e8',bodyColor:'#8b949e'}}}});
}
</script>
</body>
</html>"##,
    ext = "html"
)]
struct DashboardTemplate {
    // Peer IP the dashboard is scoped to; interpolated into the title,
    // header and API URLs via `{{ ip }}`.
    ip: String,
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// JSON response types
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/// A single test session as returned by the sessions API.
///
/// Mirrors the columns selected from the `sessions` table in
/// `api_sessions`; serialized directly to JSON.
#[derive(Serialize)]
struct SessionJson {
    // Row id from the `sessions` table; the dashboard uses it to fetch
    // `/api/session/{id}/intervals`.
    id: i64,
    // Login name the session was recorded under.
    username: String,
    // Client IP address the session belongs to.
    peer_ip: String,
    // Session start/end timestamps as stored in SQLite; `None` (JSON null)
    // when the column is NULL (e.g. a session that never finished).
    started_at: Option<String>,
    ended_at: Option<String>,
    // Byte counters for the session.
    tx_bytes: i64,
    rx_bytes: i64,
    // Optional protocol ("TCP"/"UDP") and direction — the dashboard falls
    // back to "TCP"/"BOTH" when these are null.
    protocol: Option<String>,
    direction: Option<String>,
}
|
||||||
|
|
||||||
|
/// Aggregate statistics for an IP address.
///
/// Produced by `api_stats` (and reused by `api_export`) from a single
/// aggregate SQL query over the `sessions` table.
#[derive(Serialize)]
struct StatsJson {
    // Number of recorded sessions for the IP.
    total_sessions: i64,
    // Byte totals summed across all sessions.
    total_tx_bytes: i64,
    total_rx_bytes: i64,
    // Average throughput in megabits/second, computed over the summed
    // wall-clock duration of finished sessions; 0.0 when no session has
    // both timestamps.
    avg_tx_mbps: f64,
    avg_rx_mbps: f64,
}
|
||||||
|
|
||||||
|
/// One second of throughput data within a session.
///
/// One row of the `session_intervals` table, as returned by
/// `api_intervals` and charted by the dashboard.
#[derive(Serialize)]
struct IntervalJson {
    // Offset of this interval from the session start, in seconds.
    second: i64,
    // Bytes transferred during this one-second interval.
    tx_bytes: i64,
    rx_bytes: i64,
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Error helper
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/// Uniform error wrapper so handlers can use `?` freely.
///
/// All errors are rendered as `500 Internal Server Error` with a plain-text
/// body. The full error chain is logged via [`tracing`].
///
/// Handlers never construct this directly — the blanket `From` impl below
/// converts anything that is `Into<anyhow::Error>`.
struct AppError(anyhow::Error);
|
||||||
|
|
||||||
|
impl IntoResponse for AppError {
|
||||||
|
fn into_response(self) -> Response {
|
||||||
|
tracing::error!("web handler error: {:#}", self.0);
|
||||||
|
(StatusCode::INTERNAL_SERVER_ERROR, self.0.to_string()).into_response()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Blanket conversion: any error convertible into `anyhow::Error` can be
// propagated from a handler with `?`, becoming an `AppError` automatically.
impl<E: Into<anyhow::Error>> From<E> for AppError {
    fn from(err: E) -> Self {
        Self(err.into())
    }
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Handlers
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
/// `GET /` -- render the landing page.
|
||||||
|
async fn index_page() -> Result<Html<String>, AppError> {
|
||||||
|
let rendered = IndexTemplate
|
||||||
|
.render()
|
||||||
|
.map_err(|e| anyhow::anyhow!("template render: {}", e))?;
|
||||||
|
Ok(Html(rendered))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// `GET /dashboard/{ip}` -- render the per-IP dashboard.
|
||||||
|
async fn dashboard_page(Path(ip): Path<String>) -> Result<Html<String>, AppError> {
|
||||||
|
let rendered = DashboardTemplate { ip }
|
||||||
|
.render()
|
||||||
|
.map_err(|e| anyhow::anyhow!("template render: {}", e))?;
|
||||||
|
Ok(Html(rendered))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// `GET /api/ip/{ip}/sessions` -- return the most recent 100 sessions for
/// the given peer IP as a JSON array.
///
/// Sessions are ordered newest-first by `started_at`. Rows that fail to
/// decode are silently skipped rather than failing the whole request.
async fn api_sessions(
    State(state): State<Arc<WebState>>,
    Path(ip): Path<String>,
) -> Result<axum::Json<Vec<SessionJson>>, AppError> {
    // Inner scope so the connection mutex guard is dropped before the
    // response is constructed.
    let sessions = {
        let conn = state
            .query_conn
            .lock()
            // A poisoned mutex becomes a 500 instead of a panic.
            .map_err(|e| anyhow::anyhow!("lock: {}", e))?;
        let mut stmt = conn.prepare(
            "SELECT id, username, peer_ip, started_at, ended_at,
                    tx_bytes, rx_bytes, protocol, direction
             FROM sessions
             WHERE peer_ip = ?1
             ORDER BY started_at DESC
             LIMIT 100",
        )?;
        // Column indexes below match the SELECT order above.
        let rows = stmt.query_map(params![ip], |row| {
            Ok(SessionJson {
                id: row.get(0)?,
                username: row.get(1)?,
                peer_ip: row.get(2)?,
                started_at: row.get(3)?,
                ended_at: row.get(4)?,
                tx_bytes: row.get(5)?,
                rx_bytes: row.get(6)?,
                protocol: row.get(7)?,
                direction: row.get(8)?,
            })
        })?;
        // Drop rows whose decode failed; keep the rest.
        rows.filter_map(Result::ok).collect::<Vec<_>>()
    };

    Ok(axum::Json(sessions))
}
|
||||||
|
|
||||||
|
/// `GET /api/ip/{ip}/stats` -- return aggregate statistics (total bytes,
/// session count, average throughput) for the given IP.
///
/// Averages are computed over the summed wall-clock duration of finished
/// sessions (both timestamps present); unfinished sessions contribute
/// bytes but zero seconds.
async fn api_stats(
    State(state): State<Arc<WebState>>,
    Path(ip): Path<String>,
) -> Result<axum::Json<StatsJson>, AppError> {
    // Inner scope so the connection mutex guard is dropped before responding.
    let stats = {
        let conn = state
            .query_conn
            .lock()
            .map_err(|e| anyhow::anyhow!("lock: {}", e))?;
        // Single aggregate row: count, byte totals, and total duration in
        // seconds (julianday difference is in days, hence * 86400).
        conn.query_row(
            "SELECT
                COUNT(*) AS total_sessions,
                COALESCE(SUM(tx_bytes), 0) AS total_tx,
                COALESCE(SUM(rx_bytes), 0) AS total_rx,
                COALESCE(SUM(
                    CASE WHEN ended_at IS NOT NULL AND started_at IS NOT NULL
                         THEN (julianday(ended_at) - julianday(started_at)) * 86400.0
                         ELSE 0 END
                ), 0) AS total_seconds
             FROM sessions
             WHERE peer_ip = ?1",
            params![ip],
            |row| {
                let total_sessions: i64 = row.get(0)?;
                let total_tx: i64 = row.get(1)?;
                let total_rx: i64 = row.get(2)?;
                let total_seconds: f64 = row.get(3)?;

                // bytes/seconds -> megabits per second; guard against
                // divide-by-zero when no session has finished.
                let avg_tx_mbps = if total_seconds > 0.0 {
                    (total_tx as f64) * 8.0 / total_seconds / 1_000_000.0
                } else {
                    0.0
                };
                let avg_rx_mbps = if total_seconds > 0.0 {
                    (total_rx as f64) * 8.0 / total_seconds / 1_000_000.0
                } else {
                    0.0
                };

                Ok(StatsJson {
                    total_sessions,
                    total_tx_bytes: total_tx,
                    total_rx_bytes: total_rx,
                    avg_tx_mbps,
                    avg_rx_mbps,
                })
            },
        )?
    };

    Ok(axum::Json(stats))
}
|
||||||
|
|
||||||
|
/// Quota usage for an IP — daily/weekly/monthly with limits.
///
/// All values are byte counts; the dashboard renders `used / limit`
/// progress bars from these fields.
#[derive(Serialize)]
struct QuotaUsageJson {
    // Bytes used today (inbound + outbound).
    daily_used: i64,
    daily_limit: i64,
    // Bytes used over the trailing weekly window.
    weekly_used: i64,
    weekly_limit: i64,
    // Bytes used over the trailing monthly window.
    monthly_used: i64,
    monthly_limit: i64,
}
|
||||||
|
|
||||||
|
/// `GET /api/ip/{ip}/quota` -- return current quota usage for the IP.
|
||||||
|
async fn api_quota(
|
||||||
|
State(state): State<Arc<WebState>>,
|
||||||
|
Path(ip): Path<String>,
|
||||||
|
) -> Result<axum::Json<QuotaUsageJson>, AppError> {
|
||||||
|
let conn = state.query_conn.lock().map_err(|e| anyhow::anyhow!("lock: {}", e))?;
|
||||||
|
|
||||||
|
let daily: i64 = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage WHERE ip = ?1 AND date = date('now')",
|
||||||
|
params![ip], |row| row.get(0),
|
||||||
|
).unwrap_or(0);
|
||||||
|
|
||||||
|
let weekly: i64 = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-7 days')",
|
||||||
|
params![ip], |row| row.get(0),
|
||||||
|
).unwrap_or(0);
|
||||||
|
|
||||||
|
let monthly: i64 = conn.query_row(
|
||||||
|
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-30 days')",
|
||||||
|
params![ip], |row| row.get(0),
|
||||||
|
).unwrap_or(0);
|
||||||
|
|
||||||
|
// Limits: 2GB daily, 8GB weekly, 24GB monthly
|
||||||
|
Ok(axum::Json(QuotaUsageJson {
|
||||||
|
daily_used: daily,
|
||||||
|
daily_limit: 2_147_483_648,
|
||||||
|
weekly_used: weekly,
|
||||||
|
weekly_limit: 8_589_934_592,
|
||||||
|
monthly_used: monthly,
|
||||||
|
monthly_limit: 25_769_803_776,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Full export of all data for an IP — stats + sessions with human-readable fields.
///
/// Top-level payload of `GET /api/ip/{ip}/export`.
#[derive(Serialize)]
struct ExportJson {
    // IP address the export is scoped to.
    ip: String,
    // Export creation time: Unix-epoch seconds rendered as a string
    // (see `api_export`).
    exported_at: String,
    // Aggregate totals/averages across the IP's sessions.
    stats: StatsJson,
    // Snapshot of today's quota usage.
    quota: QuotaJson,
    // Up to 100 most recent sessions, newest first.
    sessions: Vec<ExportSessionJson>,
}
|
||||||
|
|
||||||
|
// Daily quota snapshot embedded in the export payload.
#[derive(Serialize)]
struct QuotaJson {
    // Bytes counted against today's quota (inbound + outbound).
    daily_used_bytes: i64,
    // Same value formatted by `human_bytes` (e.g. "1.5 MB").
    daily_used_human: String,
    // NOTE(review): despite the `_bytes` suffix this is a String — it is
    // filled with the placeholder "see server config" by `api_export`.
    // Renaming it would change the export schema, so it is left as-is.
    daily_limit_bytes: String,
}
|
||||||
|
|
||||||
|
// One session in the export payload, with derived human-readable fields
// precomputed server-side (see `api_export`).
#[derive(Serialize)]
struct ExportSessionJson {
    // Row id from the `sessions` table.
    id: i64,
    // Timestamps as stored; `None`/null when missing.
    started_at: Option<String>,
    ended_at: Option<String>,
    protocol: Option<String>,
    direction: Option<String>,
    // Raw byte counters...
    tx_bytes: i64,
    rx_bytes: i64,
    // ...and the same values formatted via `human_bytes`.
    tx_human: String,
    rx_human: String,
    // Wall-clock duration in seconds, computed by SQLite; 0 when either
    // timestamp is missing.
    duration_secs: f64,
    // Average throughput in megabits/second over `duration_secs`
    // (0.0 when the duration is zero).
    avg_tx_mbps: f64,
    avg_rx_mbps: f64,
}
|
||||||
|
|
||||||
|
/// Format a byte count as a short human-readable string.
///
/// Uses binary (1024-based) thresholds: GB with two decimals, MB and KB
/// with one decimal, and anything below 1 KiB as a plain integer of bytes.
fn human_bytes(b: i64) -> String {
    const KB: f64 = 1024.0;
    const MB: f64 = KB * 1024.0;
    const GB: f64 = MB * 1024.0;

    let bytes = b as f64;
    // Largest-first threshold table: (scale, suffix, decimal places).
    let scales: [(f64, &str, usize); 3] = [(GB, "GB", 2), (MB, "MB", 1), (KB, "KB", 1)];
    for (scale, suffix, precision) in scales {
        if bytes >= scale {
            return format!("{:.*} {}", precision, bytes / scale, suffix);
        }
    }
    format!("{} B", bytes as i64)
}
|
||||||
|
|
||||||
|
/// `GET /api/ip/{ip}/export` -- return a comprehensive JSON export of all
/// sessions, stats, and quota usage for an IP. Suitable for download/archival.
///
/// The response carries `Content-Disposition: attachment` so browsers save
/// it as `btest-{ip}.json` instead of displaying it.
async fn api_export(
    State(state): State<Arc<WebState>>,
    Path(ip): Path<String>,
) -> Result<impl IntoResponse, AppError> {
    let conn = state
        .query_conn
        .lock()
        .map_err(|e| anyhow::anyhow!("lock: {}", e))?;

    // Stats: one aggregate row — count, byte totals, and summed duration in
    // seconds of finished sessions (julianday difference is days, * 86400).
    let stats = conn.query_row(
        "SELECT COUNT(*), COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0),
                COALESCE(SUM(CASE WHEN ended_at IS NOT NULL AND started_at IS NOT NULL
                     THEN (julianday(ended_at)-julianday(started_at))*86400.0 ELSE 0 END),0)
         FROM sessions WHERE peer_ip = ?1",
        params![ip],
        |row| {
            let n: i64 = row.get(0)?;
            let tx: i64 = row.get(1)?;
            let rx: i64 = row.get(2)?;
            let secs: f64 = row.get(3)?;
            Ok(StatsJson {
                total_sessions: n,
                total_tx_bytes: tx,
                total_rx_bytes: rx,
                // bytes/seconds -> megabits per second; 0 if nothing finished.
                avg_tx_mbps: if secs > 0.0 { tx as f64 * 8.0 / secs / 1e6 } else { 0.0 },
                avg_rx_mbps: if secs > 0.0 { rx as f64 * 8.0 / secs / 1e6 } else { 0.0 },
            })
        },
    )?;

    // Quota: today's usage; a failed query (e.g. missing table) degrades to 0.
    let daily_used: i64 = conn.query_row(
        "SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage
         WHERE ip = ?1 AND date = date('now')",
        params![ip],
        |row| row.get(0),
    ).unwrap_or(0);

    let quota = QuotaJson {
        daily_used_bytes: daily_used,
        daily_used_human: human_bytes(daily_used),
        // Placeholder — the actual limit lives in server configuration.
        daily_limit_bytes: "see server config".to_string(),
    };

    // Sessions with computed fields (duration computed by SQLite)
    let mut stmt = conn.prepare(
        "SELECT id, started_at, ended_at, protocol, direction, tx_bytes, rx_bytes,
                CASE WHEN ended_at IS NOT NULL AND started_at IS NOT NULL
                     THEN (julianday(ended_at) - julianday(started_at)) * 86400.0
                     ELSE 0 END AS dur_secs
         FROM sessions WHERE peer_ip = ?1 ORDER BY started_at DESC LIMIT 100",
    )?;
    // Rows that fail to decode are skipped rather than failing the export.
    let sessions: Vec<ExportSessionJson> = stmt.query_map(params![ip], |row| {
        let tx: i64 = row.get(5)?;
        let rx: i64 = row.get(6)?;
        let dur: f64 = row.get(7)?;
        Ok(ExportSessionJson {
            id: row.get(0)?,
            started_at: row.get(1)?,
            ended_at: row.get(2)?,
            protocol: row.get(3)?,
            direction: row.get(4)?,
            tx_bytes: tx,
            rx_bytes: rx,
            tx_human: human_bytes(tx),
            rx_human: human_bytes(rx),
            duration_secs: dur,
            avg_tx_mbps: if dur > 0.0 { tx as f64 * 8.0 / dur / 1e6 } else { 0.0 },
            avg_rx_mbps: if dur > 0.0 { rx as f64 * 8.0 / dur / 1e6 } else { 0.0 },
        })
    })?.filter_map(Result::ok).collect();

    let export = ExportJson {
        ip: ip.clone(),
        exported_at: {
            // Simple UTC timestamp without chrono
            use std::time::{SystemTime, UNIX_EPOCH};
            let secs = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
            format!("{}", secs) // Unix timestamp — universally parseable
        },
        stats,
        quota,
        sessions,
    };

    let json_string = serde_json::to_string_pretty(&export)
        .map_err(|e| anyhow::anyhow!("json serialize: {}", e))?;

    // Explicit headers make the browser treat the body as a file download.
    Ok((
        StatusCode::OK,
        [
            (axum::http::header::CONTENT_TYPE, "application/json".to_string()),
            (axum::http::header::CONTENT_DISPOSITION,
                format!("attachment; filename=\"btest-{}.json\"", ip)),
        ],
        json_string,
    ))
}
|
||||||
|
|
||||||
|
/// `GET /api/session/{id}/intervals` -- return per-second throughput data
/// for a session.
///
/// If the `session_intervals` table does not exist or contains no rows for
/// the requested session, an empty JSON array is returned.
async fn api_intervals(
    State(state): State<Arc<WebState>>,
    Path(id): Path<i64>,
) -> Result<axum::Json<Vec<IntervalJson>>, AppError> {
    // Inner scope so the connection mutex guard is dropped before responding.
    let intervals = {
        let conn = state
            .query_conn
            .lock()
            .map_err(|e| anyhow::anyhow!("lock: {}", e))?;

        // Guard against the table not existing (e.g. first run before
        // `ensure_web_tables` was ever called on this database file).
        let table_exists: bool = conn
            .query_row(
                "SELECT COUNT(*) FROM sqlite_master \
                 WHERE type = 'table' AND name = 'session_intervals'",
                [],
                |row| row.get::<_, i64>(0),
            )
            .map(|c| c > 0)
            // A failed metadata query is treated the same as a missing table.
            .unwrap_or(false);

        if !table_exists {
            Vec::new()
        } else {
            let mut stmt = conn.prepare(
                "SELECT second, tx_bytes, rx_bytes
                 FROM session_intervals
                 WHERE session_id = ?1
                 ORDER BY second ASC",
            )?;
            let rows = stmt.query_map(params![id], |row| {
                Ok(IntervalJson {
                    second: row.get(0)?,
                    tx_bytes: row.get(1)?,
                    rx_bytes: row.get(2)?,
                })
            })?;
            // Rows that fail to decode are skipped rather than failing the request.
            rows.filter_map(Result::ok).collect::<Vec<_>>()
        }
    };

    Ok(axum::Json(intervals))
}
|
||||||
387
src/server_pro/web/templates/dashboard.html
Normal file
387
src/server_pro/web/templates/dashboard.html
Normal file
@@ -0,0 +1,387 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||||
|
<title>Dashboard — {{ ip }} — btest-rs</title>
|
||||||
|
<style>
|
||||||
|
* { margin: 0; padding: 0; box-sizing: border-box; }
|
||||||
|
body {
|
||||||
|
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||||
|
background: #0f1117;
|
||||||
|
color: #e1e4e8;
|
||||||
|
min-height: 100vh;
|
||||||
|
padding: 1.5rem;
|
||||||
|
}
|
||||||
|
a { color: #58a6ff; text-decoration: none; }
|
||||||
|
a:hover { text-decoration: underline; }
|
||||||
|
|
||||||
|
.header {
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 1rem;
|
||||||
|
margin-bottom: 1.5rem;
|
||||||
|
flex-wrap: wrap;
|
||||||
|
}
|
||||||
|
.header h1 { font-size: 1.5rem; color: #58a6ff; }
|
||||||
|
.header .ip-label {
|
||||||
|
font-size: 1.1rem;
|
||||||
|
color: #8b949e;
|
||||||
|
font-family: monospace;
|
||||||
|
}
|
||||||
|
.header .home-link { margin-left: auto; }
|
||||||
|
|
||||||
|
/* Stats cards */
|
||||||
|
.stats {
|
||||||
|
display: grid;
|
||||||
|
grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
|
||||||
|
gap: 1rem;
|
||||||
|
margin-bottom: 1.5rem;
|
||||||
|
}
|
||||||
|
.stat-card {
|
||||||
|
background: #161b22;
|
||||||
|
border: 1px solid #30363d;
|
||||||
|
border-radius: 8px;
|
||||||
|
padding: 1rem;
|
||||||
|
}
|
||||||
|
.stat-card .label {
|
||||||
|
color: #8b949e;
|
||||||
|
font-size: 0.8rem;
|
||||||
|
text-transform: uppercase;
|
||||||
|
letter-spacing: 0.05em;
|
||||||
|
}
|
||||||
|
.stat-card .value {
|
||||||
|
font-size: 1.4rem;
|
||||||
|
font-weight: 600;
|
||||||
|
margin-top: 0.25rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Table */
|
||||||
|
.table-wrap {
|
||||||
|
overflow-x: auto;
|
||||||
|
margin-bottom: 1.5rem;
|
||||||
|
}
|
||||||
|
table {
|
||||||
|
width: 100%;
|
||||||
|
border-collapse: collapse;
|
||||||
|
background: #161b22;
|
||||||
|
border-radius: 8px;
|
||||||
|
overflow: hidden;
|
||||||
|
}
|
||||||
|
th, td {
|
||||||
|
padding: 0.6rem 1rem;
|
||||||
|
text-align: left;
|
||||||
|
border-bottom: 1px solid #21262d;
|
||||||
|
white-space: nowrap;
|
||||||
|
}
|
||||||
|
th {
|
||||||
|
background: #0d1117;
|
||||||
|
color: #8b949e;
|
||||||
|
font-size: 0.8rem;
|
||||||
|
text-transform: uppercase;
|
||||||
|
letter-spacing: 0.04em;
|
||||||
|
}
|
||||||
|
tr { cursor: pointer; }
|
||||||
|
tr:hover td { background: #1c2128; }
|
||||||
|
tr.selected td { background: #1f3a5f; }
|
||||||
|
|
||||||
|
.proto-tcp { color: #3fb950; }
|
||||||
|
.proto-udp { color: #d29922; }
|
||||||
|
.dir-tx { color: #f78166; }
|
||||||
|
.dir-rx { color: #58a6ff; }
|
||||||
|
.dir-both { color: #bc8cff; }
|
||||||
|
|
||||||
|
/* Chart area */
|
||||||
|
.chart-section {
|
||||||
|
background: #161b22;
|
||||||
|
border: 1px solid #30363d;
|
||||||
|
border-radius: 8px;
|
||||||
|
padding: 1.5rem;
|
||||||
|
margin-bottom: 1.5rem;
|
||||||
|
}
|
||||||
|
.chart-section h2 {
|
||||||
|
font-size: 1rem;
|
||||||
|
color: #8b949e;
|
||||||
|
margin-bottom: 1rem;
|
||||||
|
}
|
||||||
|
.chart-container {
|
||||||
|
position: relative;
|
||||||
|
width: 100%;
|
||||||
|
max-height: 360px;
|
||||||
|
}
|
||||||
|
.chart-placeholder {
|
||||||
|
text-align: center;
|
||||||
|
color: #484f58;
|
||||||
|
padding: 3rem 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.footer {
|
||||||
|
text-align: center;
|
||||||
|
color: #484f58;
|
||||||
|
font-size: 0.8rem;
|
||||||
|
margin-top: 2rem;
|
||||||
|
}
|
||||||
|
.no-data {
|
||||||
|
text-align: center;
|
||||||
|
padding: 3rem;
|
||||||
|
color: #484f58;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
|
||||||
|
<div class="header">
|
||||||
|
<h1>btest-rs</h1>
|
||||||
|
<span class="ip-label">{{ ip }}</span>
|
||||||
|
<span class="home-link"><a href="/">Home</a></span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Stats summary (filled via API) -->
|
||||||
|
<div class="stats" id="stats-grid">
|
||||||
|
<div class="stat-card">
|
||||||
|
<div class="label">Total Tests</div>
|
||||||
|
<div class="value" id="stat-total-tests">—</div>
|
||||||
|
</div>
|
||||||
|
<div class="stat-card">
|
||||||
|
<div class="label">Total TX</div>
|
||||||
|
<div class="value" id="stat-total-tx">—</div>
|
||||||
|
</div>
|
||||||
|
<div class="stat-card">
|
||||||
|
<div class="label">Total RX</div>
|
||||||
|
<div class="value" id="stat-total-rx">—</div>
|
||||||
|
</div>
|
||||||
|
<div class="stat-card">
|
||||||
|
<div class="label">Avg TX Mbps</div>
|
||||||
|
<div class="value" id="stat-avg-tx">—</div>
|
||||||
|
</div>
|
||||||
|
<div class="stat-card">
|
||||||
|
<div class="label">Avg RX Mbps</div>
|
||||||
|
<div class="value" id="stat-avg-rx">—</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Chart for selected session -->
|
||||||
|
<div class="chart-section">
|
||||||
|
<h2 id="chart-title">Select a test below to view its throughput chart</h2>
|
||||||
|
<div class="chart-container">
|
||||||
|
<canvas id="throughput-chart"></canvas>
|
||||||
|
<div class="chart-placeholder" id="chart-placeholder">Click a row in the table to load the throughput graph for that session.</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Sessions table -->
|
||||||
|
<div class="table-wrap">
|
||||||
|
<table>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>#</th>
|
||||||
|
<th>Date</th>
|
||||||
|
<th>Protocol</th>
|
||||||
|
<th>Direction</th>
|
||||||
|
<th>TX Bytes</th>
|
||||||
|
<th>RX Bytes</th>
|
||||||
|
<th>Duration</th>
|
||||||
|
<th>Avg TX Mbps</th>
|
||||||
|
<th>Avg RX Mbps</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="sessions-body">
|
||||||
|
<tr><td colspan="9" class="no-data">Loading sessions...</td></tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="footer">Powered by btest-rs</div>
|
||||||
|
|
||||||
|
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
|
||||||
|
<script>
|
||||||
|
var currentIp = "{{ ip }}";
|
||||||
|
var throughputChart = null;
|
||||||
|
|
||||||
|
function formatBytes(b) {
|
||||||
|
if (b === 0) return '0 B';
|
||||||
|
var units = ['B', 'KB', 'MB', 'GB', 'TB'];
|
||||||
|
var i = Math.floor(Math.log(b) / Math.log(1024));
|
||||||
|
if (i >= units.length) i = units.length - 1;
|
||||||
|
return (b / Math.pow(1024, i)).toFixed(1) + ' ' + units[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatMbps(bytesPerSec) {
|
||||||
|
return (bytesPerSec * 8 / 1e6).toFixed(2);
|
||||||
|
}
|
||||||
|
|
||||||
|
function durationStr(startedAt, endedAt) {
|
||||||
|
if (!startedAt || !endedAt) return '--';
|
||||||
|
var ms = new Date(endedAt) - new Date(startedAt);
|
||||||
|
if (ms < 0) return '--';
|
||||||
|
var s = Math.round(ms / 1000);
|
||||||
|
if (s < 60) return s + 's';
|
||||||
|
return Math.floor(s / 60) + 'm ' + (s % 60) + 's';
|
||||||
|
}
|
||||||
|
|
||||||
|
function durationSec(startedAt, endedAt) {
|
||||||
|
if (!startedAt || !endedAt) return 0;
|
||||||
|
var ms = new Date(endedAt) - new Date(startedAt);
|
||||||
|
return Math.max(ms / 1000, 0.001);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load summary stats
|
||||||
|
fetch('/api/ip/' + encodeURIComponent(currentIp) + '/stats')
|
||||||
|
.then(function(r) { return r.json(); })
|
||||||
|
.then(function(data) {
|
||||||
|
document.getElementById('stat-total-tests').textContent = data.total_sessions || 0;
|
||||||
|
document.getElementById('stat-total-tx').textContent = formatBytes(data.total_tx_bytes || 0);
|
||||||
|
document.getElementById('stat-total-rx').textContent = formatBytes(data.total_rx_bytes || 0);
|
||||||
|
document.getElementById('stat-avg-tx').textContent = data.avg_tx_mbps ? data.avg_tx_mbps.toFixed(2) : '0.00';
|
||||||
|
document.getElementById('stat-avg-rx').textContent = data.avg_rx_mbps ? data.avg_rx_mbps.toFixed(2) : '0.00';
|
||||||
|
})
|
||||||
|
.catch(function() {});
|
||||||
|
|
||||||
|
// Load sessions list
|
||||||
|
fetch('/api/ip/' + encodeURIComponent(currentIp) + '/sessions')
|
||||||
|
.then(function(r) { return r.json(); })
|
||||||
|
.then(function(sessions) {
|
||||||
|
var tbody = document.getElementById('sessions-body');
|
||||||
|
if (!sessions || sessions.length === 0) {
|
||||||
|
tbody.innerHTML = '<tr><td colspan="9" class="no-data">No test sessions found for this IP.</td></tr>';
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
tbody.innerHTML = '';
|
||||||
|
sessions.forEach(function(s, i) {
|
||||||
|
var tr = document.createElement('tr');
|
||||||
|
tr.dataset.sessionId = s.id;
|
||||||
|
tr.onclick = function() { selectSession(s.id, tr); };
|
||||||
|
|
||||||
|
var dur = durationSec(s.started_at, s.ended_at);
|
||||||
|
var avgTx = dur > 0 ? formatMbps(s.tx_bytes / dur) : '0.00';
|
||||||
|
var avgRx = dur > 0 ? formatMbps(s.rx_bytes / dur) : '0.00';
|
||||||
|
var proto = (s.protocol || 'TCP').toUpperCase();
|
||||||
|
var dir = (s.direction || 'BOTH').toUpperCase();
|
||||||
|
var protoClass = proto === 'UDP' ? 'proto-udp' : 'proto-tcp';
|
||||||
|
var dirClass = dir === 'TX' ? 'dir-tx' : dir === 'RX' ? 'dir-rx' : 'dir-both';
|
||||||
|
|
||||||
|
tr.innerHTML =
|
||||||
|
'<td>' + (i + 1) + '</td>' +
|
||||||
|
'<td>' + (s.started_at || '--') + '</td>' +
|
||||||
|
'<td class="' + protoClass + '">' + proto + '</td>' +
|
||||||
|
'<td class="' + dirClass + '">' + dir + '</td>' +
|
||||||
|
'<td>' + formatBytes(s.tx_bytes || 0) + '</td>' +
|
||||||
|
'<td>' + formatBytes(s.rx_bytes || 0) + '</td>' +
|
||||||
|
'<td>' + durationStr(s.started_at, s.ended_at) + '</td>' +
|
||||||
|
'<td>' + avgTx + '</td>' +
|
||||||
|
'<td>' + avgRx + '</td>';
|
||||||
|
tbody.appendChild(tr);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Auto-select the first (most recent) session
|
||||||
|
if (sessions.length > 0) {
|
||||||
|
var firstRow = tbody.querySelector('tr');
|
||||||
|
if (firstRow) selectSession(sessions[0].id, firstRow);
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.catch(function() {
|
||||||
|
document.getElementById('sessions-body').innerHTML =
|
||||||
|
'<tr><td colspan="9" class="no-data">Failed to load sessions.</td></tr>';
|
||||||
|
});
|
||||||
|
|
||||||
|
function selectSession(sessionId, rowEl) {
|
||||||
|
// Highlight selected row
|
||||||
|
var rows = document.querySelectorAll('#sessions-body tr');
|
||||||
|
rows.forEach(function(r) { r.classList.remove('selected'); });
|
||||||
|
rowEl.classList.add('selected');
|
||||||
|
|
||||||
|
document.getElementById('chart-title').textContent = 'Throughput for session #' + sessionId;
|
||||||
|
document.getElementById('chart-placeholder').style.display = 'none';
|
||||||
|
|
||||||
|
fetch('/api/session/' + sessionId + '/intervals')
|
||||||
|
.then(function(r) { return r.json(); })
|
||||||
|
.then(function(intervals) {
|
||||||
|
renderChart(intervals);
|
||||||
|
})
|
||||||
|
.catch(function() {
|
||||||
|
document.getElementById('chart-placeholder').style.display = 'block';
|
||||||
|
document.getElementById('chart-placeholder').textContent = 'Failed to load interval data.';
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function renderChart(intervals) {
|
||||||
|
var canvas = document.getElementById('throughput-chart');
|
||||||
|
if (throughputChart) {
|
||||||
|
throughputChart.destroy();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!intervals || intervals.length === 0) {
|
||||||
|
document.getElementById('chart-placeholder').style.display = 'block';
|
||||||
|
document.getElementById('chart-placeholder').textContent = 'No interval data available for this session.';
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
var labels = intervals.map(function(d) { return d.second + 's'; });
|
||||||
|
var txData = intervals.map(function(d) { return (d.tx_bytes * 8 / 1e6).toFixed(2); });
|
||||||
|
var rxData = intervals.map(function(d) { return (d.rx_bytes * 8 / 1e6).toFixed(2); });
|
||||||
|
|
||||||
|
throughputChart = new Chart(canvas, {
|
||||||
|
type: 'line',
|
||||||
|
data: {
|
||||||
|
labels: labels,
|
||||||
|
datasets: [
|
||||||
|
{
|
||||||
|
label: 'TX Mbps',
|
||||||
|
data: txData,
|
||||||
|
borderColor: '#f78166',
|
||||||
|
backgroundColor: 'rgba(247, 129, 102, 0.1)',
|
||||||
|
borderWidth: 2,
|
||||||
|
fill: true,
|
||||||
|
tension: 0.3,
|
||||||
|
pointRadius: 1
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: 'RX Mbps',
|
||||||
|
data: rxData,
|
||||||
|
borderColor: '#58a6ff',
|
||||||
|
backgroundColor: 'rgba(88, 166, 255, 0.1)',
|
||||||
|
borderWidth: 2,
|
||||||
|
fill: true,
|
||||||
|
tension: 0.3,
|
||||||
|
pointRadius: 1
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
options: {
|
||||||
|
responsive: true,
|
||||||
|
maintainAspectRatio: false,
|
||||||
|
interaction: {
|
||||||
|
intersect: false,
|
||||||
|
mode: 'index'
|
||||||
|
},
|
||||||
|
scales: {
|
||||||
|
x: {
|
||||||
|
title: { display: true, text: 'Time', color: '#8b949e' },
|
||||||
|
ticks: { color: '#8b949e' },
|
||||||
|
grid: { color: '#21262d' }
|
||||||
|
},
|
||||||
|
y: {
|
||||||
|
title: { display: true, text: 'Mbps', color: '#8b949e' },
|
||||||
|
ticks: { color: '#8b949e' },
|
||||||
|
grid: { color: '#21262d' },
|
||||||
|
beginAtZero: true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
plugins: {
|
||||||
|
legend: {
|
||||||
|
labels: { color: '#e1e4e8' }
|
||||||
|
},
|
||||||
|
tooltip: {
|
||||||
|
backgroundColor: '#161b22',
|
||||||
|
borderColor: '#30363d',
|
||||||
|
borderWidth: 1,
|
||||||
|
titleColor: '#e1e4e8',
|
||||||
|
bodyColor: '#8b949e'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
160
src/server_pro/web/templates/index.html
Normal file
160
src/server_pro/web/templates/index.html
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||||
|
<title>btest-rs Public Bandwidth Test Server</title>
|
||||||
|
<style>
|
||||||
|
* { margin: 0; padding: 0; box-sizing: border-box; }
|
||||||
|
body {
|
||||||
|
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||||
|
background: #0f1117;
|
||||||
|
color: #e1e4e8;
|
||||||
|
min-height: 100vh;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
}
|
||||||
|
.container {
|
||||||
|
max-width: 560px;
|
||||||
|
width: 90%;
|
||||||
|
text-align: center;
|
||||||
|
padding: 2rem;
|
||||||
|
}
|
||||||
|
h1 {
|
||||||
|
font-size: 2rem;
|
||||||
|
margin-bottom: 0.5rem;
|
||||||
|
color: #58a6ff;
|
||||||
|
}
|
||||||
|
.subtitle {
|
||||||
|
color: #8b949e;
|
||||||
|
margin-bottom: 2rem;
|
||||||
|
line-height: 1.5;
|
||||||
|
}
|
||||||
|
.search-box {
|
||||||
|
display: flex;
|
||||||
|
gap: 0.5rem;
|
||||||
|
margin-bottom: 1.5rem;
|
||||||
|
}
|
||||||
|
.search-box input {
|
||||||
|
flex: 1;
|
||||||
|
padding: 0.75rem 1rem;
|
||||||
|
border: 1px solid #30363d;
|
||||||
|
border-radius: 6px;
|
||||||
|
background: #161b22;
|
||||||
|
color: #e1e4e8;
|
||||||
|
font-size: 1rem;
|
||||||
|
outline: none;
|
||||||
|
}
|
||||||
|
.search-box input:focus {
|
||||||
|
border-color: #58a6ff;
|
||||||
|
}
|
||||||
|
.search-box input::placeholder {
|
||||||
|
color: #484f58;
|
||||||
|
}
|
||||||
|
.search-box button {
|
||||||
|
padding: 0.75rem 1.5rem;
|
||||||
|
background: #238636;
|
||||||
|
color: #fff;
|
||||||
|
border: none;
|
||||||
|
border-radius: 6px;
|
||||||
|
font-size: 1rem;
|
||||||
|
cursor: pointer;
|
||||||
|
white-space: nowrap;
|
||||||
|
}
|
||||||
|
.search-box button:hover {
|
||||||
|
background: #2ea043;
|
||||||
|
}
|
||||||
|
.info {
|
||||||
|
background: #161b22;
|
||||||
|
border: 1px solid #30363d;
|
||||||
|
border-radius: 8px;
|
||||||
|
padding: 1.5rem;
|
||||||
|
text-align: left;
|
||||||
|
line-height: 1.6;
|
||||||
|
color: #8b949e;
|
||||||
|
}
|
||||||
|
.info h3 {
|
||||||
|
color: #e1e4e8;
|
||||||
|
margin-bottom: 0.5rem;
|
||||||
|
}
|
||||||
|
.info code {
|
||||||
|
background: #0d1117;
|
||||||
|
padding: 0.15rem 0.4rem;
|
||||||
|
border-radius: 4px;
|
||||||
|
font-size: 0.9em;
|
||||||
|
color: #58a6ff;
|
||||||
|
}
|
||||||
|
.auto-link {
|
||||||
|
margin-top: 1rem;
|
||||||
|
font-size: 0.9rem;
|
||||||
|
}
|
||||||
|
.auto-link a {
|
||||||
|
color: #58a6ff;
|
||||||
|
text-decoration: none;
|
||||||
|
}
|
||||||
|
.auto-link a:hover {
|
||||||
|
text-decoration: underline;
|
||||||
|
}
|
||||||
|
.footer {
|
||||||
|
margin-top: 2rem;
|
||||||
|
color: #484f58;
|
||||||
|
font-size: 0.8rem;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div class="container">
|
||||||
|
<h1>btest-rs</h1>
|
||||||
|
<p class="subtitle">Public MikroTik Bandwidth Test Server — view your test results and history.</p>
|
||||||
|
|
||||||
|
<form class="search-box" id="ip-form" onsubmit="return goToDashboard()">
|
||||||
|
<input type="text" id="ip-input" placeholder="Enter your IP address (e.g. 203.0.113.5)" autocomplete="off">
|
||||||
|
<button type="submit">View Results</button>
|
||||||
|
</form>
|
||||||
|
|
||||||
|
<div class="auto-link" id="auto-detect">
|
||||||
|
Detecting your IP...
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="info">
|
||||||
|
<h3>How it works</h3>
|
||||||
|
<p>
|
||||||
|
Run a bandwidth test from your MikroTik router targeting this server.
|
||||||
|
After the test completes, enter your public IP above to see
|
||||||
|
throughput charts, session history, and aggregate statistics.
|
||||||
|
</p>
|
||||||
|
<p style="margin-top: 0.5rem;">
|
||||||
|
Example: <code>/tool bandwidth-test address=this-server protocol=tcp direction=both</code>
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="footer">Powered by btest-rs</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<script>
|
||||||
|
function goToDashboard() {
|
||||||
|
var ip = document.getElementById('ip-input').value.trim();
|
||||||
|
if (ip) {
|
||||||
|
window.location.href = '/dashboard/' + encodeURIComponent(ip);
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Auto-detect visitor IP and offer a direct link
|
||||||
|
fetch('https://api.ipify.org?format=json')
|
||||||
|
.then(function(r) { return r.json(); })
|
||||||
|
.then(function(data) {
|
||||||
|
if (data.ip) {
|
||||||
|
document.getElementById('ip-input').value = data.ip;
|
||||||
|
document.getElementById('auto-detect').innerHTML =
|
||||||
|
'Detected IP: <a href="/dashboard/' + encodeURIComponent(data.ip) + '">' + data.ip + '</a> — click to view your dashboard';
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.catch(function() {
|
||||||
|
document.getElementById('auto-detect').textContent = '';
|
||||||
|
});
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
@@ -36,12 +36,12 @@ pub fn init(target: &str) -> std::io::Result<()> {
|
|||||||
/// Send a syslog message with the given severity and message.
|
/// Send a syslog message with the given severity and message.
|
||||||
/// Severity: 6=info, 4=warning, 3=error
|
/// Severity: 6=info, 4=warning, 3=error
|
||||||
fn send(severity: u8, msg: &str) {
|
fn send(severity: u8, msg: &str) {
|
||||||
let guard = SYSLOG.lock().unwrap();
|
// Format timestamp outside the lock to minimize contention
|
||||||
if let Some(ref sender) = *guard {
|
|
||||||
// RFC 3164 (BSD syslog): <priority>Mon DD HH:MM:SS hostname program: message
|
|
||||||
// facility=16 (local0) * 8 + severity
|
|
||||||
let priority = 128 + severity;
|
let priority = 128 + severity;
|
||||||
let timestamp = bsd_timestamp();
|
let timestamp = bsd_timestamp();
|
||||||
|
|
||||||
|
let guard = SYSLOG.lock().unwrap();
|
||||||
|
if let Some(ref sender) = *guard {
|
||||||
let syslog_msg = format!(
|
let syslog_msg = format!(
|
||||||
"<{}>{} {} btest-rs: {}",
|
"<{}>{} {} btest-rs: {}",
|
||||||
priority, timestamp, sender.hostname, msg,
|
priority, timestamp, sender.hostname, msg,
|
||||||
@@ -52,44 +52,7 @@ fn send(severity: u8, msg: &str) {
|
|||||||
|
|
||||||
fn bsd_timestamp() -> String {
|
fn bsd_timestamp() -> String {
|
||||||
// RFC 3164 format: "Mon DD HH:MM:SS" (no year)
|
// RFC 3164 format: "Mon DD HH:MM:SS" (no year)
|
||||||
use std::time::{SystemTime, UNIX_EPOCH};
|
chrono::Local::now().format("%b %e %H:%M:%S").to_string()
|
||||||
let now = SystemTime::now()
|
|
||||||
.duration_since(UNIX_EPOCH)
|
|
||||||
.unwrap_or_default()
|
|
||||||
.as_secs();
|
|
||||||
|
|
||||||
// Simple conversion — good enough for syslog
|
|
||||||
let secs_in_day = 86400u64;
|
|
||||||
let days = now / secs_in_day;
|
|
||||||
let time_of_day = now % secs_in_day;
|
|
||||||
let hours = time_of_day / 3600;
|
|
||||||
let minutes = (time_of_day % 3600) / 60;
|
|
||||||
let seconds = time_of_day % 60;
|
|
||||||
|
|
||||||
// Day of year calculation (approximate months)
|
|
||||||
let months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];
|
|
||||||
let days_in_months = [31u64,28,31,30,31,30,31,31,30,31,30,31];
|
|
||||||
|
|
||||||
// Days since epoch to year/month/day
|
|
||||||
let mut y = 1970u64;
|
|
||||||
let mut remaining = days;
|
|
||||||
loop {
|
|
||||||
let leap = if y % 4 == 0 && (y % 100 != 0 || y % 400 == 0) { 366 } else { 365 };
|
|
||||||
if remaining < leap { break; }
|
|
||||||
remaining -= leap;
|
|
||||||
y += 1;
|
|
||||||
}
|
|
||||||
let leap = y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
|
|
||||||
let mut m = 0usize;
|
|
||||||
for i in 0..12 {
|
|
||||||
let mut d = days_in_months[i];
|
|
||||||
if i == 1 && leap { d += 1; }
|
|
||||||
if remaining < d { m = i; break; }
|
|
||||||
remaining -= d;
|
|
||||||
}
|
|
||||||
let day = remaining + 1;
|
|
||||||
|
|
||||||
format!("{} {:2} {:02}:{:02}:{:02}", months[m], day, hours, minutes, seconds)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Public logging functions ---
|
// --- Public logging functions ---
|
||||||
|
|||||||
@@ -235,7 +235,7 @@ async fn test_csv_created_client() {
|
|||||||
// Initialize CSV
|
// Initialize CSV
|
||||||
btest_rs::csv_output::init(&csv_path).unwrap();
|
btest_rs::csv_output::init(&csv_path).unwrap();
|
||||||
|
|
||||||
let (tx, rx, lost, intervals) = run_client_test(
|
let (tx, rx, lost, _intervals) = run_client_test(
|
||||||
"127.0.0.1", port, false, true, false, None, None,
|
"127.0.0.1", port, false, true, false, None, None,
|
||||||
).await;
|
).await;
|
||||||
|
|
||||||
@@ -336,3 +336,67 @@ async fn test_bandwidth_state_running_flag() {
|
|||||||
state.running.store(false, Ordering::SeqCst);
|
state.running.store(false, Ordering::SeqCst);
|
||||||
assert!(!state.running.load(Ordering::Relaxed));
|
assert!(!state.running.load(Ordering::Relaxed));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// --- CPU Reporting Tests ---
|
||||||
|
|
||||||
|
/// Helper that returns the full BandwidthState (not just summary) so we can check remote_cpu.
|
||||||
|
async fn run_client_with_state(
|
||||||
|
host: &str, port: u16, transmit: bool, receive: bool, udp: bool,
|
||||||
|
secs: u64,
|
||||||
|
) -> std::sync::Arc<btest_rs::bandwidth::BandwidthState> {
|
||||||
|
let direction = match (transmit, receive) {
|
||||||
|
(true, false) => btest_rs::protocol::CMD_DIR_RX,
|
||||||
|
(false, true) => btest_rs::protocol::CMD_DIR_TX,
|
||||||
|
(true, true) => btest_rs::protocol::CMD_DIR_BOTH,
|
||||||
|
_ => panic!("must specify direction"),
|
||||||
|
};
|
||||||
|
let state = btest_rs::bandwidth::BandwidthState::new();
|
||||||
|
let state_clone = state.clone();
|
||||||
|
let host = host.to_string();
|
||||||
|
|
||||||
|
let handle = tokio::spawn(async move {
|
||||||
|
btest_rs::client::run_client(
|
||||||
|
&host, port, direction, udp,
|
||||||
|
0, 0, None, None, false, state_clone,
|
||||||
|
).await
|
||||||
|
});
|
||||||
|
|
||||||
|
tokio::time::sleep(Duration::from_secs(secs)).await;
|
||||||
|
state.running.store(false, Ordering::SeqCst);
|
||||||
|
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||||
|
handle.abort();
|
||||||
|
|
||||||
|
state
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_local_cpu_nonzero() {
|
||||||
|
// CPU sampler should return > 0 on supported platforms after warming up
|
||||||
|
btest_rs::cpu::start_sampler();
|
||||||
|
std::thread::sleep(Duration::from_secs(2));
|
||||||
|
let cpu = btest_rs::cpu::get();
|
||||||
|
// On CI or idle machines, CPU may genuinely be 0, so just check it doesn't panic
|
||||||
|
// and returns a value in range
|
||||||
|
assert!(cpu <= 100, "CPU should be 0-100, got {}", cpu);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_tcp_remote_cpu_both() {
|
||||||
|
let port = BASE_PORT + 20;
|
||||||
|
start_server_noauth(port).await;
|
||||||
|
let state = run_client_with_state("127.0.0.1", port, true, true, false, 3).await;
|
||||||
|
let remote_cpu = state.remote_cpu.load(Ordering::Relaxed);
|
||||||
|
// On loopback with bidirectional traffic, server CPU should be > 0
|
||||||
|
// The status messages are interleaved in the TCP data stream
|
||||||
|
assert!(remote_cpu > 0, "TCP BOTH: remote CPU should be > 0 on loopback, got {}", remote_cpu);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_tcp_remote_cpu_tx_only() {
|
||||||
|
let port = BASE_PORT + 21;
|
||||||
|
start_server_noauth(port).await;
|
||||||
|
let state = run_client_with_state("127.0.0.1", port, true, false, false, 3).await;
|
||||||
|
let remote_cpu = state.remote_cpu.load(Ordering::Relaxed);
|
||||||
|
// TX-only: server sends status messages that the status reader should parse
|
||||||
|
assert!(remote_cpu > 0, "TCP TX-only: remote CPU should be > 0 on loopback, got {}", remote_cpu);
|
||||||
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user