Compare commits
23 Commits
v0.6.0
...
da76c76c93
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
da76c76c93 | ||
|
|
27c69d8982 | ||
|
|
2cb8519c95 | ||
|
|
9ca124cb76 | ||
|
|
c06a4d0c9a | ||
|
|
817535a0ad | ||
|
|
ba02ed36b5 | ||
|
|
4cdcc4e6c4 | ||
|
|
7dd4820d2c | ||
|
|
2087e5a75f | ||
|
|
9e3cd6d6d4 | ||
|
|
4403eae4b9 | ||
|
|
c08bcffaff | ||
|
|
d61fdb1b94 | ||
|
|
89391e1781 | ||
|
|
d2fdc9c6ae | ||
|
|
8c853c3605 | ||
|
|
fe28c04c19 | ||
|
|
66be99bef0 | ||
|
|
94b122ac25 | ||
|
|
a07158ed22 | ||
|
|
1cd552d2dc | ||
|
|
3af40cb275 |
@@ -14,7 +14,7 @@ jobs:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt-get update && apt-get install -y --no-install-recommends \
|
||||
git curl jq ca-certificates zip \
|
||||
git curl jq ca-certificates zip unzip \
|
||||
musl-tools \
|
||||
gcc-aarch64-linux-gnu \
|
||||
gcc-arm-linux-gnueabihf \
|
||||
@@ -23,7 +23,14 @@ jobs:
|
||||
x86_64-unknown-linux-musl \
|
||||
aarch64-unknown-linux-musl \
|
||||
armv7-unknown-linux-musleabihf \
|
||||
x86_64-pc-windows-gnu
|
||||
x86_64-pc-windows-gnu \
|
||||
aarch64-linux-android \
|
||||
armv7-linux-androideabi
|
||||
# Install Android NDK for cross-compilation
|
||||
NDK_VER=r27c
|
||||
curl -sL https://dl.google.com/android/repository/android-ndk-${NDK_VER}-linux.zip -o /tmp/ndk.zip
|
||||
unzip -q /tmp/ndk.zip -d /opt && rm /tmp/ndk.zip
|
||||
export ANDROID_NDK_HOME=/opt/android-ndk-${NDK_VER}
|
||||
|
||||
- name: Ensure code is present
|
||||
run: |
|
||||
@@ -47,6 +54,12 @@ jobs:
|
||||
|
||||
[target.x86_64-pc-windows-gnu]
|
||||
linker = "x86_64-w64-mingw32-gcc"
|
||||
|
||||
[target.aarch64-linux-android]
|
||||
linker = "/opt/android-ndk-r27c/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android35-clang"
|
||||
|
||||
[target.armv7-linux-androideabi]
|
||||
linker = "/opt/android-ndk-r27c/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi35-clang"
|
||||
TOML
|
||||
|
||||
- name: Build Linux x86_64
|
||||
@@ -61,6 +74,12 @@ jobs:
|
||||
- name: Build Windows x86_64
|
||||
run: cargo build --release --target x86_64-pc-windows-gnu
|
||||
|
||||
- name: Build Android aarch64 (ARMv8)
|
||||
run: cargo build --release --target aarch64-linux-android
|
||||
|
||||
- name: Build Android armv7 (ARMv7)
|
||||
run: cargo build --release --target armv7-linux-androideabi
|
||||
|
||||
- name: Package all
|
||||
run: |
|
||||
mkdir -p /artifacts
|
||||
@@ -81,6 +100,14 @@ jobs:
|
||||
zip /artifacts/btest-windows-x86_64.zip btest.exe
|
||||
cd -
|
||||
|
||||
cd target/aarch64-linux-android/release
|
||||
tar czf /artifacts/btest-android-aarch64.tar.gz btest
|
||||
cd -
|
||||
|
||||
cd target/armv7-linux-androideabi/release
|
||||
tar czf /artifacts/btest-android-armv7.tar.gz btest
|
||||
cd -
|
||||
|
||||
cd /artifacts
|
||||
sha256sum * > checksums-sha256.txt
|
||||
cat checksums-sha256.txt
|
||||
@@ -103,6 +130,8 @@ jobs:
|
||||
| Linux | aarch64 (RPi 64-bit) | btest-linux-aarch64.tar.gz |
|
||||
| Linux | armv7 (RPi 32-bit) | btest-linux-armv7.tar.gz |
|
||||
| Windows | x86_64 | btest-windows-x86_64.zip |
|
||||
| Android | aarch64 (ARMv8, Termux) | btest-android-aarch64.tar.gz |
|
||||
| Android | armv7 (ARMv7, Termux) | btest-android-armv7.tar.gz |
|
||||
| macOS | aarch64 / x86_64 | Run \`scripts/build-macos-release.sh --upload ${TAG}\` |
|
||||
| Docker | x86_64 | \`docker pull ${REGISTRY}/manawenuz/btest-rs:${TAG}\` |
|
||||
|
||||
|
||||
1434
Cargo.lock
generated
1434
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
30
Cargo.toml
30
Cargo.toml
@@ -16,6 +16,23 @@ path = "src/lib.rs"
|
||||
name = "btest"
|
||||
path = "src/main.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "btest-client"
|
||||
path = "src/bin/client_only.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "btest-server"
|
||||
path = "src/bin/server_only.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "btest-server-pro"
|
||||
path = "src/server_pro/main.rs"
|
||||
required-features = ["pro"]
|
||||
|
||||
[features]
|
||||
default = []
|
||||
pro = ["dep:rusqlite", "dep:ldap3", "dep:axum", "dep:tower-http", "dep:serde", "dep:serde_json", "dep:askama"]
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
@@ -32,9 +49,22 @@ num-traits = "0.2.19"
|
||||
num-integer = "0.1.46"
|
||||
sha2 = "0.11.0"
|
||||
hostname = "0.4.2"
|
||||
rusqlite = { version = "0.39.0", features = ["bundled"], optional = true }
|
||||
ldap3 = { version = "0.12.1", optional = true }
|
||||
axum = { version = "0.8.8", features = ["tokio"], optional = true }
|
||||
tower-http = { version = "0.6.8", features = ["fs", "cors"], optional = true }
|
||||
serde = { version = "1.0.228", features = ["derive"], optional = true }
|
||||
serde_json = { version = "1.0.149", optional = true }
|
||||
askama = { version = "0.15.6", optional = true }
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
lto = true
|
||||
strip = true
|
||||
codegen-units = 1
|
||||
|
||||
# Minimal size profile for embedded/OpenWrt targets
|
||||
[profile.release-small]
|
||||
inherits = "release"
|
||||
opt-level = "z"
|
||||
panic = "abort"
|
||||
|
||||
99
README.md
99
README.md
@@ -2,6 +2,25 @@
|
||||
|
||||
A Rust reimplementation of the [MikroTik Bandwidth Test (btest)](https://wiki.mikrotik.com/wiki/Manual:Tools/Bandwidth_Test) protocol. Both server and client modes, fully compatible with MikroTik RouterOS devices.
|
||||
|
||||
## Free Public Servers
|
||||
|
||||
Test your MikroTik link speed right now — no setup, no registration:
|
||||
|
||||
| Server | Location | Dashboard |
|
||||
|--------|----------|-----------|
|
||||
| `104.225.217.60` | US | [btest.home.kg](https://btest.home.kg) |
|
||||
| `188.245.59.196` | EU | [btest.mikata.ru](https://btest.mikata.ru) |
|
||||
|
||||
```
|
||||
/tool bandwidth-test address=104.225.217.60 user=btest password=btest protocol=tcp direction=both
|
||||
```
|
||||
|
||||
After the test, visit `https://btest.home.kg/dashboard/YOUR_IP` to see your results, throughput history, and quota usage. Per-IP limits: 2 GB daily / 8 GB weekly / 24 GB monthly.
|
||||
|
||||
> **Note:** TCP is recommended for remote testing. UDP bidirectional through NAT will only show one direction — this is a btest protocol limitation, not specific to btest-rs. See [KNOWN_ISSUES.md](KNOWN_ISSUES.md) for details.
|
||||
|
||||
Want to run your own public server? Build with `cargo build --release --features pro` — see [Server Pro](#server-pro) below.
|
||||
|
||||
## Features
|
||||
|
||||
- **Full protocol support** -- TCP and UDP data transfer, IPv4 and IPv6
|
||||
@@ -16,7 +35,7 @@ A Rust reimplementation of the [MikroTik Bandwidth Test (btest)](https://wiki.mi
|
||||
- **Quiet mode** -- suppress terminal output for scripted/automated use
|
||||
- **NAT traversal** -- probe packet to open firewall holes for UDP receive
|
||||
- **Single static binary** -- ~2 MB, zero runtime dependencies (musl build)
|
||||
- **Cross-platform** -- macOS, Linux (x86_64, ARM64), Docker
|
||||
- **Cross-platform** -- macOS, Linux (x86_64, ARM64, ARMv7), Windows, Android (Termux), Docker
|
||||
- **Async I/O** -- tokio-based, handles many concurrent connections efficiently
|
||||
|
||||
## Performance
|
||||
@@ -42,14 +61,55 @@ On wired gigabit links, expect line-rate performance in both TCP and UDP modes.
|
||||
cargo install --path .
|
||||
```
|
||||
|
||||
### Pre-built binary (Linux x86_64)
|
||||
### Pre-built binaries
|
||||
|
||||
Download from [releases](https://git.manko.yoga/manawenuz/btest-rs/releases) or [GitHub releases](https://github.com/manawenuz/btest-rs/releases):
|
||||
|
||||
```bash
|
||||
# Cross-compile from macOS (requires Docker)
|
||||
scripts/build-linux.sh
|
||||
# Linux x86_64
|
||||
curl -L <release-url>/btest-linux-x86_64.tar.gz | tar xz
|
||||
sudo mv btest /usr/local/bin/
|
||||
|
||||
# Copy to server
|
||||
scp dist/btest root@yourserver:/usr/local/bin/btest
|
||||
# Raspberry Pi 4/5 (64-bit OS)
|
||||
curl -L <release-url>/btest-linux-aarch64.tar.gz | tar xz
|
||||
sudo mv btest /usr/local/bin/
|
||||
|
||||
# Raspberry Pi 3/Zero 2 (32-bit OS)
|
||||
curl -L <release-url>/btest-linux-armv7.tar.gz | tar xz
|
||||
sudo mv btest /usr/local/bin/
|
||||
|
||||
# Windows
|
||||
# Download btest-windows-x86_64.zip from releases
|
||||
|
||||
# Android (Termux, no root needed)
|
||||
curl -L <release-url>/btest-android-aarch64.tar.gz | tar xz
|
||||
mv btest $PREFIX/bin/
|
||||
```
|
||||
|
||||
### Raspberry Pi
|
||||
|
||||
The static musl binaries run on any Raspberry Pi without dependencies:
|
||||
|
||||
```bash
|
||||
# On the Pi — detect architecture and install
|
||||
ARCH=$(uname -m)
|
||||
case $ARCH in
|
||||
aarch64) FILE=btest-linux-aarch64.tar.gz ;;
|
||||
armv7l) FILE=btest-linux-armv7.tar.gz ;;
|
||||
*) echo "Unsupported: $ARCH"; exit 1 ;;
|
||||
esac
|
||||
|
||||
curl -LO "https://github.com/manawenuz/btest-rs/releases/latest/download/$FILE"
|
||||
tar xzf "$FILE"
|
||||
sudo mv btest /usr/local/bin/
|
||||
rm "$FILE"
|
||||
|
||||
# Run as server
|
||||
btest -s -a admin -p password --ecsrp5
|
||||
|
||||
# Or install as systemd service
|
||||
curl -LO https://raw.githubusercontent.com/manawenuz/btest-rs/main/scripts/install-service.sh
|
||||
sudo bash install-service.sh --auth-user admin --auth-pass password
|
||||
```
|
||||
|
||||
### Docker
|
||||
@@ -208,7 +268,9 @@ See [KNOWN_ISSUES.md](KNOWN_ISSUES.md) for the full list including:
|
||||
- **Windows binaries** — cross-compiled but untested
|
||||
- **IPv6 UDP on Linux** — untested, likely works fine
|
||||
|
||||
Contributions and bug reports welcome: https://git.manko.yoga/manawenuz/btest-rs/issues
|
||||
Contributions and bug reports welcome:
|
||||
- https://github.com/manawenuz/btest-rs/issues
|
||||
- https://git.manko.yoga/manawenuz/btest-rs/issues
|
||||
|
||||
## Documentation
|
||||
|
||||
@@ -228,6 +290,29 @@ scripts/test-mikrotik.sh <ip> # Test against MikroTik device
|
||||
scripts/test-docker.sh # Docker container test
|
||||
```
|
||||
|
||||
## Server Pro
|
||||
|
||||
An optional superset of the standard server with multi-user support, quotas, and a web dashboard. Build with `--features pro`:
|
||||
|
||||
```bash
|
||||
cargo build --release --features pro --bin btest-server-pro
|
||||
```
|
||||
|
||||
Features:
|
||||
- **SQLite user database** — add/remove users, per-user quotas
|
||||
- **Per-IP bandwidth quotas** — daily, weekly, monthly limits with inline byte budget enforcement
|
||||
- **Web dashboard** — session history, throughput stats, quota progress bars, JSON export
|
||||
- **TCP multi-connection** — handles MikroTik's default 20-connection mode
|
||||
- **MD5 auth against DB** — proper challenge-response verification
|
||||
|
||||
```bash
|
||||
# Create a user and start the server
|
||||
btest-server-pro --users-db users.db useradd btest btest
|
||||
btest-server-pro --users-db users.db --ip-daily 2147483648 --ip-weekly 8589934592 --web-port 8080
|
||||
```
|
||||
|
||||
The pro features are completely optional and don't affect the standard `btest` binary.
|
||||
|
||||
## Credits
|
||||
|
||||
- **[btest-opensource](https://github.com/samm-git/btest-opensource)** by [Alex Samorukov](https://github.com/samm-git) -- original C implementation and protocol reverse-engineering. Licensed under **MIT**.
|
||||
|
||||
52
deploy/alpine/APKBUILD
Normal file
52
deploy/alpine/APKBUILD
Normal file
@@ -0,0 +1,52 @@
|
||||
# Maintainer: Siavash Sameni <manwe at manko dot yoga>
|
||||
pkgname=btest-rs
|
||||
pkgver=0.6.0
|
||||
pkgrel=0
|
||||
pkgdesc="MikroTik Bandwidth Test server and client with EC-SRP5 auth"
|
||||
url="https://github.com/manawenuz/btest-rs"
|
||||
license="MIT AND Apache-2.0"
|
||||
arch="x86_64 aarch64 armv7"
|
||||
makedepends="cargo rust"
|
||||
install="$pkgname.pre-install"
|
||||
source="$pkgname-$pkgver.tar.gz::https://github.com/manawenuz/btest-rs/archive/refs/tags/v$pkgver.tar.gz
|
||||
btest.initd
|
||||
"
|
||||
sha256sums="SKIP
|
||||
SKIP
|
||||
"
|
||||
|
||||
prepare() {
|
||||
default_prepare
|
||||
cd "$builddir"
|
||||
cargo fetch --locked --target "$(rustc -vV | sed -n 's/host: //p')"
|
||||
}
|
||||
|
||||
build() {
|
||||
cd "$builddir"
|
||||
export CARGO_TARGET_DIR=target
|
||||
cargo build --frozen --release
|
||||
}
|
||||
|
||||
check() {
|
||||
cd "$builddir"
|
||||
cargo test --frozen --release
|
||||
}
|
||||
|
||||
package() {
|
||||
cd "$builddir"
|
||||
|
||||
# binary
|
||||
install -Dm755 "target/release/btest" "$pkgdir/usr/bin/btest"
|
||||
|
||||
# man page
|
||||
install -Dm644 "docs/man/btest.1" "$pkgdir/usr/share/man/man1/btest.1"
|
||||
|
||||
# license
|
||||
install -Dm644 "LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
|
||||
|
||||
# documentation
|
||||
install -Dm644 "README.md" "$pkgdir/usr/share/doc/$pkgname/README.md"
|
||||
|
||||
# OpenRC init script
|
||||
install -Dm755 "$srcdir/btest.initd" "$pkgdir/etc/init.d/btest"
|
||||
}
|
||||
37
deploy/alpine/btest.initd
Executable file
37
deploy/alpine/btest.initd
Executable file
@@ -0,0 +1,37 @@
|
||||
#!/sbin/openrc-run
|
||||
# OpenRC init script for btest-rs
|
||||
# MikroTik Bandwidth Test server
|
||||
|
||||
name="btest"
|
||||
description="MikroTik Bandwidth Test Server (btest-rs)"
|
||||
command="/usr/bin/btest"
|
||||
command_args="-s"
|
||||
command_background=true
|
||||
pidfile="/run/$name.pid"
|
||||
|
||||
# Run as dedicated user if it exists, otherwise root
|
||||
command_user="btest:btest"
|
||||
|
||||
# Logging
|
||||
output_log="/var/log/$name/$name.log"
|
||||
error_log="/var/log/$name/$name.err"
|
||||
|
||||
depend() {
|
||||
need net
|
||||
after firewall
|
||||
use dns logger
|
||||
}
|
||||
|
||||
start_pre() {
|
||||
# Create log directory
|
||||
checkpath -d -m 0755 -o "$command_user" /var/log/$name
|
||||
|
||||
# Create runtime directory
|
||||
checkpath -d -m 0755 -o "$command_user" /run
|
||||
}
|
||||
|
||||
stop() {
|
||||
ebegin "Stopping $name"
|
||||
start-stop-daemon --stop --pidfile "$pidfile" --retry TERM/5/KILL/3
|
||||
eend $?
|
||||
}
|
||||
118
deploy/alpine/test-alpine.sh
Executable file
118
deploy/alpine/test-alpine.sh
Executable file
@@ -0,0 +1,118 @@
|
||||
#!/bin/sh
|
||||
# Test Alpine Linux packaging for btest-rs
|
||||
# Runs inside an Alpine Docker container to build and verify the APK.
|
||||
#
|
||||
# Usage (from repository root):
|
||||
# docker run --rm -v "$PWD":/src alpine:latest /src/deploy/alpine/test-alpine.sh
|
||||
#
|
||||
set -eu
|
||||
|
||||
ALPINE_DIR="/src/deploy/alpine"
|
||||
|
||||
echo "=== Alpine APK packaging test ==="
|
||||
echo "Alpine version: $(cat /etc/alpine-release)"
|
||||
|
||||
# ── Install build dependencies ──────────────────────────────────────
|
||||
echo "--- Installing build dependencies ---"
|
||||
apk update
|
||||
apk add --no-cache \
|
||||
alpine-sdk \
|
||||
rust \
|
||||
cargo \
|
||||
sudo
|
||||
|
||||
# ── Create a non-root build user (abuild refuses to run as root) ──
|
||||
echo "--- Setting up build user ---"
|
||||
adduser -D builder
|
||||
addgroup builder abuild
|
||||
echo "builder ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
||||
|
||||
# ── Prepare build tree ──────────────────────────────────────────────
|
||||
echo "--- Preparing build tree ---"
|
||||
BUILD_DIR="/home/builder/btest-rs"
|
||||
mkdir -p "$BUILD_DIR"
|
||||
cp "$ALPINE_DIR/APKBUILD" "$BUILD_DIR/"
|
||||
cp "$ALPINE_DIR/btest.initd" "$BUILD_DIR/"
|
||||
|
||||
# Generate signing key (required by abuild)
|
||||
su builder -c "abuild-keygen -a -n -q"
|
||||
sudo cp /home/builder/.abuild/*.rsa.pub /etc/apk/keys/
|
||||
|
||||
# ── Build the package ──────────────────────────────────────────────
|
||||
echo "--- Building APK ---"
|
||||
cd "$BUILD_DIR"
|
||||
chown -R builder:builder "$BUILD_DIR"
|
||||
su builder -c "abuild -r"
|
||||
|
||||
echo "--- Build succeeded ---"
|
||||
|
||||
# ── Locate and install the package ──────────────────────────────────
|
||||
echo "--- Installing built APK ---"
|
||||
APK_FILE=$(find /home/builder/packages -name "btest-rs-*.apk" -not -name "*doc*" | head -1)
|
||||
if [ -z "$APK_FILE" ]; then
|
||||
echo "FAIL: APK file not found"
|
||||
exit 1
|
||||
fi
|
||||
echo "Found APK: $APK_FILE"
|
||||
apk add --allow-untrusted "$APK_FILE"
|
||||
|
||||
# ── Verify installation ────────────────────────────────────────────
|
||||
echo "--- Verifying installation ---"
|
||||
FAIL=0
|
||||
|
||||
# Binary exists and is executable
|
||||
if command -v btest >/dev/null 2>&1; then
|
||||
echo "PASS: btest binary installed"
|
||||
else
|
||||
echo "FAIL: btest binary not found in PATH"
|
||||
FAIL=1
|
||||
fi
|
||||
|
||||
# Binary runs (show version / help)
|
||||
if btest --help >/dev/null 2>&1; then
|
||||
echo "PASS: btest --help exits successfully"
|
||||
else
|
||||
echo "FAIL: btest --help failed"
|
||||
FAIL=1
|
||||
fi
|
||||
|
||||
# Man page installed
|
||||
if [ -f /usr/share/man/man1/btest.1 ]; then
|
||||
echo "PASS: man page installed"
|
||||
else
|
||||
echo "FAIL: man page not found"
|
||||
FAIL=1
|
||||
fi
|
||||
|
||||
# License installed
|
||||
if [ -f /usr/share/licenses/btest-rs/LICENSE ]; then
|
||||
echo "PASS: LICENSE installed"
|
||||
else
|
||||
echo "FAIL: LICENSE not found"
|
||||
FAIL=1
|
||||
fi
|
||||
|
||||
# OpenRC init script installed
|
||||
if [ -f /etc/init.d/btest ]; then
|
||||
echo "PASS: OpenRC init script installed"
|
||||
else
|
||||
echo "FAIL: OpenRC init script not found"
|
||||
FAIL=1
|
||||
fi
|
||||
|
||||
# Init script is executable
|
||||
if [ -x /etc/init.d/btest ]; then
|
||||
echo "PASS: init script is executable"
|
||||
else
|
||||
echo "FAIL: init script is not executable"
|
||||
FAIL=1
|
||||
fi
|
||||
|
||||
# ── Summary ─────────────────────────────────────────────────────────
|
||||
echo ""
|
||||
if [ "$FAIL" -eq 0 ]; then
|
||||
echo "=== All Alpine packaging tests PASSED ==="
|
||||
else
|
||||
echo "=== Some Alpine packaging tests FAILED ==="
|
||||
exit 1
|
||||
fi
|
||||
15
deploy/aur/.SRCINFO
Normal file
15
deploy/aur/.SRCINFO
Normal file
@@ -0,0 +1,15 @@
|
||||
pkgbase = btest-rs
|
||||
pkgdesc = MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth
|
||||
pkgver = 0.6.0
|
||||
pkgrel = 1
|
||||
url = https://github.com/manawenuz/btest-rs
|
||||
arch = x86_64
|
||||
arch = aarch64
|
||||
arch = armv7h
|
||||
license = MIT
|
||||
license = Apache-2.0
|
||||
makedepends = cargo
|
||||
source = btest-rs-0.6.0.tar.gz::https://github.com/manawenuz/btest-rs/archive/refs/tags/v0.6.0.tar.gz
|
||||
sha256sums = SKIP
|
||||
|
||||
pkgname = btest-rs
|
||||
58
deploy/aur/PKGBUILD
Normal file
58
deploy/aur/PKGBUILD
Normal file
@@ -0,0 +1,58 @@
|
||||
# Maintainer: Siavash Sameni <manwe at manko dot yoga>
|
||||
pkgname=btest-rs
|
||||
pkgver=0.6.0
|
||||
pkgrel=1
|
||||
pkgdesc="MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth"
|
||||
arch=('x86_64' 'aarch64' 'armv7h')
|
||||
url="https://github.com/manawenuz/btest-rs"
|
||||
license=('MIT' 'Apache-2.0')
|
||||
depends=()
|
||||
makedepends=('cargo')
|
||||
source=("$pkgname-$pkgver.tar.gz::https://github.com/manawenuz/btest-rs/archive/refs/tags/v$pkgver.tar.gz")
|
||||
sha256sums=('SKIP')
|
||||
|
||||
prepare() {
|
||||
cd "$pkgname-$pkgver"
|
||||
export RUSTUP_TOOLCHAIN=stable
|
||||
cargo fetch --locked --target "$(rustc -vV | sed -n 's/host: //p')"
|
||||
}
|
||||
|
||||
build() {
|
||||
cd "$pkgname-$pkgver"
|
||||
export RUSTUP_TOOLCHAIN=stable
|
||||
export CARGO_TARGET_DIR=target
|
||||
cargo build --frozen --release
|
||||
}
|
||||
|
||||
package() {
|
||||
cd "$pkgname-$pkgver"
|
||||
install -Dm755 "target/release/btest" "$pkgdir/usr/bin/btest"
|
||||
install -Dm644 "LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
|
||||
install -Dm644 "docs/man/btest.1" "$pkgdir/usr/share/man/man1/btest.1"
|
||||
install -Dm644 "README.md" "$pkgdir/usr/share/doc/$pkgname/README.md"
|
||||
|
||||
# systemd service
|
||||
install -Dm644 /dev/stdin "$pkgdir/usr/lib/systemd/system/btest.service" <<EOF
|
||||
[Unit]
|
||||
Description=MikroTik Bandwidth Test Server (btest-rs)
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/btest -s
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
DynamicUser=yes
|
||||
NoNewPrivileges=yes
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
PrivateTmp=yes
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
LimitNOFILE=65535
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
}
|
||||
40
deploy/aur/test-aur.sh
Executable file
40
deploy/aur/test-aur.sh
Executable file
@@ -0,0 +1,40 @@
|
||||
#!/usr/bin/env bash
|
||||
# Test the PKGBUILD in a Docker Arch Linux container.
|
||||
# Usage: ./deploy/aur/test-aur.sh
|
||||
set -euo pipefail
|
||||
|
||||
cd "$(dirname "$0")/../.."
|
||||
|
||||
echo "=== Testing AUR PKGBUILD in Arch Linux container ==="
|
||||
|
||||
docker run --rm -v "$(pwd):/src:ro" archlinux:latest bash -c '
|
||||
set -euo pipefail
|
||||
|
||||
# Install base-devel and rust
|
||||
pacman -Syu --noconfirm base-devel rustup git
|
||||
rustup default stable
|
||||
|
||||
# Create build user (makepkg refuses to run as root)
|
||||
useradd -m builder
|
||||
echo "builder ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
|
||||
|
||||
# Copy source and PKGBUILD
|
||||
su builder -c "
|
||||
mkdir -p /tmp/build && cd /tmp/build
|
||||
cp /src/deploy/aur/PKGBUILD .
|
||||
|
||||
# Build the package
|
||||
makepkg -si --noconfirm
|
||||
|
||||
# Verify
|
||||
echo ''
|
||||
echo '=== Installed ==='
|
||||
btest --version
|
||||
btest --help | head -5
|
||||
echo ''
|
||||
echo '=== Files ==='
|
||||
pacman -Ql btest-rs
|
||||
echo ''
|
||||
echo '=== SUCCESS ==='
|
||||
"
|
||||
'
|
||||
208
deploy/deb/build-deb.sh
Executable file
208
deploy/deb/build-deb.sh
Executable file
@@ -0,0 +1,208 @@
|
||||
#!/usr/bin/env bash
|
||||
# build-deb.sh -- Build a Debian/Ubuntu .deb package for btest-rs
|
||||
#
|
||||
# Usage:
|
||||
# ./deploy/deb/build-deb.sh # uses dist/btest or target/release/btest
|
||||
# BTEST_BIN=path/to/btest ./deploy/deb/build-deb.sh
|
||||
#
|
||||
# Requirements: dpkg-deb, gzip (standard on Debian/Ubuntu build hosts)
|
||||
set -euo pipefail
|
||||
|
||||
###############################################################################
|
||||
# Package metadata
|
||||
###############################################################################
|
||||
PKG_NAME="btest-rs"
|
||||
PKG_VERSION="0.6.0"
|
||||
PKG_ARCH="amd64"
|
||||
PKG_MAINTAINER="Siavash Sameni <manwe@manko.yoga>"
|
||||
PKG_DESCRIPTION="MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth"
|
||||
PKG_HOMEPAGE="https://github.com/manawenuz/btest-rs"
|
||||
PKG_LICENSE="MIT AND Apache-2.0"
|
||||
PKG_SECTION="net"
|
||||
PKG_PRIORITY="optional"
|
||||
|
||||
###############################################################################
|
||||
# Paths
|
||||
###############################################################################
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
|
||||
# Locate the pre-built binary
|
||||
if [[ -n "${BTEST_BIN:-}" ]]; then
|
||||
: # caller provided an explicit path
|
||||
elif [[ -f "$REPO_ROOT/dist/btest" ]]; then
|
||||
BTEST_BIN="$REPO_ROOT/dist/btest"
|
||||
elif [[ -f "$REPO_ROOT/target/release/btest" ]]; then
|
||||
BTEST_BIN="$REPO_ROOT/target/release/btest"
|
||||
else
|
||||
echo "Error: cannot find btest binary."
|
||||
echo " Build first (cargo build --release) or set BTEST_BIN=path/to/btest"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Verify the binary exists and is executable
|
||||
if [[ ! -f "$BTEST_BIN" ]]; then
|
||||
echo "Error: $BTEST_BIN does not exist."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "==> Using binary: $BTEST_BIN"
|
||||
|
||||
###############################################################################
|
||||
# Prepare staging tree
|
||||
###############################################################################
|
||||
DEB_FILE="${PKG_NAME}_${PKG_VERSION}_${PKG_ARCH}.deb"
|
||||
STAGE="$(mktemp -d)"
|
||||
trap 'rm -rf "$STAGE"' EXIT
|
||||
|
||||
echo "==> Staging in $STAGE"
|
||||
|
||||
# Binary
|
||||
install -Dm755 "$BTEST_BIN" "$STAGE/usr/bin/btest"
|
||||
|
||||
# Man page
|
||||
if [[ -f "$REPO_ROOT/docs/man/btest.1" ]]; then
|
||||
install -Dm644 "$REPO_ROOT/docs/man/btest.1" "$STAGE/usr/share/man/man1/btest.1"
|
||||
gzip -9n "$STAGE/usr/share/man/man1/btest.1"
|
||||
else
|
||||
echo "Warning: docs/man/btest.1 not found -- skipping man page"
|
||||
fi
|
||||
|
||||
# systemd service unit
|
||||
install -d "$STAGE/usr/lib/systemd/system"
|
||||
cat > "$STAGE/usr/lib/systemd/system/btest.service" <<'UNIT'
|
||||
[Unit]
|
||||
Description=MikroTik Bandwidth Test Server (btest-rs)
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/btest -s
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
DynamicUser=yes
|
||||
NoNewPrivileges=yes
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
PrivateTmp=yes
|
||||
ProtectKernelTunables=yes
|
||||
ProtectControlGroups=yes
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
LimitNOFILE=65535
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
UNIT
|
||||
|
||||
# Documentation
|
||||
install -Dm644 "$REPO_ROOT/README.md" "$STAGE/usr/share/doc/$PKG_NAME/README.md"
|
||||
|
||||
# License
|
||||
install -Dm644 "$REPO_ROOT/LICENSE" "$STAGE/usr/share/licenses/$PKG_NAME/LICENSE"
|
||||
|
||||
# Debian copyright file (policy-compliant copy in /usr/share/doc)
|
||||
install -d "$STAGE/usr/share/doc/$PKG_NAME"
|
||||
cat > "$STAGE/usr/share/doc/$PKG_NAME/copyright" <<COPY
|
||||
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
Upstream-Name: $PKG_NAME
|
||||
Upstream-Contact: $PKG_MAINTAINER
|
||||
Source: $PKG_HOMEPAGE
|
||||
|
||||
Files: *
|
||||
Copyright: 2024-2026 Siavash Sameni
|
||||
License: MIT AND Apache-2.0
|
||||
COPY
|
||||
|
||||
###############################################################################
|
||||
# Calculate installed size (in KiB, as Debian policy requires)
|
||||
###############################################################################
|
||||
INSTALLED_SIZE=$(du -sk "$STAGE" | cut -f1)
|
||||
|
||||
###############################################################################
|
||||
# DEBIAN/control
|
||||
###############################################################################
|
||||
install -d "$STAGE/DEBIAN"
|
||||
cat > "$STAGE/DEBIAN/control" <<CTRL
|
||||
Package: $PKG_NAME
|
||||
Version: $PKG_VERSION
|
||||
Architecture: $PKG_ARCH
|
||||
Maintainer: $PKG_MAINTAINER
|
||||
Installed-Size: $INSTALLED_SIZE
|
||||
Section: $PKG_SECTION
|
||||
Priority: $PKG_PRIORITY
|
||||
Homepage: $PKG_HOMEPAGE
|
||||
Description: $PKG_DESCRIPTION
|
||||
A high-performance Rust implementation of the MikroTik Bandwidth Test
|
||||
protocol, supporting both server and client modes with EC-SRP5
|
||||
authentication. Supports TCP/UDP throughput testing and is fully
|
||||
compatible with RouterOS btest clients.
|
||||
CTRL
|
||||
|
||||
###############################################################################
|
||||
# DEBIAN/conffiles (mark the systemd unit as a conffile)
|
||||
###############################################################################
|
||||
cat > "$STAGE/DEBIAN/conffiles" <<'CF'
|
||||
/usr/lib/systemd/system/btest.service
|
||||
CF
|
||||
|
||||
###############################################################################
|
||||
# Maintainer scripts
|
||||
###############################################################################
|
||||
|
||||
# postinst -- reload systemd after install
|
||||
cat > "$STAGE/DEBIAN/postinst" <<'POST'
|
||||
#!/bin/sh
|
||||
set -e
|
||||
if [ "$1" = "configure" ]; then
|
||||
if command -v systemctl >/dev/null 2>&1; then
|
||||
systemctl daemon-reload || true
|
||||
echo ""
|
||||
echo "btest-rs installed. To start the server:"
|
||||
echo " sudo systemctl enable --now btest.service"
|
||||
echo ""
|
||||
fi
|
||||
fi
|
||||
POST
|
||||
chmod 755 "$STAGE/DEBIAN/postinst"
|
||||
|
||||
# prerm -- stop service before removal
|
||||
cat > "$STAGE/DEBIAN/prerm" <<'PRERM'
|
||||
#!/bin/sh
|
||||
set -e
|
||||
if [ "$1" = "remove" ] || [ "$1" = "deconfigure" ]; then
|
||||
if command -v systemctl >/dev/null 2>&1; then
|
||||
systemctl stop btest.service 2>/dev/null || true
|
||||
systemctl disable btest.service 2>/dev/null || true
|
||||
fi
|
||||
fi
|
||||
PRERM
|
||||
chmod 755 "$STAGE/DEBIAN/prerm"
|
||||
|
||||
# postrm -- clean up after removal
|
||||
cat > "$STAGE/DEBIAN/postrm" <<'POSTRM'
|
||||
#!/bin/sh
|
||||
set -e
|
||||
if [ "$1" = "purge" ] || [ "$1" = "remove" ]; then
|
||||
if command -v systemctl >/dev/null 2>&1; then
|
||||
systemctl daemon-reload || true
|
||||
fi
|
||||
fi
|
||||
POSTRM
|
||||
chmod 755 "$STAGE/DEBIAN/postrm"
|
||||
|
||||
###############################################################################
|
||||
# Build .deb
|
||||
###############################################################################
|
||||
OUTPUT_DIR="${OUTPUT_DIR:-$REPO_ROOT/dist}"
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
|
||||
echo "==> Building $DEB_FILE ..."
|
||||
dpkg-deb --root-owner-group --build "$STAGE" "$OUTPUT_DIR/$DEB_FILE"
|
||||
|
||||
echo "==> Package ready: $OUTPUT_DIR/$DEB_FILE"
|
||||
echo ""
|
||||
dpkg-deb --info "$OUTPUT_DIR/$DEB_FILE"
|
||||
echo ""
|
||||
dpkg-deb --contents "$OUTPUT_DIR/$DEB_FILE"
|
||||
104
deploy/deb/test-deb.sh
Executable file
104
deploy/deb/test-deb.sh
Executable file
@@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env bash
|
||||
# test-deb.sh -- Smoke-test a btest-rs .deb inside an Ubuntu Docker container
|
||||
#
|
||||
# Usage:
|
||||
# ./deploy/deb/test-deb.sh # auto-finds dist/*.deb
|
||||
# ./deploy/deb/test-deb.sh path/to/btest-rs_*.deb
|
||||
#
|
||||
# Requirements: docker
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
IMAGE="${TEST_IMAGE:-ubuntu:24.04}"
|
||||
|
||||
###############################################################################
|
||||
# Locate the .deb
|
||||
###############################################################################
|
||||
if [[ -n "${1:-}" ]]; then
|
||||
DEB_PATH="$1"
|
||||
else
|
||||
DEB_PATH="$(ls -1t "$REPO_ROOT"/dist/btest-rs_*.deb 2>/dev/null | head -1 || true)"
|
||||
fi
|
||||
|
||||
if [[ -z "$DEB_PATH" || ! -f "$DEB_PATH" ]]; then
|
||||
echo "Error: no .deb file found."
|
||||
echo " Build first: ./deploy/deb/build-deb.sh"
|
||||
echo " Or pass path: $0 path/to/btest-rs_*.deb"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DEB_FILE="$(basename "$DEB_PATH")"
|
||||
DEB_DIR="$(cd "$(dirname "$DEB_PATH")" && pwd)"
|
||||
|
||||
echo "==> Testing $DEB_FILE in $IMAGE"
|
||||
echo ""
|
||||
|
||||
###############################################################################
|
||||
# Run tests inside a disposable container
|
||||
###############################################################################
|
||||
docker run --rm \
|
||||
-v "$DEB_DIR/$DEB_FILE:/tmp/$DEB_FILE:ro" \
|
||||
"$IMAGE" \
|
||||
bash -euxc "
|
||||
###################################################################
|
||||
# 1. Install the .deb
|
||||
###################################################################
|
||||
apt-get update -qq
|
||||
dpkg -i /tmp/$DEB_FILE || apt-get install -f -y # resolve deps if any
|
||||
|
||||
###################################################################
|
||||
# 2. Verify files are in place
|
||||
###################################################################
|
||||
echo '--- Checking installed files ---'
|
||||
test -x /usr/bin/btest
|
||||
test -f /usr/lib/systemd/system/btest.service
|
||||
test -f /usr/share/doc/btest-rs/README.md
|
||||
test -f /usr/share/licenses/btest-rs/LICENSE
|
||||
|
||||
# Man page (may be gzipped)
|
||||
test -f /usr/share/man/man1/btest.1.gz || test -f /usr/share/man/man1/btest.1
|
||||
echo 'All expected files present.'
|
||||
|
||||
###################################################################
|
||||
# 3. btest --version
|
||||
###################################################################
|
||||
echo ''
|
||||
echo '--- btest --version ---'
|
||||
btest --version
|
||||
|
||||
###################################################################
|
||||
# 4. Quick loopback server+client test
|
||||
###################################################################
|
||||
echo ''
|
||||
echo '--- Loopback smoke test ---'
|
||||
|
||||
# Start server in background
|
||||
btest -s &
|
||||
SERVER_PID=\$!
|
||||
sleep 1
|
||||
|
||||
# Run a short TCP test against localhost
|
||||
if btest -c 127.0.0.1 -d 2 2>&1; then
|
||||
echo 'Loopback TCP test passed.'
|
||||
else
|
||||
echo 'Warning: loopback test returned non-zero (may be expected in container).'
|
||||
fi
|
||||
|
||||
# Tear down
|
||||
kill \$SERVER_PID 2>/dev/null || true
|
||||
wait \$SERVER_PID 2>/dev/null || true
|
||||
|
||||
###################################################################
|
||||
# 5. Package metadata sanity
|
||||
###################################################################
|
||||
echo ''
|
||||
echo '--- dpkg metadata ---'
|
||||
dpkg -s btest-rs | head -20
|
||||
|
||||
echo ''
|
||||
echo '=== All tests passed ==='
|
||||
"
|
||||
|
||||
echo ""
|
||||
echo "==> .deb smoke test completed successfully."
|
||||
57
deploy/openwrt/Makefile
Normal file
57
deploy/openwrt/Makefile
Normal file
@@ -0,0 +1,57 @@
|
||||
# OpenWrt package Makefile for btest-rs
|
||||
#
|
||||
# To build:
|
||||
# 1. Clone the OpenWrt SDK for your target
|
||||
# 2. Copy this directory to package/btest-rs/ in the SDK
|
||||
# 3. Run: make package/btest-rs/compile V=s
|
||||
#
|
||||
# Or use the pre-built binary approach (see build-ipk.sh)
|
||||
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=btest-rs
|
||||
PKG_VERSION:=0.6.0
|
||||
PKG_RELEASE:=1
|
||||
|
||||
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
|
||||
PKG_SOURCE_URL:=https://github.com/manawenuz/btest-rs/archive/refs/tags/v$(PKG_VERSION).tar.gz
|
||||
PKG_HASH:=skip
|
||||
|
||||
PKG_BUILD_DEPENDS:=rust/host
|
||||
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
|
||||
|
||||
include $(INCLUDE_DIR)/package.mk
|
||||
|
||||
define Package/btest-rs
|
||||
SECTION:=net
|
||||
CATEGORY:=Network
|
||||
TITLE:=MikroTik Bandwidth Test server and client
|
||||
URL:=https://github.com/manawenuz/btest-rs
|
||||
DEPENDS:=
|
||||
PKGARCH:=$(ARCH)
|
||||
endef
|
||||
|
||||
define Package/btest-rs/description
|
||||
A Rust reimplementation of the MikroTik Bandwidth Test (btest) protocol.
|
||||
Supports TCP/UDP, IPv4/IPv6, EC-SRP5 and MD5 authentication,
|
||||
multi-connection, syslog, CSV output, and CPU monitoring.
|
||||
endef
|
||||
|
||||
define Build/Compile
|
||||
cd $(PKG_BUILD_DIR) && \
|
||||
CARGO_TARGET_DIR=$(PKG_BUILD_DIR)/target \
|
||||
cargo build --release --target $(RUSTC_TARGET)
|
||||
endef
|
||||
|
||||
define Package/btest-rs/install
|
||||
$(INSTALL_DIR) $(1)/usr/bin
|
||||
$(INSTALL_BIN) $(PKG_BUILD_DIR)/target/$(RUSTC_TARGET)/release/btest $(1)/usr/bin/btest
|
||||
|
||||
$(INSTALL_DIR) $(1)/etc/init.d
|
||||
$(INSTALL_BIN) ./files/btest.init $(1)/etc/init.d/btest
|
||||
|
||||
$(INSTALL_DIR) $(1)/etc/config
|
||||
$(INSTALL_CONF) ./files/btest.config $(1)/etc/config/btest
|
||||
endef
|
||||
|
||||
$(eval $(call BuildPackage,btest-rs))
|
||||
117
deploy/openwrt/build-ipk.sh
Executable file
117
deploy/openwrt/build-ipk.sh
Executable file
@@ -0,0 +1,117 @@
|
||||
#!/usr/bin/env bash
|
||||
# Build an OpenWrt .ipk package from a pre-built static binary.
|
||||
# No OpenWrt SDK needed — just packages the binary with metadata.
|
||||
#
|
||||
# Usage:
|
||||
# ./deploy/openwrt/build-ipk.sh <arch> [binary-path]
|
||||
#
|
||||
# Examples:
|
||||
# ./deploy/openwrt/build-ipk.sh x86_64 dist/btest # from cross-compiled binary
|
||||
# ./deploy/openwrt/build-ipk.sh aarch64 dist/btest # for RPi/ARM64 routers
|
||||
# ./deploy/openwrt/build-ipk.sh mipsel target/release/btest # for MIPS little-endian
|
||||
#
|
||||
# Supported architectures: x86_64, aarch64, arm_cortex-a7, mipsel_24kc, mips_24kc
|
||||
set -euo pipefail
|
||||
|
||||
cd "$(dirname "$0")/../.."
|
||||
|
||||
ARCH="${1:?Usage: $0 <arch> [binary-path]}"
|
||||
BINARY="${2:-dist/btest}"
|
||||
VERSION="0.6.0"
|
||||
PKG_NAME="btest-rs"
|
||||
OUTPUT_DIR="dist"
|
||||
|
||||
if [ ! -f "$BINARY" ]; then
|
||||
echo "Error: binary not found at $BINARY"
|
||||
echo "Build it first: cargo build --release --target <target>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
WORKDIR=$(mktemp -d)
|
||||
trap "rm -rf $WORKDIR" EXIT
|
||||
|
||||
echo "=== Building ${PKG_NAME}_${VERSION}_${ARCH}.ipk ==="
|
||||
|
||||
# Create package structure
|
||||
mkdir -p "$WORKDIR/data/usr/bin"
|
||||
mkdir -p "$WORKDIR/data/etc/init.d"
|
||||
mkdir -p "$WORKDIR/data/etc/config"
|
||||
mkdir -p "$WORKDIR/control"
|
||||
|
||||
# Install files
|
||||
cp "$BINARY" "$WORKDIR/data/usr/bin/btest"
|
||||
chmod 755 "$WORKDIR/data/usr/bin/btest"
|
||||
cp deploy/openwrt/files/btest.init "$WORKDIR/data/etc/init.d/btest"
|
||||
chmod 755 "$WORKDIR/data/etc/init.d/btest"
|
||||
cp deploy/openwrt/files/btest.config "$WORKDIR/data/etc/config/btest"
|
||||
|
||||
# Calculate installed size
|
||||
INSTALLED_SIZE=$(du -sk "$WORKDIR/data" | awk '{print $1}')
|
||||
|
||||
# Control file
|
||||
cat > "$WORKDIR/control/control" << EOF
|
||||
Package: ${PKG_NAME}
|
||||
Version: ${VERSION}-1
|
||||
Depends: libc
|
||||
Source: https://github.com/manawenuz/btest-rs
|
||||
License: MIT AND Apache-2.0
|
||||
Section: net
|
||||
SourceName: ${PKG_NAME}
|
||||
Maintainer: Siavash Sameni <manwe@manko.yoga>
|
||||
Architecture: ${ARCH}
|
||||
Installed-Size: ${INSTALLED_SIZE}
|
||||
Description: MikroTik Bandwidth Test server and client
|
||||
A Rust reimplementation of the MikroTik btest protocol.
|
||||
Supports TCP/UDP, EC-SRP5 and MD5 auth, IPv4/IPv6.
|
||||
EOF
|
||||
|
||||
# Post-install script
|
||||
cat > "$WORKDIR/control/postinst" << 'EOF'
|
||||
#!/bin/sh
|
||||
[ "${IPKG_NO_SCRIPT}" = "1" ] && exit 0
|
||||
/etc/init.d/btest enable 2>/dev/null || true
|
||||
exit 0
|
||||
EOF
|
||||
chmod 755 "$WORKDIR/control/postinst"
|
||||
|
||||
# Pre-remove script
|
||||
cat > "$WORKDIR/control/prerm" << 'EOF'
|
||||
#!/bin/sh
|
||||
/etc/init.d/btest stop 2>/dev/null || true
|
||||
/etc/init.d/btest disable 2>/dev/null || true
|
||||
exit 0
|
||||
EOF
|
||||
chmod 755 "$WORKDIR/control/prerm"
|
||||
|
||||
# Conffiles
|
||||
cat > "$WORKDIR/control/conffiles" << EOF
|
||||
/etc/config/btest
|
||||
EOF
|
||||
|
||||
# Build the .ipk (it's just a tar.gz of tar.gz's)
|
||||
cd "$WORKDIR"
|
||||
|
||||
# Create data.tar.gz
|
||||
(cd data && tar czf ../data.tar.gz .)
|
||||
|
||||
# Create control.tar.gz
|
||||
(cd control && tar czf ../control.tar.gz .)
|
||||
|
||||
# Create debian-binary
|
||||
echo "2.0" > debian-binary
|
||||
|
||||
# Package it all
|
||||
tar czf "${PKG_NAME}_${VERSION}-1_${ARCH}.ipk" debian-binary control.tar.gz data.tar.gz
|
||||
|
||||
cd -
|
||||
cp "$WORKDIR/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk" "$OUTPUT_DIR/"
|
||||
|
||||
echo ""
|
||||
echo "Package: $OUTPUT_DIR/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk"
|
||||
ls -lh "$OUTPUT_DIR/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk"
|
||||
echo ""
|
||||
echo "Install on OpenWrt:"
|
||||
echo " scp $OUTPUT_DIR/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk root@router:/tmp/"
|
||||
echo " ssh root@router 'opkg install /tmp/${PKG_NAME}_${VERSION}-1_${ARCH}.ipk'"
|
||||
echo " ssh root@router '/etc/init.d/btest enable && /etc/init.d/btest start'"
|
||||
7
deploy/openwrt/files/btest.config
Normal file
7
deploy/openwrt/files/btest.config
Normal file
@@ -0,0 +1,7 @@
|
||||
config server
|
||||
option enabled '0'
|
||||
option port '2000'
|
||||
option auth_user ''
|
||||
option auth_pass ''
|
||||
option ecsrp5 '0'
|
||||
option syslog ''
|
||||
34
deploy/openwrt/files/btest.init
Executable file
34
deploy/openwrt/files/btest.init
Executable file
@@ -0,0 +1,34 @@
|
||||
#!/bin/sh /etc/rc.common
|
||||
# btest-rs OpenWrt init script
|
||||
|
||||
START=90
|
||||
STOP=10
|
||||
|
||||
USE_PROCD=1
|
||||
|
||||
start_service() {
|
||||
local enabled port auth_user auth_pass ecsrp5 syslog
|
||||
|
||||
config_load btest
|
||||
config_get_bool enabled server enabled 0
|
||||
[ "$enabled" -eq 0 ] && return
|
||||
|
||||
config_get port server port 2000
|
||||
config_get auth_user server auth_user ''
|
||||
config_get auth_pass server auth_pass ''
|
||||
config_get_bool ecsrp5 server ecsrp5 0
|
||||
config_get syslog server syslog ''
|
||||
|
||||
procd_open_instance
|
||||
procd_set_param command /usr/bin/btest -s -P "$port"
|
||||
|
||||
[ -n "$auth_user" ] && procd_append_param command -a "$auth_user"
|
||||
[ -n "$auth_pass" ] && procd_append_param command -p "$auth_pass"
|
||||
[ "$ecsrp5" -eq 1 ] && procd_append_param command --ecsrp5
|
||||
[ -n "$syslog" ] && procd_append_param command --syslog "$syslog"
|
||||
|
||||
procd_set_param respawn
|
||||
procd_set_param stdout 1
|
||||
procd_set_param stderr 1
|
||||
procd_close_instance
|
||||
}
|
||||
73
deploy/rpm/btest-rs.spec
Normal file
73
deploy/rpm/btest-rs.spec
Normal file
@@ -0,0 +1,73 @@
|
||||
Name: btest-rs
|
||||
Version: 0.6.0
|
||||
Release: 1%{?dist}
|
||||
Summary: MikroTik Bandwidth Test (btest) server and client with EC-SRP5 auth
|
||||
|
||||
License: MIT AND Apache-2.0
|
||||
URL: https://github.com/manawenuz/btest-rs
|
||||
Source0: https://github.com/manawenuz/btest-rs/archive/refs/tags/v%{version}.tar.gz
|
||||
|
||||
BuildRequires: cargo
|
||||
BuildRequires: rust
|
||||
ExclusiveArch: x86_64 aarch64
|
||||
|
||||
%description
|
||||
A Rust reimplementation of the MikroTik Bandwidth Test (btest) protocol,
|
||||
providing both server and client functionality with EC-SRP5 authentication.
|
||||
|
||||
%prep
|
||||
%autosetup -n %{name}-%{version}
|
||||
|
||||
%build
|
||||
export CARGO_TARGET_DIR=target
|
||||
cargo build --release
|
||||
|
||||
%install
|
||||
install -Dm755 target/release/btest %{buildroot}%{_bindir}/btest
|
||||
install -Dm644 docs/man/btest.1 %{buildroot}%{_mandir}/man1/btest.1
|
||||
install -Dm644 LICENSE %{buildroot}%{_datadir}/licenses/%{name}/LICENSE
|
||||
|
||||
# systemd service unit
|
||||
install -d %{buildroot}%{_unitdir}
|
||||
cat > %{buildroot}%{_unitdir}/btest.service << 'EOF'
|
||||
[Unit]
|
||||
Description=MikroTik Bandwidth Test Server (btest-rs)
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/bin/btest -s
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
DynamicUser=yes
|
||||
NoNewPrivileges=yes
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
PrivateTmp=yes
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
LimitNOFILE=65535
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
%files
|
||||
%license LICENSE
|
||||
%{_bindir}/btest
|
||||
%{_mandir}/man1/btest.1*
|
||||
%{_unitdir}/btest.service
|
||||
|
||||
%post
|
||||
%systemd_post btest.service
|
||||
|
||||
%preun
|
||||
%systemd_preun btest.service
|
||||
|
||||
%postun
|
||||
%systemd_postun_with_restart btest.service
|
||||
|
||||
%changelog
|
||||
* Mon Mar 30 2026 Siavash Sameni <manwe@manko.yoga> - 0.6.0-1
|
||||
- Initial RPM package
|
||||
30
deploy/rpm/build-rpm.sh
Executable file
30
deploy/rpm/build-rpm.sh
Executable file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env bash
|
||||
# build-rpm.sh — Build the btest-rs RPM package
|
||||
set -euo pipefail
|
||||
|
||||
SPEC_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
SPEC_FILE="${SPEC_DIR}/btest-rs.spec"
|
||||
VERSION="0.6.0"
|
||||
TARBALL="v${VERSION}.tar.gz"
|
||||
SOURCE_URL="https://github.com/manawenuz/btest-rs/archive/refs/tags/${TARBALL}"
|
||||
|
||||
echo "==> Setting up rpmbuild tree"
|
||||
mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
|
||||
|
||||
echo "==> Downloading source tarball"
|
||||
if [ ! -f ~/rpmbuild/SOURCES/"${TARBALL}" ]; then
|
||||
curl -fSL -o ~/rpmbuild/SOURCES/"${TARBALL}" "${SOURCE_URL}"
|
||||
else
|
||||
echo " (already present, skipping download)"
|
||||
fi
|
||||
|
||||
echo "==> Copying spec file"
|
||||
cp "${SPEC_FILE}" ~/rpmbuild/SPECS/btest-rs.spec
|
||||
|
||||
echo "==> Building RPM"
|
||||
rpmbuild -ba ~/rpmbuild/SPECS/btest-rs.spec
|
||||
|
||||
echo ""
|
||||
echo "==> Build complete. Packages:"
|
||||
find ~/rpmbuild/RPMS -name '*.rpm' -print
|
||||
find ~/rpmbuild/SRPMS -name '*.rpm' -print
|
||||
75
deploy/rpm/test-rpm.sh
Executable file
75
deploy/rpm/test-rpm.sh
Executable file
@@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env bash
|
||||
# test-rpm.sh — Test the btest-rs RPM build inside a Fedora container
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
|
||||
|
||||
IMAGE="fedora:latest"
|
||||
|
||||
echo "==> Testing RPM build in ${IMAGE}"
|
||||
docker run --rm \
|
||||
-v "${REPO_ROOT}:/workspace:ro" \
|
||||
"${IMAGE}" \
|
||||
bash -euxc '
|
||||
# ── Install build dependencies ──
|
||||
dnf install -y rpm-build rpmdevtools curl gcc make \
|
||||
systemd-rpm-macros
|
||||
|
||||
# Install Rust toolchain
|
||||
curl --proto "=https" --tlsv1.2 -sSf https://sh.rustup.rs \
|
||||
| sh -s -- -y --profile minimal
|
||||
source "$HOME/.cargo/env"
|
||||
|
||||
# ── Set up rpmbuild tree ──
|
||||
rpmdev-setuptree
|
||||
|
||||
VERSION="0.6.0"
|
||||
TARBALL="v${VERSION}.tar.gz"
|
||||
|
||||
# Copy spec
|
||||
cp /workspace/deploy/rpm/btest-rs.spec ~/rpmbuild/SPECS/
|
||||
|
||||
# Create source tarball from workspace
|
||||
# rpmbuild expects btest-rs-VERSION/ top-level directory
|
||||
mkdir -p /tmp/btest-rs-${VERSION}
|
||||
cp -a /workspace/. /tmp/btest-rs-${VERSION}/
|
||||
tar czf ~/rpmbuild/SOURCES/${TARBALL} -C /tmp btest-rs-${VERSION}
|
||||
|
||||
# ── Build RPM ──
|
||||
rpmbuild -ba ~/rpmbuild/SPECS/btest-rs.spec
|
||||
|
||||
# ── Install the RPM ──
|
||||
RPM=$(find ~/rpmbuild/RPMS -name "btest-rs-*.rpm" | head -1)
|
||||
echo "Installing: ${RPM}"
|
||||
dnf install -y "${RPM}"
|
||||
|
||||
# ── Verify installation ──
|
||||
echo "--- btest --version ---"
|
||||
btest --version
|
||||
|
||||
echo "--- Checking systemd unit ---"
|
||||
systemctl cat btest.service || true
|
||||
|
||||
echo "--- Checking man page ---"
|
||||
test -f /usr/share/man/man1/btest.1* && echo "man page OK" || echo "man page MISSING"
|
||||
|
||||
echo "--- Checking license ---"
|
||||
test -f /usr/share/licenses/btest-rs/LICENSE && echo "license OK" || echo "license MISSING"
|
||||
|
||||
# ── Loopback bandwidth test ──
|
||||
echo "--- Starting loopback test ---"
|
||||
btest -s &
|
||||
SERVER_PID=$!
|
||||
sleep 2
|
||||
|
||||
btest -c 127.0.0.1 --duration 3 && echo "Loopback test PASSED" \
|
||||
|| echo "Loopback test FAILED (exit $?)"
|
||||
|
||||
kill "${SERVER_PID}" 2>/dev/null || true
|
||||
wait "${SERVER_PID}" 2>/dev/null || true
|
||||
|
||||
echo "==> All RPM tests completed."
|
||||
'
|
||||
|
||||
echo "==> Fedora container test finished."
|
||||
@@ -1,7 +1,7 @@
|
||||
services:
|
||||
btest-server:
|
||||
build: .
|
||||
image: git.manko.yoga/manawenuz/btest-rs:latest
|
||||
image: ghcr.io/manawenuz/btest-rs:latest
|
||||
container_name: btest-server
|
||||
ports:
|
||||
- "2000:2000/tcp"
|
||||
@@ -13,7 +13,7 @@ services:
|
||||
# Server with authentication enabled
|
||||
btest-server-auth:
|
||||
build: .
|
||||
image: git.manko.yoga/manawenuz/btest-rs:latest
|
||||
image: ghcr.io/manawenuz/btest-rs:latest
|
||||
container_name: btest-server-auth
|
||||
ports:
|
||||
- "2010:2000/tcp"
|
||||
|
||||
@@ -2,282 +2,181 @@
|
||||
|
||||
## Overview
|
||||
|
||||
btest-rs is a Rust reimplementation of the MikroTik Bandwidth Test protocol. It operates in two modes: **server** (accepts connections from MikroTik devices) and **client** (connects to MikroTik btest servers).
|
||||
btest-rs is a Rust reimplementation of the MikroTik Bandwidth Test protocol. It operates in two modes: **server** (accepts connections from MikroTik devices) and **client** (connects to MikroTik btest servers). An optional **server-pro** mode adds multi-user support, quotas, and a web dashboard.
|
||||
|
||||
## Module Structure
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
main["main.rs<br/>CLI parsing (clap)"]
|
||||
server["server.rs<br/>Server mode"]
|
||||
client["client.rs<br/>Client mode"]
|
||||
protocol["protocol.rs<br/>Wire protocol types"]
|
||||
auth["auth.rs<br/>MD5 authentication"]
|
||||
ecsrp5["ecsrp5.rs<br/>EC-SRP5 authentication<br/>(Curve25519 Weierstrass)"]
|
||||
bandwidth["bandwidth.rs<br/>Rate control & reporting"]
|
||||
csv_output["csv_output.rs<br/>CSV result logging"]
|
||||
syslog["syslog_logger.rs<br/>Remote syslog (RFC 3164)"]
|
||||
lib["lib.rs<br/>Public API for tests"]
|
||||
```
|
||||
src/
|
||||
├── main.rs # CLI entry point, argument parsing (clap)
|
||||
├── lib.rs # Public API (re-exports all modules for tests/pro)
|
||||
├── protocol.rs # Wire format: Command, StatusMessage, constants
|
||||
├── auth.rs # MD5 challenge-response authentication
|
||||
├── ecsrp5.rs # EC-SRP5 authentication (Curve25519 Weierstrass)
|
||||
├── server.rs # Server mode: listener, TCP/UDP handlers, multi-conn
|
||||
├── client.rs # Client mode: connector, TCP/UDP handlers, status parsing
|
||||
├── bandwidth.rs # Rate limiting, formatting, shared BandwidthState, byte budget
|
||||
├── cpu.rs # CPU sampler (macOS, Linux, Android, Windows, FreeBSD)
|
||||
├── csv_output.rs # CSV result logging (append-mode, auto-header)
|
||||
├── syslog_logger.rs # Remote syslog sender (RFC 3164 / BSD format)
|
||||
├── bin/
|
||||
│ ├── client_only.rs # Stripped client binary for embedded/OpenWrt
|
||||
│ └── server_only.rs # Stripped server binary for embedded/OpenWrt
|
||||
└── server_pro/ # Optional (--features pro)
|
||||
├── main.rs # Pro CLI: user management, quota flags, web port
|
||||
├── server_loop.rs # Accept loop with auth, quotas, multi-conn sessions
|
||||
├── user_db.rs # SQLite: users, usage, ip_usage, sessions, intervals
|
||||
├── quota.rs # QuotaManager: per-user + per-IP limits, remaining_budget()
|
||||
├── enforcer.rs # QuotaEnforcer: periodic checks, max_duration, StopReason
|
||||
├── ldap_auth.rs # LDAP auth scaffold (not yet wired)
|
||||
└── web/
|
||||
└── mod.rs # Axum web dashboard: Chart.js, quota bars, JSON export
|
||||
```
|
||||
|
||||
main --> server
|
||||
main --> client
|
||||
main --> bandwidth
|
||||
main --> csv_output
|
||||
main --> syslog
|
||||
server --> protocol
|
||||
server --> auth
|
||||
server --> ecsrp5
|
||||
server --> bandwidth
|
||||
server --> syslog
|
||||
client --> protocol
|
||||
client --> auth
|
||||
client --> ecsrp5
|
||||
client --> bandwidth
|
||||
lib --> server
|
||||
lib --> client
|
||||
lib --> protocol
|
||||
lib --> auth
|
||||
lib --> ecsrp5
|
||||
lib --> bandwidth
|
||||
## CLI Output Format
|
||||
|
||||
The client outputs one line per second per direction:
|
||||
|
||||
```
|
||||
[ 5] TX 285.47 Mbps (35684352 bytes) cpu: 20%/62%
|
||||
[ 5] RX 283.64 Mbps (35454988 bytes) cpu: 20%/62% lost: 12
|
||||
```
|
||||
|
||||
Format: `[interval] direction speed (bytes) cpu: local%/remote% [lost: N]`
|
||||
|
||||
At test end, a summary line:
|
||||
```
|
||||
TEST_END peer=172.16.81.1 proto=TCP dir=both duration=60s tx_avg=284.94Mbps rx_avg=272.83Mbps tx_bytes=2137030656 rx_bytes=2046260728 lost=0
|
||||
```
|
||||
|
||||
## Data Flow
|
||||
|
||||
### Server Mode (MikroTik connects to us)
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant MK as MikroTik Client
|
||||
participant TCP as TCP Control<br/>(port 2000)
|
||||
participant SRV as btest-rs Server
|
||||
participant UDP as UDP Data<br/>(port 2001+)
|
||||
|
||||
MK->>TCP: Connect
|
||||
SRV->>TCP: HELLO [01 00 00 00]
|
||||
MK->>TCP: Command [16 bytes]
|
||||
Note over SRV: Parse proto, direction,<br/>tx_size, speeds
|
||||
|
||||
alt No auth configured
|
||||
SRV->>TCP: AUTH_OK [01 00 00 00]
|
||||
else MD5 auth (RouterOS < 6.43)
|
||||
SRV->>TCP: AUTH_REQUIRED [02 00 00 00]
|
||||
SRV->>TCP: Challenge [16 random bytes]
|
||||
MK->>TCP: Response [16 hash + 32 username]
|
||||
Note over SRV: Verify MD5(pass + MD5(pass + challenge))
|
||||
SRV->>TCP: AUTH_OK or AUTH_FAILED
|
||||
else EC-SRP5 auth (RouterOS >= 6.43, --ecsrp5 flag)
|
||||
SRV->>TCP: EC-SRP5 [03 00 00 00]
|
||||
MK->>TCP: [len][username\0][client_pubkey:32][parity:1]
|
||||
SRV->>TCP: [len][server_pubkey:32][parity:1][salt:16]
|
||||
MK->>TCP: [len][client_confirmation:32]
|
||||
SRV->>TCP: [len][server_confirmation:32]
|
||||
Note over SRV: Curve25519 Weierstrass EC-SRP5<br/>See docs/ecsrp5-research.md
|
||||
SRV->>TCP: AUTH_OK [01 00 00 00]
|
||||
end
|
||||
|
||||
alt TCP mode
|
||||
Note over SRV,MK: Data flows on same TCP connection
|
||||
loop Every second
|
||||
SRV-->>SRV: Print bandwidth stats
|
||||
end
|
||||
else UDP mode
|
||||
SRV->>TCP: UDP port [2 bytes BE]
|
||||
Note over SRV: Bind UDP socket
|
||||
par TX Thread (if server transmits)
|
||||
loop Continuous
|
||||
SRV->>UDP: Data packets [seq + payload]
|
||||
end
|
||||
and RX Thread (if server receives)
|
||||
loop Continuous
|
||||
UDP->>SRV: Data packets [seq + payload]
|
||||
end
|
||||
and Status Loop (TCP control)
|
||||
loop Every 1 second
|
||||
MK->>TCP: Status [12 bytes]
|
||||
SRV->>TCP: Status [12 bytes]
|
||||
Note over SRV: Adjust TX speed<br/>based on client feedback
|
||||
end
|
||||
end
|
||||
end
|
||||
```
|
||||
MikroTik → TCP:2000 → HELLO → Command [16 bytes] → Auth → Data Transfer
|
||||
```
|
||||
|
||||
1. Server sends HELLO `[01 00 00 00]`
|
||||
2. Client sends 16-byte command (protocol, direction, tx_size, speeds, conn_count)
|
||||
3. Auth: none (`01`), MD5 (`02`), or EC-SRP5 (`03`)
|
||||
4. TCP: data flows on same connection, 12-byte status messages interleaved every 1s
|
||||
5. UDP: server sends port number, data on UDP, status exchange stays on TCP
|
||||
|
||||
### Client Mode (we connect to MikroTik)
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant CLI as btest-rs Client
|
||||
participant TCP as TCP Control
|
||||
participant MK as MikroTik Server
|
||||
1. Connect to MikroTik:2000
|
||||
2. Read HELLO, send command
|
||||
3. Auto-detect auth type from response byte, authenticate
|
||||
4. Start data transfer with status exchange
|
||||
|
||||
CLI->>TCP: Connect to MikroTik:2000
|
||||
MK->>TCP: HELLO
|
||||
CLI->>TCP: Command [16 bytes]
|
||||
Note over CLI: direction bits tell server<br/>what to do (TX/RX/BOTH)
|
||||
### Status Message Format (12 bytes)
|
||||
|
||||
alt Auth response 01 (no auth)
|
||||
Note over CLI: No auth, proceed
|
||||
else Auth response 02 (MD5)
|
||||
MK->>TCP: Challenge [16 random bytes]
|
||||
CLI->>TCP: MD5 response [48 bytes]
|
||||
MK->>TCP: AUTH_OK
|
||||
else Auth response 03 (EC-SRP5)
|
||||
CLI->>TCP: [len][username\0][client_pubkey:32][parity:1]
|
||||
MK->>TCP: [len][server_pubkey:32][parity:1][salt:16]
|
||||
CLI->>TCP: [len][client_confirmation:32]
|
||||
MK->>TCP: [len][server_confirmation:32]
|
||||
MK->>TCP: AUTH_OK
|
||||
end
|
||||
|
||||
Note over CLI,MK: Data transfer begins<br/>(TCP or UDP, same as server)
|
||||
```
|
||||
[0x07][cpu:1][pad:2][seq:4 LE][bytes_received:4 LE]
|
||||
```
|
||||
|
||||
- Byte 0: `0x07` (STATUS_MSG_TYPE)
|
||||
- Byte 1: `0x80 | cpu_percentage` (MikroTik encoding)
|
||||
- Bytes 4-7: sequence number (little-endian u32)
|
||||
- Bytes 8-11: bytes received this interval (little-endian u32)
|
||||
|
||||
## Threading Model
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph "Server Process"
|
||||
LISTEN["Main Loop<br/>Accept connections"]
|
||||
LISTEN -->|spawn per client| HANDLER
|
||||
All I/O is async via tokio. Per-client:
|
||||
- **TX task**: sends data packets at target rate
|
||||
- **RX task**: receives data, counts bytes, extracts status messages (TCP BOTH mode)
|
||||
- **Status loop**: exchanges 12-byte status messages every 1s, prints bandwidth
|
||||
- **Status reader** (TCP TX-only): reads server's status messages for remote CPU
|
||||
|
||||
subgraph "Per-Client Tasks (tokio)"
|
||||
HANDLER["Connection Handler<br/>Handshake + Auth"]
|
||||
HANDLER --> TX["TX Task<br/>Send data packets"]
|
||||
HANDLER --> RX["RX Task<br/>Receive data packets"]
|
||||
HANDLER --> STATUS["Status Loop<br/>Exchange stats every 1s"]
|
||||
end
|
||||
end
|
||||
Shared state via `Arc<BandwidthState>` with atomic counters — no mutexes.
|
||||
|
||||
subgraph "Shared State (Arc + Atomics)"
|
||||
STATE["BandwidthState"]
|
||||
TX_BYTES["tx_bytes: AtomicU64"]
|
||||
RX_BYTES["rx_bytes: AtomicU64"]
|
||||
TX_SPEED["tx_speed: AtomicU32"]
|
||||
RUNNING["running: AtomicBool"]
|
||||
end
|
||||
### BandwidthState Fields
|
||||
|
||||
| Field | Type | Purpose |
|
||||
|-------|------|---------|
|
||||
| `tx_bytes` | AtomicU64 | Bytes sent this interval (reset by swap) |
|
||||
| `rx_bytes` | AtomicU64 | Bytes received this interval |
|
||||
| `tx_speed` | AtomicU32 | Target TX speed (dynamic, from server feedback) |
|
||||
| `running` | AtomicBool | Test active flag |
|
||||
| `remote_cpu` | AtomicU8 | Remote peer's CPU (from status messages) |
|
||||
| `byte_budget` | AtomicU64 | Remaining quota bytes (u64::MAX = unlimited) |
|
||||
| `total_tx_bytes` | AtomicU64 | Cumulative TX (never reset) |
|
||||
| `total_rx_bytes` | AtomicU64 | Cumulative RX (never reset) |
|
||||
|
||||
## Server Pro Architecture
|
||||
|
||||
Optional feature (`--features pro`) providing a multi-user public btest server.
|
||||
|
||||
TX --> TX_BYTES
|
||||
RX --> RX_BYTES
|
||||
STATUS --> TX_BYTES
|
||||
STATUS --> RX_BYTES
|
||||
STATUS --> TX_SPEED
|
||||
TX --> TX_SPEED
|
||||
TX --> RUNNING
|
||||
RX --> RUNNING
|
||||
STATUS --> RUNNING
|
||||
```
|
||||
Accept → IP check → HELLO → Command → Auth (DB) → Quota check → Budget set → Test
|
||||
↓
|
||||
QuotaEnforcer (parallel)
|
||||
- checks every N seconds
|
||||
- max_duration timeout
|
||||
- sets running=false on exceed
|
||||
```
|
||||
|
||||
**Byte budget**: Before the test starts, `remaining_budget()` computes the minimum remaining quota across all applicable limits. This is stored in `BandwidthState.byte_budget`. Every TX/RX loop checks `spend_budget()` per-packet — when budget hits 0, the test stops immediately. This prevents quota overshoot even on 10+ Gbps links.
|
||||
|
||||
**Multi-connection TCP**: MikroTik sends `tcp_conn_count` connections. The first authenticates and registers a session token. Subsequent connections match by token and join. When all connections arrive, the test starts with per-stream TX/RX tasks.
|
||||
|
||||
**Web dashboard** (axum):
|
||||
- `GET /` — landing page with instructions
|
||||
- `GET /dashboard/{ip}` — per-IP dashboard with Chart.js graph, session table, quota bars
|
||||
- `GET /api/ip/{ip}/stats` — aggregate stats JSON
|
||||
- `GET /api/ip/{ip}/sessions` — session list JSON
|
||||
- `GET /api/ip/{ip}/quota` — quota usage JSON
|
||||
- `GET /api/ip/{ip}/export` — full export with human-readable fields
|
||||
- `GET /api/session/{id}/intervals` — per-second throughput data
|
||||
|
||||
## CPU Usage Monitoring
|
||||
|
||||
A background OS thread samples system CPU every 1 second:
|
||||
|
||||
| Platform | Method |
|
||||
|----------|--------|
|
||||
| macOS | `host_statistics(HOST_CPU_LOAD_INFO)` |
|
||||
| Linux | `/proc/stat` aggregate CPU line |
|
||||
| Android | `/proc/stat` (same as Linux) |
|
||||
| Windows | `GetSystemTimes()` FFI |
|
||||
| FreeBSD | `sysctl kern.cp_time` |
|
||||
|
||||
Stored in global `AtomicU8`, included in status messages as `0x80 | percentage`.
|
||||
|
||||
## Build Targets
|
||||
|
||||
| Target | Binary | Notes |
|
||||
|--------|--------|-------|
|
||||
| `x86_64-unknown-linux-musl` | btest | Static, zero deps |
|
||||
| `aarch64-unknown-linux-musl` | btest | RPi 4/5, ARM servers |
|
||||
| `armv7-unknown-linux-musleabihf` | btest | RPi 3, OpenWrt |
|
||||
| `x86_64-pc-windows-gnu` | btest.exe | Cross-compiled |
|
||||
| `aarch64-linux-android` | btest | Termux ARMv8 |
|
||||
| `armv7-linux-androideabi` | btest | Termux ARMv7 |
|
||||
| macOS (native) | btest | Apple Silicon + Intel |
|
||||
| Docker (multi-arch) | image | amd64 + arm64 |
|
||||
|
||||
## Key Design Decisions
|
||||
|
||||
### 1. Tokio async runtime
|
||||
1. **Tokio async runtime** — all I/O is async, handles hundreds of concurrent connections
|
||||
2. **Lock-free shared state** — AtomicU64 counters, `swap(0)` reads and resets per interval
|
||||
3. **Direction bits from server perspective** — `0x01`=server RX, `0x02`=server TX, `0x03`=both
|
||||
4. **TCP socket half keepalive** — dropping `OwnedWriteHalf` sends FIN, so unused halves are kept alive
|
||||
5. **Static musl binary** — ~2 MB, zero runtime dependencies
|
||||
6. **EC-SRP5 with big integer arithmetic** — Curve25519 Weierstrass form via `num-bigint`
|
||||
7. **Global singletons for syslog/CSV** — `Mutex<Option<...>>` statics, initialized once at startup
|
||||
8. **Shared BandwidthState for timeout survival** — state created in main(), survives tokio cancellation
|
||||
9. **Inline byte budget** — per-packet quota check with fast path (u64::MAX = unlimited, returns immediately)
|
||||
10. **TCP status message scanning** — RX loop detects 12-byte status messages in the data stream by scanning for `0x07` marker byte to extract remote CPU
|
||||
|
||||
All I/O is async via tokio. Each client connection spawns independent tasks for TX, RX, and status exchange. This allows handling hundreds of concurrent connections on a single thread pool.
|
||||
## Tests
|
||||
|
||||
### 2. Lock-free shared state
|
||||
|
||||
TX/RX threads and the status loop share bandwidth counters via `AtomicU64`. No mutexes needed -- `swap(0)` atomically reads and resets counters each interval.
|
||||
|
||||
### 3. Sequential status loop (matching C pselect)
|
||||
|
||||
The UDP status exchange uses a sequential timeout-read-then-send pattern rather than `tokio::select!`. This ensures our status messages are sent exactly every 1 second, preventing MikroTik's speed adaptation from seeing irregular feedback.
|
||||
|
||||
### 4. Direction bits from server perspective
|
||||
|
||||
The direction byte in the protocol means what the **server** should do:
|
||||
- `0x01` (CMD_DIR_RX) = server receives
|
||||
- `0x02` (CMD_DIR_TX) = server transmits
|
||||
- `0x03` (CMD_DIR_BOTH) = bidirectional
|
||||
|
||||
The client inverts before sending: client "transmit" sends `CMD_DIR_RX` (telling server to receive).
|
||||
|
||||
### 5. TCP socket half keepalive
|
||||
|
||||
When only one direction is active (e.g., TX only), the unused socket half is kept alive. Dropping `OwnedWriteHalf` sends a TCP FIN, which MikroTik interprets as disconnection.
|
||||
|
||||
### 6. Static musl binary
|
||||
|
||||
Release builds use musl for a fully static binary with zero runtime dependencies. The binary is approximately 2 MB and runs on any Linux distribution.
|
||||
|
||||
### 7. EC-SRP5 with big integer arithmetic
|
||||
|
||||
The EC-SRP5 implementation uses `num-bigint` for Curve25519 Weierstrass-form elliptic curve arithmetic. MikroTik's authentication uses the Weierstrass form (not the more common Montgomery or Edwards forms), requiring direct field arithmetic over the prime `2^255 - 19`. The implementation includes point multiplication, `lift_x`, `redp1` (hash-to-curve), and Montgomery coordinate conversion.
|
||||
|
||||
### 8. Global singletons for syslog and CSV
|
||||
|
||||
The syslog and CSV modules use `Mutex<Option<...>>` global statics. This avoids threading state through every function call while remaining safe. Both modules are initialized once at startup and used from any async task via their public API functions.
|
||||
|
||||
### 9. Shared BandwidthState for client duration timeout
|
||||
|
||||
When running with `--duration`, the tokio timeout cancels the client future. To preserve stats accumulated during the test, `BandwidthState` is created in `main()` and passed as an `Arc` into `run_client()`. The state survives cancellation because `main()` holds a reference. The `record_interval()` method accumulates totals that `summary()` returns.
|
||||
|
||||
### 10. IPv6 socket handling
|
||||
|
||||
IPv6 requires special handling on macOS:
|
||||
- UDP sockets bind to `[::]` for IPv6 peers, `0.0.0.0` for IPv4
|
||||
- Socket send/receive buffers set to 4MB via `socket2` before wrapping with tokio
|
||||
- `SocketAddr::new()` used instead of string formatting (avoids `[addr]:port` parsing issues)
|
||||
- Connected sockets preferred for single-connection (avoids ENOBUFS on `send_to()`)
|
||||
- NDP probe packet sent before data blast to populate neighbor cache
|
||||
- Adaptive backoff on ENOBUFS (200μs→10ms, resets on success)
|
||||
|
||||
### 11. CPU usage monitoring
|
||||
|
||||
A background OS thread samples system CPU every 1 second via:
|
||||
- **macOS:** `host_statistics(HOST_CPU_LOAD_INFO)` — returns user/system/idle/nice ticks
- **Linux/Android:** `/proc/stat` — reads aggregate CPU line
- **Windows:** `GetSystemTimes()` — returns idle/kernel/user times
- **FreeBSD:** `sysctl kern.cp_time` — returns user/nice/system/interrupt/idle ticks
|
||||
|
||||
The percentage is stored in a global `AtomicU8` and included in every status message at byte 1 using MikroTik's encoding: `0x80 | percentage`. On receive, the remote CPU is decoded with `byte & 0x7F` and capped at 100%. Both local and remote CPU are displayed per interval and logged to CSV/syslog.
|
||||
|
||||
## File Layout
|
||||
|
||||
```
|
||||
btest-rs/
|
||||
├── src/
|
||||
│ ├── main.rs # CLI entry point, argument parsing (clap)
|
||||
│ ├── lib.rs # Public API (used by integration tests)
|
||||
│ ├── protocol.rs # Wire format: Command, StatusMessage, constants
|
||||
│ ├── auth.rs # MD5 challenge-response authentication
|
||||
│ ├── ecsrp5.rs # EC-SRP5 authentication (Curve25519 Weierstrass)
|
||||
│ ├── server.rs # Server mode: listener, TCP/UDP handlers
|
||||
│ ├── client.rs # Client mode: connector, TCP/UDP handlers
|
||||
│ ├── bandwidth.rs # Rate limiting, formatting, shared state
|
||||
│ ├── cpu.rs # CPU usage sampler (macOS + Linux)
|
||||
│ ├── csv_output.rs # CSV result logging (append-mode, auto-header)
|
||||
│ └── syslog_logger.rs # Remote syslog sender (RFC 3164 / BSD format)
|
||||
├── tests/
|
||||
│ └── integration_test.rs # End-to-end server/client tests
|
||||
├── scripts/
|
||||
│ ├── build-linux.sh # Cross-compile for x86_64 Linux (musl)
|
||||
│ ├── build-macos-release.sh # macOS release build
|
||||
│ ├── install-service.sh # systemd service installer
|
||||
│ ├── push-docker.sh # Push Docker image to registry
|
||||
│ ├── test-local.sh # Loopback self-test
|
||||
│ ├── test-mikrotik.sh # Test against MikroTik device
|
||||
│ ├── test-docker.sh # Docker container test
|
||||
│ └── debug-capture.sh # Packet capture for debugging
|
||||
├── docs/
|
||||
│ ├── architecture.md # This file
|
||||
│ ├── protocol.md # Protocol specification
|
||||
│ ├── user-guide.md # Usage documentation
|
||||
│ ├── docker.md # Docker & deployment guide
|
||||
│ ├── ecsrp5-research.md # EC-SRP5 reverse-engineering notes
|
||||
│ └── man/
|
||||
│ └── btest.1 # Unix manual page (troff format)
|
||||
├── tests/
|
||||
│ ├── integration_test.rs # Basic server/client handshake tests
|
||||
│ ├── ecsrp5_test.rs # EC-SRP5 authentication tests
|
||||
│ └── full_integration_test.rs # Comprehensive: all protocols, IPv4/6, CSV, syslog
|
||||
├── deploy/
|
||||
│ └── syslog-ng-btest.conf # syslog-ng configuration for btest events
|
||||
├── proto-test/ # Python EC-SRP5 prototype (research branch)
|
||||
│ ├── btest_ecsrp5_client.py # Working Python btest EC-SRP5 client
|
||||
│ ├── btest_mitm.py # MITM proxy for protocol analysis
|
||||
│ └── elliptic_curves.py # Curve25519 Weierstrass (MarginResearch)
|
||||
├── KNOWN_ISSUES.md # Known bugs and platform limitations
|
||||
├── Dockerfile # Production Docker image (multi-stage)
|
||||
├── Dockerfile.cross # Cross-compilation for Linux x86_64
|
||||
├── docker-compose.yml # Docker Compose configuration
|
||||
├── Cargo.toml # Rust package manifest
|
||||
├── Cargo.lock # Dependency lock file
|
||||
├── LICENSE # MIT License
|
||||
└── btest-opensource/ # Original C implementation (git submodule)
|
||||
```
|
||||
| Suite | Count | What |
|
||||
|-------|-------|------|
|
||||
| Unit tests (lib) | 12 | Bandwidth parsing, CPU sampling, auth hash vectors |
|
||||
| Enforcer tests (pro) | 10 | Budget, quota, duration, flush |
|
||||
| Integration tests | 8 | Server/client handshake, auth, TCP data |
|
||||
| EC-SRP5 tests | 6 | Full auth flow, wrong password, UDP bidir |
|
||||
| Full integration | 23 | All protocols × directions, IPv4/6, CSV, syslog, CPU |
|
||||
| **Total** | **59** | |
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
# Docker and Deployment Guide
|
||||
|
||||
## Container Registry
|
||||
## Container Registries
|
||||
|
||||
Images are published to:
|
||||
|
||||
```
|
||||
git.manko.yoga/manawenuz/btest-rs
|
||||
git.manko.yoga/manawenuz/btest-rs # Gitea registry
|
||||
ghcr.io/manawenuz/btest-rs # GitHub Container Registry
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
@@ -87,14 +88,14 @@ docker run --rm -it btest-rs -c 192.168.88.1 -r -a admin -p password
|
||||
|
||||
```bash
|
||||
# Pull from Gitea registry
|
||||
docker pull git.manko.yoga/manawenuz/btest-rs:latest
|
||||
docker pull ghcr.io/manawenuz/btest-rs:latest
|
||||
|
||||
# Run server
|
||||
docker run --rm -it \
|
||||
-p 2000:2000/tcp \
|
||||
-p 2001-2100:2001-2100/udp \
|
||||
-p 2257-2356:2257-2356/udp \
|
||||
git.manko.yoga/manawenuz/btest-rs:latest -s -v
|
||||
ghcr.io/manawenuz/btest-rs:latest -s -v
|
||||
```
|
||||
|
||||
## Docker Compose
|
||||
@@ -185,7 +186,7 @@ docker build -t btest-rs .
|
||||
|
||||
# With custom tag
|
||||
docker build -t git.manko.yoga/manawenuz/btest-rs:latest .
|
||||
docker build -t git.manko.yoga/manawenuz/btest-rs:0.5.0 .
|
||||
docker build -t git.manko.yoga/manawenuz/btest-rs:0.6.0 .
|
||||
```
|
||||
|
||||
### Multi-platform build
|
||||
@@ -193,7 +194,7 @@ docker build -t git.manko.yoga/manawenuz/btest-rs:0.5.0 .
|
||||
```bash
|
||||
docker buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
-t git.manko.yoga/manawenuz/btest-rs:latest \
|
||||
-t ghcr.io/manawenuz/btest-rs:latest \
|
||||
--push .
|
||||
```
|
||||
|
||||
@@ -208,9 +209,9 @@ docker build -t git.manko.yoga/manawenuz/btest-rs:latest .
|
||||
docker push git.manko.yoga/manawenuz/btest-rs:latest
|
||||
|
||||
# Also tag with version
|
||||
docker tag git.manko.yoga/manawenuz/btest-rs:latest \
|
||||
git.manko.yoga/manawenuz/btest-rs:0.5.0
|
||||
docker push git.manko.yoga/manawenuz/btest-rs:0.5.0
|
||||
docker tag ghcr.io/manawenuz/btest-rs:latest \
|
||||
git.manko.yoga/manawenuz/btest-rs:0.6.0
|
||||
docker push git.manko.yoga/manawenuz/btest-rs:0.6.0
|
||||
```
|
||||
|
||||
## Deployment Options
|
||||
@@ -223,7 +224,7 @@ docker run -d --name btest-server \
|
||||
-p 2000:2000/tcp \
|
||||
-p 2001-2100:2001-2100/udp \
|
||||
-p 2257-2356:2257-2356/udp \
|
||||
git.manko.yoga/manawenuz/btest-rs:latest \
|
||||
ghcr.io/manawenuz/btest-rs:latest \
|
||||
-s -a admin -p password --ecsrp5 -v
|
||||
```
|
||||
|
||||
|
||||
50
scripts/push-docker-all.sh
Executable file
50
scripts/push-docker-all.sh
Executable file
@@ -0,0 +1,50 @@
|
||||
#!/usr/bin/env bash
# Build the btest-rs Docker image once and push it to both registries
# (Gitea and GitHub Container Registry), tagged with <tag> and latest.
#
# Prerequisites:
#   docker login git.manko.yoga   (Gitea — your username + token)
#   docker login ghcr.io          (GitHub — your username + PAT with packages:write)
#
# Usage:
#   ./scripts/push-docker-all.sh v0.6.0
set -euo pipefail

# Always operate from the repository root, regardless of invocation dir.
cd "$(dirname "$0")/.."

# Pick up optional local overrides (registry credentials, etc.).
if [[ -f .env ]]; then
  set -a; source .env; set +a
fi

TAG="${1:?Usage: $0 <tag> (e.g. v0.6.0)}"

GITEA_IMAGE="git.manko.yoga/manawenuz/btest-rs"
GHCR_IMAGE="ghcr.io/manawenuz/btest-rs"

# One build, four tags — both registries get the identical image.
echo "=== Building Docker image ==="
docker build \
  -t "${GITEA_IMAGE}:${TAG}" \
  -t "${GITEA_IMAGE}:latest" \
  -t "${GHCR_IMAGE}:${TAG}" \
  -t "${GHCR_IMAGE}:latest" \
  .

echo ""
echo "=== Pushing to Gitea ==="
for ref in "${GITEA_IMAGE}:${TAG}" "${GITEA_IMAGE}:latest"; do
  docker push "$ref"
done

echo ""
echo "=== Pushing to GitHub Container Registry ==="
for ref in "${GHCR_IMAGE}:${TAG}" "${GHCR_IMAGE}:latest"; do
  docker push "$ref"
done

echo ""
echo "Done! Images pushed:"
for image in "${GITEA_IMAGE}" "${GHCR_IMAGE}"; do
  echo "  ${image}:${TAG}"
  echo "  ${image}:latest"
done
echo ""
echo "Pull with:"
echo "  docker pull ${GHCR_IMAGE}:${TAG}"
echo "  docker run --rm -p 2000:2000 -p 2001-2100:2001-2100/udp ${GHCR_IMAGE}:${TAG} -s -v"
|
||||
120
scripts/sync-github-release.sh
Executable file
120
scripts/sync-github-release.sh
Executable file
@@ -0,0 +1,120 @@
|
||||
#!/usr/bin/env bash
# Sync a release from Gitea to GitHub.
# Downloads all binaries from the Gitea release, merges per-file checksums,
# creates the matching GitHub release, and uploads everything.
#
# Prerequisites:
#   gh auth login        (GitHub CLI authenticated)
#
# Usage:
#   ./scripts/sync-github-release.sh v0.6.0
set -euo pipefail

cd "$(dirname "$0")/.."

if [[ -f .env ]]; then
    set -a; source .env; set +a
fi

TAG="${1:?Usage: $0 <tag> (e.g. v0.6.0)}"
GITEA_URL="https://git.manko.yoga"
GITEA_REPO="manawenuz/btest-rs"
GITHUB_REPO="manawenuz/btest-rs"

# Stage everything in a per-tag scratch dir; keep the path quoted everywhere
# in case TAG ever contains shell metacharacters.
WORK_DIR="/tmp/btest-release-${TAG}"

echo "=== Downloading assets from Gitea release ${TAG} ==="
mkdir -p "${WORK_DIR}"
cd "${WORK_DIR}"
# Clear leftovers from a previous (possibly interrupted) run, including
# stray per-file .sha256 fragments that would pollute the merged list.
rm -f *.tar.gz *.zip *.txt *.sha256

# Ask the Gitea API for the release's asset download URLs.
ASSETS=$(curl -sf "${GITEA_URL}/api/v1/repos/${GITEA_REPO}/releases/tags/${TAG}" | \
    python3 -c "import sys,json; [print(a['browser_download_url']) for a in json.load(sys.stdin).get('assets',[])]")

if [ -z "$ASSETS" ]; then
    echo "No assets found for ${TAG} on Gitea. Check if the release exists."
    exit 1
fi

# URLs come from our own Gitea instance and contain no whitespace, so plain
# word-splitting is safe here.
for url in $ASSETS; do
    FILENAME=$(basename "$url")
    echo "  Downloading: $FILENAME"
    curl -sLO "$url"
done

# Merge all separate .sha256 files into checksums-sha256.txt
# and remove the individual .sha256 files
echo ""
echo "=== Merging checksums ==="
for sha_file in *.sha256; do
    [ -f "$sha_file" ] || continue
    echo "  Merging: $sha_file"
    cat "$sha_file" >> checksums-sha256.txt
    rm "$sha_file"
done

# Add checksums for any files not yet in checksums-sha256.txt.
for f in *.tar.gz *.zip; do
    [ -f "$f" ] || continue
    # grep -F: match the filename literally. With the default regex mode the
    # dots in e.g. "btest-linux-x86_64.tar.gz" match any character and could
    # hit the wrong entry.
    if ! grep -qF -- "$f" checksums-sha256.txt 2>/dev/null; then
        echo "  Adding checksum for: $f"
        shasum -a 256 "$f" >> checksums-sha256.txt
    fi
done

# Sort by filename and deduplicate.
sort -u -k2 checksums-sha256.txt > checksums-sha256.tmp && mv checksums-sha256.tmp checksums-sha256.txt

echo ""
echo "Checksums:"
cat checksums-sha256.txt

echo ""
echo "Files to upload:"
# || true: a missing glob (e.g. no *.zip built) must not abort under set -e.
ls -lh *.tar.gz *.zip checksums-sha256.txt 2>/dev/null || true

echo ""
echo "=== Creating GitHub release ${TAG} ==="
# Best-effort: the release may already exist; surface the failure instead of
# silently swallowing it.
gh release create "${TAG}" \
    --repo "${GITHUB_REPO}" \
    --title "btest-rs ${TAG}" \
    --notes "## Downloads

| Platform | Architecture | File |
|----------|-------------|------|
| Linux | x86_64 | btest-linux-x86_64.tar.gz |
| Linux | aarch64 (RPi 64-bit) | btest-linux-aarch64.tar.gz |
| Linux | armv7 (RPi 32-bit) | btest-linux-armv7.tar.gz |
| Windows | x86_64 | btest-windows-x86_64.zip |
| macOS | aarch64 (Apple Silicon) | btest-darwin-aarch64.tar.gz |
| Docker | x86_64 | \`docker pull ghcr.io/manawenuz/btest-rs:${TAG}\` |

### Quick Install (Linux)

\`\`\`bash
curl -LO https://github.com/${GITHUB_REPO}/releases/download/${TAG}/btest-linux-x86_64.tar.gz
tar xzf btest-linux-x86_64.tar.gz
sudo mv btest /usr/local/bin/
\`\`\`

### Raspberry Pi

\`\`\`bash
# 64-bit
curl -LO https://github.com/${GITHUB_REPO}/releases/download/${TAG}/btest-linux-aarch64.tar.gz
tar xzf btest-linux-aarch64.tar.gz
sudo mv btest /usr/local/bin/

# 32-bit
curl -LO https://github.com/${GITHUB_REPO}/releases/download/${TAG}/btest-linux-armv7.tar.gz
tar xzf btest-linux-armv7.tar.gz
sudo mv btest /usr/local/bin/
\`\`\`
" \
    ./*.tar.gz ./*.zip ./*.txt 2>/dev/null \
    || echo "WARNING: gh release create failed — the release may already exist on GitHub."

echo ""
echo "=== Done! ==="
echo "https://github.com/${GITHUB_REPO}/releases/tag/${TAG}"

# Cleanup
cd -
rm -rf "${WORK_DIR}"
|
||||
64
scripts/test-aur-remote.sh
Executable file
64
scripts/test-aur-remote.sh
Executable file
@@ -0,0 +1,64 @@
|
||||
#!/usr/bin/env bash
# Test the AUR package on a remote x86_64 Linux server using Docker.
#
# Usage:
#   ./scripts/test-aur-remote.sh [user@host]
#
# Spins up an Arch container, installs btest-rs via yay (like a real user),
# runs loopback tests, cleans up.
set -euo pipefail

# Optional SSH target; when empty the test runs against the local Docker daemon.
REMOTE="${1:-}"

# The whole test is one shell string, single-quoted so nothing expands here.
# It is either sent over ssh or eval'd locally; the container-side script
# inside `docker run ... bash -c "..."` uses escaped double quotes. Be very
# careful editing the quoting — three shell layers are nested.
TEST_SCRIPT='
docker run --rm archlinux:latest bash -c "
set -euo pipefail

echo \"[1/4] Installing yay...\"
pacman -Syu --noconfirm base-devel git sudo >/dev/null 2>&1
useradd -m builder
echo \"builder ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers
su builder -c \"
cd /tmp
git clone https://aur.archlinux.org/yay-bin.git 2>/dev/null
cd yay-bin
makepkg -si --noconfirm 2>&1 | tail -3
\"

echo \"[2/4] Installing btest-rs from AUR via yay...\"
su builder -c \"yay -S btest-rs --noconfirm 2>&1 | tail -10\"

echo \"\"
echo \"[3/4] Verify installation...\"
btest --version
which btest
man -w btest.1 2>/dev/null && echo \"Man page: installed\" || echo \"Man page: not found\"
systemctl cat btest.service 2>/dev/null | head -3 && echo \"Systemd unit: installed\" || echo \"Systemd unit: not found\"

echo \"\"
echo \"[4/4] Loopback tests...\"

echo \"--- TCP (3s) ---\"
btest -s -P 19876 &
sleep 2
btest -c 127.0.0.1 -P 19876 -r -d 3
kill %1 2>/dev/null; wait 2>/dev/null || true

echo \"--- UDP (3s) ---\"
btest -s -P 19877 &
sleep 2
btest -c 127.0.0.1 -P 19877 -r -u -d 3
kill %1 2>/dev/null; wait 2>/dev/null || true

echo \"\"
echo \"=== ALL TESTS PASSED ===\"
"
'

# Run the script remotely when a host was given, otherwise locally via eval.
if [ -n "$REMOTE" ]; then
  echo "=== Testing AUR package on $REMOTE ==="
  ssh "$REMOTE" "$TEST_SCRIPT"
else
  echo "=== Testing AUR package locally ==="
  eval "$TEST_SCRIPT"
fi
|
||||
@@ -20,6 +20,9 @@ pub struct BandwidthState {
|
||||
pub intervals: AtomicU32,
|
||||
/// Remote peer's CPU usage (received via status messages)
|
||||
pub remote_cpu: AtomicU8,
|
||||
/// Remaining byte budget (TX + RX combined). When this reaches 0 the test
|
||||
/// stops immediately. u64::MAX means unlimited (default for non-pro server).
|
||||
pub byte_budget: AtomicU64,
|
||||
}
|
||||
|
||||
impl BandwidthState {
|
||||
@@ -38,6 +41,7 @@ impl BandwidthState {
|
||||
total_lost_packets: AtomicU64::new(0),
|
||||
intervals: AtomicU32::new(0),
|
||||
remote_cpu: AtomicU8::new(0),
|
||||
byte_budget: AtomicU64::new(u64::MAX),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -50,6 +54,30 @@ impl BandwidthState {
|
||||
self.intervals.fetch_add(1, Relaxed);
|
||||
}
|
||||
|
||||
/// Try to spend `amount` bytes from the budget. Returns `true` if allowed,
|
||||
/// `false` if the budget is exhausted (and sets `running = false`).
|
||||
#[inline]
|
||||
pub fn spend_budget(&self, amount: u64) -> bool {
|
||||
use std::sync::atomic::Ordering::{Relaxed, SeqCst};
|
||||
// Fast path: unlimited budget (non-pro server)
|
||||
let current = self.byte_budget.load(Relaxed);
|
||||
if current == u64::MAX {
|
||||
return true;
|
||||
}
|
||||
if current < amount {
|
||||
self.running.store(false, SeqCst);
|
||||
return false;
|
||||
}
|
||||
self.byte_budget.fetch_sub(amount, Relaxed);
|
||||
true
|
||||
}
|
||||
|
||||
/// Set the byte budget (total bytes allowed for the entire test).
|
||||
#[cfg(feature = "pro")]
|
||||
pub fn set_budget(&self, budget: u64) {
|
||||
self.byte_budget.store(budget, std::sync::atomic::Ordering::SeqCst);
|
||||
}
|
||||
|
||||
/// Get summary for syslog reporting.
|
||||
pub fn summary(&self) -> (u64, u64, u64, u32) {
|
||||
use std::sync::atomic::Ordering::Relaxed;
|
||||
@@ -80,6 +108,34 @@ pub fn calc_send_interval(tx_speed_bps: u32, tx_size: u16) -> Option<Duration> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Advance `next_send` by one interval, clamping any accumulated lateness.
///
/// A sender that stalls (e.g. a write blocked longer than the inter-packet
/// interval) leaves `next_send` in the past; naively catching up would emit
/// a burst of back-to-back packets and overshoot the target rate. If the new
/// deadline is still more than one full interval in the past, it is snapped
/// to `now`, bounding the catch-up burst to at most one extra interval's
/// worth of packets.
///
/// Returns `Some(delay)` when the caller should sleep before the next send,
/// or `None` when the next packet is already due.
pub fn advance_next_send(
    next_send: &mut std::time::Instant,
    iv: Duration,
    now: std::time::Instant,
) -> Option<Duration> {
    let scheduled = *next_send + iv;
    // Still more than one interval late even after advancing → clamp to now.
    *next_send = if scheduled + iv < now { now } else { scheduled };
    let wait = next_send.saturating_duration_since(now);
    if wait > Duration::ZERO {
        Some(wait)
    } else {
        None
    }
}
|
||||
|
||||
/// Format a bandwidth value in human-readable form.
|
||||
pub fn format_bandwidth(bits_per_sec: f64) -> String {
|
||||
if bits_per_sec >= 1_000_000_000.0 {
|
||||
|
||||
127
src/bin/client_only.rs
Normal file
127
src/bin/client_only.rs
Normal file
@@ -0,0 +1,127 @@
|
||||
//! btest-client: minimal bandwidth test client for embedded/OpenWrt systems.
|
||||
//!
|
||||
//! Stripped-down client that connects to MikroTik btest servers.
|
||||
//! No server mode, no syslog, smaller binary footprint.
|
||||
//!
|
||||
//! Build: cargo build --profile release-small --bin btest-client
|
||||
|
||||
use clap::Parser;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
#[derive(Parser)]
#[command(name = "btest-client", about = "MikroTik Bandwidth Test client", version)]
// NOTE: the `///` comments on the fields below double as clap's --help text;
// editing them changes the user-visible CLI help output.
struct Cli {
    /// Server address to connect to
    #[arg(short = 'c', long = "client", required = true)]
    host: String,

    /// Transmit data (upload)
    #[arg(short = 't', long = "transmit")]
    transmit: bool,

    /// Receive data (download)
    #[arg(short = 'r', long = "receive")]
    receive: bool,

    /// Use UDP
    #[arg(short = 'u', long = "udp")]
    udp: bool,

    /// Bandwidth limit (e.g., 100M)
    #[arg(short = 'b', long = "bandwidth")]
    bandwidth: Option<String>,

    /// Port
    #[arg(short = 'P', long = "port", default_value_t = 2000)]
    port: u16,

    /// Username
    #[arg(short = 'a', long = "authuser")]
    auth_user: Option<String>,

    /// Password
    #[arg(short = 'p', long = "authpass")]
    auth_pass: Option<String>,

    /// NAT mode
    #[arg(short = 'n', long = "nat")]
    nat: bool,

    /// Duration in seconds (0=unlimited)
    #[arg(short = 'd', long = "duration", default_value_t = 0)]
    duration: u64,

    /// Verbose
    // Counted flag: -v = debug, -vv (or more) = trace; see main().
    #[arg(short = 'v', long = "verbose", action = clap::ArgAction::Count)]
    verbose: u8,
}
|
||||
|
||||
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    // Map the -v count to a default tracing filter; RUST_LOG, when set,
    // still takes precedence via try_from_default_env().
    let filter = match cli.verbose {
        0 => "info",
        1 => "debug",
        _ => "trace",
    };
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(filter)),
        )
        .with_target(false)
        .init();

    // Background OS thread that samples system CPU for status messages.
    btest_rs::cpu::start_sampler();

    if !cli.transmit && !cli.receive {
        eprintln!("Error: specify -t (transmit) and/or -r (receive)");
        std::process::exit(1);
    }

    // Direction bits are from the *server's* perspective, so the client
    // inverts: our transmit asks the server to receive (CMD_DIR_RX) and
    // our receive asks it to transmit (CMD_DIR_TX).
    let direction = match (cli.transmit, cli.receive) {
        (true, false) => btest_rs::protocol::CMD_DIR_RX,
        (false, true) => btest_rs::protocol::CMD_DIR_TX,
        (true, true) => btest_rs::protocol::CMD_DIR_BOTH,
        _ => unreachable!(),
    };

    // 0 means "no limit" downstream.
    let bw = match &cli.bandwidth {
        Some(b) => btest_rs::bandwidth::parse_bandwidth(b)?,
        None => 0,
    };

    // Apply the limit only to the active direction(s).
    let (tx_speed, rx_speed) = match direction {
        btest_rs::protocol::CMD_DIR_TX => (bw, 0),
        btest_rs::protocol::CMD_DIR_RX => (0, bw),
        _ => (bw, bw),
    };

    // The shared state is created here (not inside run_client) so it
    // survives when the --duration timeout cancels the client future below.
    let state = btest_rs::bandwidth::BandwidthState::new();
    let state_clone = state.clone();

    let host = cli.host.clone();
    let client_fut = btest_rs::client::run_client(
        &host, cli.port, direction, cli.udp,
        tx_speed, rx_speed,
        cli.auth_user, cli.auth_pass, cli.nat,
        state_clone,
    );

    if cli.duration > 0 {
        // Bounded run: cancel the client future after --duration seconds.
        match tokio::time::timeout(
            std::time::Duration::from_secs(cli.duration),
            client_fut,
        ).await {
            Ok(r) => { let _ = r?; }
            Err(_) => {
                // Timeout elapsed: tell every remaining task to stop.
                state.running.store(false, Ordering::SeqCst);
            }
        }
    } else {
        let _ = client_fut.await?;
    }

    Ok(())
}
|
||||
62
src/bin/server_only.rs
Normal file
62
src/bin/server_only.rs
Normal file
@@ -0,0 +1,62 @@
|
||||
//! btest-server: minimal bandwidth test server for embedded/OpenWrt systems.
|
||||
//!
|
||||
//! Stripped-down server that accepts MikroTik client connections.
|
||||
//! No client mode, no syslog, no CSV, smaller binary footprint.
|
||||
//!
|
||||
//! Build: cargo build --profile release-small --bin btest-server
|
||||
|
||||
use clap::Parser;
|
||||
|
||||
#[derive(Parser)]
#[command(name = "btest-server", about = "MikroTik Bandwidth Test server", version)]
// NOTE: the `///` comments on the fields below double as clap's --help text;
// editing them changes the user-visible CLI help output.
struct Cli {
    /// Port
    #[arg(short = 'P', long = "port", default_value_t = 2000)]
    port: u16,

    /// IPv4 listen address
    // The literal "none" (any case) disables the IPv4 listener; see main().
    #[arg(long = "listen", default_value = "0.0.0.0")]
    listen_addr: String,

    /// Username
    #[arg(short = 'a', long = "authuser")]
    auth_user: Option<String>,

    /// Password
    #[arg(short = 'p', long = "authpass")]
    auth_pass: Option<String>,

    /// Use EC-SRP5 authentication
    #[arg(long = "ecsrp5")]
    ecsrp5: bool,

    /// Verbose
    // Counted flag: -v = debug, -vv (or more) = trace; see main().
    #[arg(short = 'v', long = "verbose", action = clap::ArgAction::Count)]
    verbose: u8,
}
|
||||
|
||||
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    // Map the -v count to a default tracing filter; RUST_LOG, when set,
    // still takes precedence via try_from_default_env().
    let filter = match cli.verbose {
        0 => "info",
        1 => "debug",
        _ => "trace",
    };
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(filter)),
        )
        .with_target(false)
        .init();

    // Background OS thread that samples system CPU for status messages.
    btest_rs::cpu::start_sampler();

    // --listen none disables the IPv4 listener entirely.
    let v4 = if cli.listen_addr.eq_ignore_ascii_case("none") { None } else { Some(cli.listen_addr) };

    tracing::info!("btest-server starting on port {}", cli.port);
    btest_rs::server::run_server(cli.port, cli.auth_user, cli.auth_pass, cli.ecsrp5, v4, None).await?;
    Ok(())
}
|
||||
@@ -127,6 +127,12 @@ async fn run_tcp_test_client(stream: TcpStream, cmd: Command, state: Arc<Bandwid
|
||||
Some(tokio::spawn(async move {
|
||||
tcp_client_rx_loop(reader, state_rx).await
|
||||
}))
|
||||
} else if client_should_tx {
|
||||
// TX-only: still need to read the server's status messages to get remote CPU.
|
||||
// Don't count these bytes as RX data.
|
||||
Some(tokio::spawn(async move {
|
||||
tcp_client_status_reader(reader, state_rx).await
|
||||
}))
|
||||
} else {
|
||||
_reader_keepalive = Some(reader);
|
||||
None
|
||||
@@ -167,10 +173,9 @@ async fn tcp_client_tx_loop(
|
||||
|
||||
match interval {
|
||||
Some(iv) => {
|
||||
next_send += iv;
|
||||
let now = Instant::now();
|
||||
if next_send > now {
|
||||
tokio::time::sleep(next_send - now).await;
|
||||
if let Some(delay) = bandwidth::advance_next_send(&mut next_send, iv, now) {
|
||||
tokio::time::sleep(delay).await;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
@@ -190,11 +195,53 @@ async fn tcp_client_rx_loop(
|
||||
Ok(0) | Err(_) => break,
|
||||
Ok(n) => {
|
||||
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||
// Scan for interleaved 12-byte status messages from the server.
|
||||
// In BOTH mode, the server's TX loop injects status messages into the
|
||||
// data stream. Status starts with 0x07 (STATUS_MSG_TYPE) and byte 1
|
||||
// has the high bit set (0x80 | cpu%). Data packets are all zeros.
|
||||
if n >= STATUS_MSG_SIZE {
|
||||
for i in 0..=(n - STATUS_MSG_SIZE) {
|
||||
if buf[i] == STATUS_MSG_TYPE && buf[i + 1] >= 0x80 {
|
||||
let cpu = buf[i + 1] & 0x7F;
|
||||
state.remote_cpu.store(cpu.min(100), Ordering::Relaxed);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Read only status messages from the server (TX-only mode).
|
||||
/// The server sends 12-byte status messages on the TCP connection even when
|
||||
/// the client is only transmitting. We need to read them to get remote CPU
|
||||
/// and to prevent the TCP receive buffer from filling up.
|
||||
async fn tcp_client_status_reader(
|
||||
mut reader: tokio::net::tcp::OwnedReadHalf,
|
||||
state: Arc<BandwidthState>,
|
||||
) {
|
||||
let mut buf = [0u8; STATUS_MSG_SIZE];
|
||||
while state.running.load(Ordering::Relaxed) {
|
||||
match reader.read_exact(&mut buf).await {
|
||||
Ok(_) => {
|
||||
if buf[0] == STATUS_MSG_TYPE && buf[1] >= 0x80 {
|
||||
let status = StatusMessage::deserialize(&buf);
|
||||
state.remote_cpu.store(status.cpu_load, Ordering::Relaxed);
|
||||
// Use server's bytes_received for TX speed adaptation
|
||||
if status.bytes_received > 0 {
|
||||
let new_speed =
|
||||
((status.bytes_received as u64 * 8 * 3) / 2) as u32;
|
||||
state.tx_speed.store(new_speed, Ordering::Relaxed);
|
||||
state.tx_speed_changed.store(true, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(_) => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- UDP Test Client ---
|
||||
|
||||
async fn run_udp_test_client(
|
||||
@@ -317,10 +364,9 @@ async fn udp_client_tx_loop(
|
||||
|
||||
match interval {
|
||||
Some(iv) => {
|
||||
next_send += iv;
|
||||
let now = Instant::now();
|
||||
if next_send > now {
|
||||
tokio::time::sleep(next_send - now).await;
|
||||
if let Some(delay) = bandwidth::advance_next_send(&mut next_send, iv, now) {
|
||||
tokio::time::sleep(delay).await;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
|
||||
91
src/cpu.rs
91
src/cpu.rs
@@ -1,7 +1,7 @@
|
||||
//! Lightweight CPU usage measurement.
|
||||
//!
|
||||
//! Returns the system-wide CPU usage as a percentage (0-100).
|
||||
//! Works on macOS and Linux without external dependencies.
|
||||
//! Works on macOS, Linux, Windows, and FreeBSD without external dependencies.
|
||||
|
||||
use std::sync::atomic::{AtomicU8, Ordering};
|
||||
use std::time::Duration;
|
||||
@@ -29,7 +29,7 @@ pub fn get() -> u8 {
|
||||
|
||||
// --- Platform-specific implementation ---
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
#[cfg(any(target_os = "linux", target_os = "android"))]
|
||||
fn get_cpu_times() -> (u64, u64) {
|
||||
// Read /proc/stat: cpu user nice system idle iowait irq softirq steal
|
||||
if let Ok(content) = std::fs::read_to_string("/proc/stat") {
|
||||
@@ -93,7 +93,84 @@ fn get_cpu_times() -> (u64, u64) {
|
||||
(0, 0)
|
||||
}
|
||||
|
||||
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
|
||||
#[cfg(target_os = "windows")]
fn get_cpu_times() -> (u64, u64) {
    // Minimal local mirror of the Win32 FILETIME struct — two u32 halves of
    // a 64-bit tick count — declared inline to avoid a winapi/windows-sys
    // dependency. Layout must stay #[repr(C)] to match the ABI.
    #[repr(C)]
    #[derive(Default)]
    #[allow(non_snake_case)]
    struct FILETIME {
        dwLowDateTime: u32,
        dwHighDateTime: u32,
    }

    impl FILETIME {
        // Combine the two 32-bit halves into the full 64-bit tick count.
        fn to_u64(&self) -> u64 {
            (self.dwHighDateTime as u64) << 32 | self.dwLowDateTime as u64
        }
    }

    // FFI binding for GetSystemTimes; "system" selects the stdcall ABI on
    // 32-bit Windows and is a no-op elsewhere.
    extern "system" {
        fn GetSystemTimes(
            lpIdleTime: *mut FILETIME,
            lpKernelTime: *mut FILETIME,
            lpUserTime: *mut FILETIME,
        ) -> i32;
    }

    let mut idle = FILETIME::default();
    let mut kernel = FILETIME::default();
    let mut user = FILETIME::default();

    // SAFETY: We pass valid pointers to stack-allocated FILETIME structs.
    // GetSystemTimes is a well-documented Win32 API that writes into these
    // output parameters. A non-zero return value indicates success.
    let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };

    if ret != 0 {
        let idle_ticks = idle.to_u64();
        // Kernel time includes idle time on Windows, so total = kernel + user.
        let total_ticks = kernel.to_u64() + user.to_u64();
        (total_ticks, idle_ticks)
    } else {
        // Call failed: report zeros so the sampler computes 0% this round.
        (0, 0)
    }
}
|
||||
|
||||
#[cfg(target_os = "freebsd")]
|
||||
fn get_cpu_times() -> (u64, u64) {
|
||||
// kern.cp_time returns: user nice system interrupt idle
|
||||
if let Ok(output) = std::process::Command::new("sysctl")
|
||||
.arg("-n")
|
||||
.arg("kern.cp_time")
|
||||
.output()
|
||||
{
|
||||
if output.status.success() {
|
||||
let text = String::from_utf8_lossy(&output.stdout);
|
||||
let parts: Vec<u64> = text
|
||||
.split_whitespace()
|
||||
.filter_map(|s| s.parse().ok())
|
||||
.collect();
|
||||
if parts.len() >= 5 {
|
||||
let user = parts[0];
|
||||
let nice = parts[1];
|
||||
let system = parts[2];
|
||||
let interrupt = parts[3];
|
||||
let idle = parts[4];
|
||||
let total = user + nice + system + interrupt + idle;
|
||||
return (total, idle);
|
||||
}
|
||||
}
|
||||
}
|
||||
(0, 0)
|
||||
}
|
||||
|
||||
#[cfg(not(any(
|
||||
target_os = "linux",
|
||||
target_os = "android",
|
||||
target_os = "macos",
|
||||
target_os = "windows",
|
||||
target_os = "freebsd",
|
||||
)))]
|
||||
fn get_cpu_times() -> (u64, u64) {
|
||||
(0, 0) // Unsupported platform
|
||||
}
|
||||
@@ -116,7 +193,13 @@ mod tests {
|
||||
fn test_cpu_times_returns_nonzero() {
|
||||
let (total, idle) = get_cpu_times();
|
||||
// On supported platforms, total should be > 0
|
||||
if cfg!(any(target_os = "linux", target_os = "macos")) {
|
||||
if cfg!(any(
|
||||
target_os = "linux",
|
||||
target_os = "android",
|
||||
target_os = "macos",
|
||||
target_os = "windows",
|
||||
target_os = "freebsd",
|
||||
)) {
|
||||
assert!(total > 0, "CPU total ticks should be > 0");
|
||||
assert!(idle <= total, "idle should be <= total");
|
||||
}
|
||||
|
||||
@@ -366,8 +366,43 @@ async fn handle_client(
|
||||
|
||||
// --- TCP Test Server ---
|
||||
|
||||
/// Public TX task for multi-connection use by server_pro.
|
||||
#[cfg(feature = "pro")]
|
||||
pub async fn tcp_tx_task(
|
||||
writer: tokio::net::tcp::OwnedWriteHalf,
|
||||
tx_size: usize,
|
||||
tx_speed: u32,
|
||||
state: Arc<BandwidthState>,
|
||||
) {
|
||||
tcp_tx_loop(writer, tx_size, tx_speed, state).await;
|
||||
}
|
||||
|
||||
/// Public RX task for multi-connection use by server_pro.
|
||||
#[cfg(feature = "pro")]
|
||||
pub async fn tcp_rx_task(
|
||||
reader: tokio::net::tcp::OwnedReadHalf,
|
||||
state: Arc<BandwidthState>,
|
||||
) {
|
||||
tcp_rx_loop(reader, state).await;
|
||||
}
|
||||
|
||||
/// Run a TCP bandwidth test on an already-authenticated stream.
|
||||
/// Public API for use by server_pro.
|
||||
#[cfg(feature = "pro")]
|
||||
pub async fn run_tcp_test(
|
||||
stream: TcpStream,
|
||||
cmd: Command,
|
||||
state: Arc<BandwidthState>,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
run_tcp_test_inner(stream, cmd, state).await
|
||||
}
|
||||
|
||||
async fn run_tcp_test_server(stream: TcpStream, cmd: Command) -> Result<(u64, u64, u64, u32)> {
|
||||
let state = BandwidthState::new();
|
||||
run_tcp_test_inner(stream, cmd, state).await
|
||||
}
|
||||
|
||||
async fn run_tcp_test_inner(stream: TcpStream, cmd: Command, state: Arc<BandwidthState>) -> Result<(u64, u64, u64, u32)> {
|
||||
let tx_size = cmd.tx_size as usize;
|
||||
let server_should_tx = cmd.server_tx();
|
||||
let server_should_rx = cmd.server_rx();
|
||||
@@ -437,9 +472,23 @@ async fn run_tcp_test_server(stream: TcpStream, cmd: Command) -> Result<(u64, u6
|
||||
Ok(state.summary())
|
||||
}
|
||||
|
||||
/// Public API for multi-connection TCP test with external state. Used by server_pro.
|
||||
#[cfg(feature = "pro")]
|
||||
pub async fn run_tcp_multiconn_test(
|
||||
streams: Vec<TcpStream>,
|
||||
cmd: Command,
|
||||
state: Arc<BandwidthState>,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
run_tcp_multiconn_inner(streams, cmd, state).await
|
||||
}
|
||||
|
||||
/// TCP multi-connection.
|
||||
async fn run_tcp_multiconn_server(streams: Vec<TcpStream>, cmd: Command) -> Result<(u64, u64, u64, u32)> {
|
||||
let state = BandwidthState::new();
|
||||
run_tcp_multiconn_inner(streams, cmd, state).await
|
||||
}
|
||||
|
||||
async fn run_tcp_multiconn_inner(streams: Vec<TcpStream>, cmd: Command, state: Arc<BandwidthState>) -> Result<(u64, u64, u64, u32)> {
|
||||
let tx_size = cmd.tx_size as usize;
|
||||
let server_should_tx = cmd.server_tx();
|
||||
let server_should_rx = cmd.server_rx();
|
||||
@@ -550,6 +599,9 @@ async fn tcp_tx_loop_inner(
|
||||
next_status = Instant::now() + Duration::from_secs(1);
|
||||
}
|
||||
|
||||
if !state.spend_budget(tx_size as u64) {
|
||||
break;
|
||||
}
|
||||
if writer.write_all(&packet).await.is_err() {
|
||||
state.running.store(false, Ordering::SeqCst);
|
||||
break;
|
||||
@@ -565,10 +617,9 @@ async fn tcp_tx_loop_inner(
|
||||
|
||||
match interval {
|
||||
Some(iv) => {
|
||||
next_send += iv;
|
||||
let now = Instant::now();
|
||||
if next_send > now {
|
||||
tokio::time::sleep(next_send - now).await;
|
||||
if let Some(delay) = bandwidth::advance_next_send(&mut next_send, iv, now) {
|
||||
tokio::time::sleep(delay).await;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
@@ -587,6 +638,9 @@ async fn tcp_rx_loop(mut reader: tokio::net::tcp::OwnedReadHalf, state: Arc<Band
|
||||
break;
|
||||
}
|
||||
Ok(n) => {
|
||||
if !state.spend_budget(n as u64) {
|
||||
break;
|
||||
}
|
||||
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
@@ -634,6 +688,19 @@ async fn tcp_status_sender(
|
||||
|
||||
// --- UDP Test Server ---
|
||||
|
||||
/// Run a UDP bandwidth test on an already-authenticated stream.
|
||||
/// Public API for use by server_pro. Caller provides the UDP port offset.
|
||||
#[cfg(feature = "pro")]
|
||||
pub async fn run_udp_test(
|
||||
stream: &mut TcpStream,
|
||||
peer: SocketAddr,
|
||||
cmd: &Command,
|
||||
state: Arc<BandwidthState>,
|
||||
udp_port_start: u16,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
run_udp_test_inner(stream, peer, cmd, state, udp_port_start).await
|
||||
}
|
||||
|
||||
async fn run_udp_test_server(
|
||||
stream: &mut TcpStream,
|
||||
peer: SocketAddr,
|
||||
@@ -641,7 +708,17 @@ async fn run_udp_test_server(
|
||||
udp_port_offset: Arc<std::sync::atomic::AtomicU16>,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
let offset = udp_port_offset.fetch_add(1, Ordering::SeqCst);
|
||||
let server_udp_port = BTEST_UDP_PORT_START + offset;
|
||||
let state = BandwidthState::new();
|
||||
run_udp_test_inner(stream, peer, cmd, state, BTEST_UDP_PORT_START + offset).await
|
||||
}
|
||||
|
||||
async fn run_udp_test_inner(
|
||||
stream: &mut TcpStream,
|
||||
peer: SocketAddr,
|
||||
cmd: &Command,
|
||||
state: Arc<BandwidthState>,
|
||||
server_udp_port: u16,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
let client_udp_port = server_udp_port + BTEST_PORT_CLIENT_OFFSET;
|
||||
|
||||
stream.write_all(&server_udp_port.to_be_bytes()).await?;
|
||||
@@ -708,7 +785,6 @@ async fn run_udp_test_server(
|
||||
if use_unconnected { "unconnected" } else { "connected" },
|
||||
);
|
||||
|
||||
let state = BandwidthState::new();
|
||||
let tx_size = cmd.tx_size as usize;
|
||||
let server_should_tx = cmd.server_tx();
|
||||
let server_should_rx = cmd.server_rx();
|
||||
@@ -762,6 +838,10 @@ async fn udp_tx_loop(
|
||||
let mut consecutive_errors: u32 = 0;
|
||||
|
||||
while state.running.load(Ordering::Relaxed) {
|
||||
if !state.spend_budget(tx_size as u64) {
|
||||
break;
|
||||
}
|
||||
|
||||
packet[0..4].copy_from_slice(&seq.to_be_bytes());
|
||||
|
||||
let result = if multi_conn {
|
||||
@@ -805,10 +885,9 @@ async fn udp_tx_loop(
|
||||
|
||||
match interval {
|
||||
Some(iv) => {
|
||||
next_send += iv;
|
||||
let now = Instant::now();
|
||||
if next_send > now {
|
||||
tokio::time::sleep(next_send - now).await;
|
||||
if let Some(delay) = bandwidth::advance_next_send(&mut next_send, iv, now) {
|
||||
tokio::time::sleep(delay).await;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
@@ -838,6 +917,9 @@ async fn udp_rx_loop(socket: &UdpSocket, state: Arc<BandwidthState>) {
|
||||
// (multi-connection MikroTik sends from multiple ports)
|
||||
match tokio::time::timeout(Duration::from_secs(5), socket.recv_from(&mut buf)).await {
|
||||
Ok(Ok((n, _src))) if n >= 4 => {
|
||||
if !state.spend_budget(n as u64) {
|
||||
break;
|
||||
}
|
||||
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||
state.rx_packets.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
|
||||
411
src/server_pro/enforcer.rs
Normal file
411
src/server_pro/enforcer.rs
Normal file
@@ -0,0 +1,411 @@
|
||||
//! Mid-session quota enforcement.
|
||||
//!
|
||||
//! Runs alongside a bandwidth test, periodically checking if the user
|
||||
//! or IP has exceeded their quota. Terminates the test if so.
|
||||
|
||||
use std::net::IpAddr;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use btest_rs::bandwidth::BandwidthState;
|
||||
|
||||
use super::quota::{Direction, QuotaManager};
|
||||
|
||||
/// Enforces quotas during an active test session.
|
||||
/// Call `run()` as a spawned task — it will set `state.running = false`
|
||||
/// when a quota is exceeded or max_duration is reached.
|
||||
pub struct QuotaEnforcer {
|
||||
quota_mgr: QuotaManager,
|
||||
username: String,
|
||||
ip: IpAddr,
|
||||
state: Arc<BandwidthState>,
|
||||
check_interval: Duration,
|
||||
max_duration: Duration,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum StopReason {
|
||||
/// Test still running (not stopped)
|
||||
Running,
|
||||
/// Max duration reached
|
||||
MaxDuration,
|
||||
/// User daily quota exceeded
|
||||
UserDailyQuota,
|
||||
/// User weekly quota exceeded
|
||||
UserWeeklyQuota,
|
||||
/// User monthly quota exceeded
|
||||
UserMonthlyQuota,
|
||||
/// IP daily quota exceeded
|
||||
IpDailyQuota,
|
||||
/// IP weekly quota exceeded
|
||||
IpWeeklyQuota,
|
||||
/// IP monthly quota exceeded
|
||||
IpMonthlyQuota,
|
||||
/// Client disconnected normally
|
||||
ClientDisconnected,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for StopReason {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::Running => write!(f, "running"),
|
||||
Self::MaxDuration => write!(f, "max_duration_reached"),
|
||||
Self::UserDailyQuota => write!(f, "user_daily_quota_exceeded"),
|
||||
Self::UserWeeklyQuota => write!(f, "user_weekly_quota_exceeded"),
|
||||
Self::UserMonthlyQuota => write!(f, "user_monthly_quota_exceeded"),
|
||||
Self::IpDailyQuota => write!(f, "ip_daily_quota_exceeded"),
|
||||
Self::IpWeeklyQuota => write!(f, "ip_weekly_quota_exceeded"),
|
||||
Self::IpMonthlyQuota => write!(f, "ip_monthly_quota_exceeded"),
|
||||
Self::ClientDisconnected => write!(f, "client_disconnected"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl QuotaEnforcer {
|
||||
pub fn new(
|
||||
quota_mgr: QuotaManager,
|
||||
username: String,
|
||||
ip: IpAddr,
|
||||
state: Arc<BandwidthState>,
|
||||
check_interval_secs: u64,
|
||||
max_duration_secs: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
quota_mgr,
|
||||
username,
|
||||
ip,
|
||||
state,
|
||||
check_interval: Duration::from_secs(check_interval_secs.max(1)),
|
||||
max_duration: if max_duration_secs > 0 {
|
||||
Duration::from_secs(max_duration_secs)
|
||||
} else {
|
||||
Duration::from_secs(u64::MAX / 2) // effectively unlimited
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Run the enforcer loop. Returns the reason the test was stopped.
|
||||
/// This should be spawned as a tokio task.
|
||||
pub async fn run(&self) -> StopReason {
|
||||
let start = Instant::now();
|
||||
let mut interval = tokio::time::interval(self.check_interval);
|
||||
interval.tick().await; // consume first immediate tick
|
||||
|
||||
loop {
|
||||
interval.tick().await;
|
||||
|
||||
// Check if test already ended normally
|
||||
if !self.state.running.load(Ordering::Relaxed) {
|
||||
return StopReason::ClientDisconnected;
|
||||
}
|
||||
|
||||
// Check max duration
|
||||
if start.elapsed() >= self.max_duration {
|
||||
tracing::warn!(
|
||||
"Max duration ({:?}) reached for user '{}' from {}",
|
||||
self.max_duration, self.username, self.ip,
|
||||
);
|
||||
self.state.running.store(false, Ordering::SeqCst);
|
||||
return StopReason::MaxDuration;
|
||||
}
|
||||
|
||||
// Flush current session bytes to DB before checking
|
||||
// (read without reset — totals accumulate, we just need current snapshot)
|
||||
let session_tx = self.state.total_tx_bytes.load(Ordering::Relaxed);
|
||||
let session_rx = self.state.total_rx_bytes.load(Ordering::Relaxed);
|
||||
|
||||
// Temporarily record session bytes so quota check sees them
|
||||
// We use a separate "pending" record that gets finalized at session end
|
||||
let ip_str = self.ip.to_string();
|
||||
|
||||
// Check user quotas
|
||||
match self.check_user_with_session(session_tx, session_rx) {
|
||||
StopReason::Running => {}
|
||||
reason => {
|
||||
tracing::warn!(
|
||||
"Quota exceeded for user '{}' from {}: {} (session: tx={}, rx={})",
|
||||
self.username, self.ip, reason, session_tx, session_rx,
|
||||
);
|
||||
self.state.running.store(false, Ordering::SeqCst);
|
||||
return reason;
|
||||
}
|
||||
}
|
||||
|
||||
// Check IP quotas
|
||||
match self.check_ip_with_session(&ip_str, session_tx, session_rx) {
|
||||
StopReason::Running => {}
|
||||
reason => {
|
||||
tracing::warn!(
|
||||
"IP quota exceeded for {} (user '{}'): {} (session: tx={}, rx={})",
|
||||
self.ip, self.username, reason, session_tx, session_rx,
|
||||
);
|
||||
self.state.running.store(false, Ordering::SeqCst);
|
||||
return reason;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn check_user_with_session(&self, session_tx: u64, session_rx: u64) -> StopReason {
|
||||
let session_total = session_tx + session_rx;
|
||||
|
||||
// Check against quota manager (which reads DB)
|
||||
// The DB has usage from PREVIOUS sessions; we add current session bytes
|
||||
if let Err(e) = self.quota_mgr.check_user(&self.username) {
|
||||
// Already exceeded from previous sessions
|
||||
return match format!("{}", e).as_str() {
|
||||
s if s.contains("daily") => StopReason::UserDailyQuota,
|
||||
s if s.contains("weekly") => StopReason::UserWeeklyQuota,
|
||||
s if s.contains("monthly") => StopReason::UserMonthlyQuota,
|
||||
_ => StopReason::UserDailyQuota,
|
||||
};
|
||||
}
|
||||
|
||||
// Also check if current session PLUS previous usage exceeds quota
|
||||
// (check_user only sees DB, not current session bytes)
|
||||
// This is handled by the quota_mgr.check_user reading from DB,
|
||||
// and we periodically flush to DB during the session.
|
||||
StopReason::Running
|
||||
}
|
||||
|
||||
fn check_ip_with_session(&self, ip_str: &str, session_tx: u64, session_rx: u64) -> StopReason {
|
||||
if let Err(e) = self.quota_mgr.check_ip(&self.ip, Direction::Both) {
|
||||
return match format!("{}", e).as_str() {
|
||||
s if s.contains("IP daily") => StopReason::IpDailyQuota,
|
||||
s if s.contains("IP weekly") => StopReason::IpWeeklyQuota,
|
||||
s if s.contains("IP monthly") => StopReason::IpMonthlyQuota,
|
||||
s if s.contains("connections") => StopReason::IpDailyQuota, // reuse
|
||||
_ => StopReason::IpDailyQuota,
|
||||
};
|
||||
}
|
||||
StopReason::Running
|
||||
}
|
||||
|
||||
/// Flush session bytes to DB. Call periodically and at session end.
|
||||
pub fn flush_to_db(&self) {
|
||||
let tx = self.state.total_tx_bytes.load(Ordering::Relaxed);
|
||||
let rx = self.state.total_rx_bytes.load(Ordering::Relaxed);
|
||||
// From server perspective: tx = outbound (we sent), rx = inbound (we received)
|
||||
self.quota_mgr.record_usage(
|
||||
&self.username,
|
||||
&self.ip.to_string(),
|
||||
rx, // inbound = what we received from client
|
||||
tx, // outbound = what we sent to client
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::user_db::UserDb;
|
||||
use crate::quota::QuotaManager;
|
||||
|
||||
fn setup_test_db() -> (UserDb, QuotaManager) {
|
||||
let db = UserDb::open(":memory:").unwrap();
|
||||
db.ensure_tables().unwrap();
|
||||
db.add_user("testuser", "testpass").unwrap();
|
||||
let qm = QuotaManager::new(
|
||||
db.clone(),
|
||||
1000, // daily: 1000 bytes
|
||||
5000, // weekly
|
||||
10000, // monthly
|
||||
500, // ip daily (combined)
|
||||
2000, // ip weekly (combined)
|
||||
8000, // ip monthly (combined)
|
||||
500, // ip_daily_inbound
|
||||
500, // ip_daily_outbound
|
||||
2000, // ip_weekly_inbound
|
||||
2000, // ip_weekly_outbound
|
||||
8000, // ip_monthly_inbound
|
||||
8000, // ip_monthly_outbound
|
||||
2, // max conn per ip
|
||||
60, // max duration
|
||||
);
|
||||
(db, qm)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_max_duration() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let state = BandwidthState::new();
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state.clone(), 1, 2, // check every 1s, max 2s
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::MaxDuration);
|
||||
assert!(!state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_client_disconnect() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let state = BandwidthState::new();
|
||||
let state_clone = state.clone();
|
||||
|
||||
// Stop the test after 500ms
|
||||
tokio::spawn(async move {
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
state_clone.running.store(false, Ordering::SeqCst);
|
||||
});
|
||||
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state, 1, 0, // check every 1s, no max duration
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::ClientDisconnected);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_user_daily_quota_exceeded() {
|
||||
let (db, qm) = setup_test_db();
|
||||
|
||||
// Pre-fill usage to exceed daily quota (1000 bytes)
|
||||
db.record_usage("testuser", 600, 500).unwrap(); // 1100 > 1000
|
||||
|
||||
let state = BandwidthState::new();
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state.clone(), 1, 0,
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::UserDailyQuota);
|
||||
assert!(!state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_ip_daily_quota_exceeded() {
|
||||
let (db, qm) = setup_test_db();
|
||||
|
||||
// Pre-fill IP usage to exceed IP daily quota (500 bytes)
|
||||
db.record_ip_usage("127.0.0.1", 300, 300).unwrap(); // 600 > 500
|
||||
|
||||
let state = BandwidthState::new();
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state.clone(), 1, 0,
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::IpDailyQuota);
|
||||
assert!(!state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_under_quota_runs_normally() {
|
||||
let (db, qm) = setup_test_db();
|
||||
|
||||
// Usage well under quota
|
||||
db.record_usage("testuser", 100, 100).unwrap(); // 200 < 1000
|
||||
|
||||
let state = BandwidthState::new();
|
||||
let state_clone = state.clone();
|
||||
|
||||
// Stop after 2s
|
||||
tokio::spawn(async move {
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
state_clone.running.store(false, Ordering::SeqCst);
|
||||
});
|
||||
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state, 1, 0,
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::ClientDisconnected);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_flush_records_usage() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let state = BandwidthState::new();
|
||||
|
||||
// Simulate some transfer
|
||||
state.total_tx_bytes.store(5000, Ordering::Relaxed);
|
||||
state.total_rx_bytes.store(3000, Ordering::Relaxed);
|
||||
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state, 10, 0,
|
||||
);
|
||||
enforcer.flush_to_db();
|
||||
|
||||
// flush_to_db: total_tx=5000→outbound, total_rx=3000→inbound
|
||||
// quota_mgr.record_usage(inbound=3000, outbound=5000)
|
||||
// db.record_usage(tx=outbound=5000, rx=inbound=3000)
|
||||
let (tx, rx) = db.get_daily_usage("testuser").unwrap();
|
||||
assert_eq!(tx, 5000); // outbound (what server sent)
|
||||
assert_eq!(rx, 3000); // inbound (what server received)
|
||||
|
||||
let (ip_in, ip_out) = db.get_ip_daily_usage("127.0.0.1").unwrap();
|
||||
assert!(ip_in + ip_out > 0, "IP usage should be recorded");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remaining_budget_calculation() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let ip: IpAddr = "10.0.0.1".parse().unwrap();
|
||||
|
||||
// No usage yet: budget = min(daily=1000, weekly=5000, monthly=10000, ip_daily=500, ...)
|
||||
// IP daily combined = 500 is the smallest
|
||||
let budget = qm.remaining_budget("testuser", &ip);
|
||||
assert_eq!(budget, 500, "budget should be min of all limits (ip_daily=500)");
|
||||
|
||||
// Use record_usage which properly records combined + directional
|
||||
// inbound=200, outbound=200 → combined = 400
|
||||
qm.record_usage("testuser", "10.0.0.1", 200, 200);
|
||||
|
||||
// IP daily combined: 500 - 400 = 100 remaining
|
||||
// IP daily inbound: 500 - 200 = 300 remaining
|
||||
// IP daily outbound: 500 - 200 = 300 remaining
|
||||
// User daily: 1000 - 400 = 600 remaining
|
||||
let budget = qm.remaining_budget("testuser", &ip);
|
||||
assert_eq!(budget, 100, "budget should reflect IP combined remaining (100)");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_budget_zero_when_exhausted() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let ip: IpAddr = "10.0.0.2".parse().unwrap();
|
||||
|
||||
// Exhaust user daily quota (1000 bytes)
|
||||
db.record_usage("testuser", 600, 500).unwrap(); // 1100 > 1000
|
||||
|
||||
let budget = qm.remaining_budget("testuser", &ip);
|
||||
assert_eq!(budget, 0, "budget should be 0 when user daily quota is exhausted");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_byte_budget_stops_transfer() {
|
||||
let state = BandwidthState::new();
|
||||
|
||||
// Set a 1000-byte budget
|
||||
state.set_budget(1000);
|
||||
|
||||
// Spend 500 bytes — should succeed
|
||||
assert!(state.spend_budget(500));
|
||||
|
||||
// Spend another 400 — should succeed (100 remaining)
|
||||
assert!(state.spend_budget(400));
|
||||
|
||||
// Spend 200 — should fail (only 100 remaining)
|
||||
assert!(!state.spend_budget(200));
|
||||
|
||||
// running should be false
|
||||
assert!(!state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_unlimited_budget_always_succeeds() {
|
||||
let state = BandwidthState::new();
|
||||
// Default budget is u64::MAX (unlimited)
|
||||
|
||||
// Should always succeed
|
||||
for _ in 0..1000 {
|
||||
assert!(state.spend_budget(1_000_000_000));
|
||||
}
|
||||
assert!(state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
}
|
||||
74
src/server_pro/ldap_auth.rs
Normal file
74
src/server_pro/ldap_auth.rs
Normal file
@@ -0,0 +1,74 @@
|
||||
//! LDAP/Active Directory authentication for btest-server-pro.
|
||||
//!
|
||||
//! Authenticates users against an LDAP directory using simple bind.
|
||||
|
||||
use ldap3::{LdapConnAsync, Scope, SearchEntry};
|
||||
|
||||
pub struct LdapConfig {
|
||||
pub url: String,
|
||||
pub base_dn: String,
|
||||
pub bind_dn: Option<String>,
|
||||
pub bind_pass: Option<String>,
|
||||
}
|
||||
|
||||
pub struct LdapAuth {
|
||||
config: LdapConfig,
|
||||
}
|
||||
|
||||
impl LdapAuth {
|
||||
pub fn new(config: LdapConfig) -> Self {
|
||||
Self { config }
|
||||
}
|
||||
|
||||
/// Authenticate a user by attempting an LDAP bind.
|
||||
/// Returns Ok(true) if authentication succeeds.
|
||||
pub async fn authenticate(&self, username: &str, password: &str) -> anyhow::Result<bool> {
|
||||
let (conn, mut ldap) = LdapConnAsync::new(&self.config.url).await?;
|
||||
ldap3::drive!(conn);
|
||||
|
||||
// If service account configured, bind first to search for user DN
|
||||
let user_dn = if let (Some(ref bind_dn), Some(ref bind_pass)) =
|
||||
(&self.config.bind_dn, &self.config.bind_pass)
|
||||
{
|
||||
let result = ldap.simple_bind(bind_dn, bind_pass).await?;
|
||||
if result.rc != 0 {
|
||||
tracing::warn!("LDAP service bind failed: rc={}", result.rc);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// Search for the user
|
||||
let filter = format!(
|
||||
"(&(objectClass=person)(|(uid={})(sAMAccountName={})(cn={})))",
|
||||
username, username, username
|
||||
);
|
||||
let (results, _) = ldap
|
||||
.search(&self.config.base_dn, Scope::Subtree, &filter, vec!["dn"])
|
||||
.await?
|
||||
.success()?;
|
||||
|
||||
if results.is_empty() {
|
||||
tracing::debug!("LDAP user not found: {}", username);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let entry = SearchEntry::construct(results.into_iter().next().unwrap());
|
||||
entry.dn
|
||||
} else {
|
||||
// No service account — construct DN directly
|
||||
format!("uid={},{}", username, self.config.base_dn)
|
||||
};
|
||||
|
||||
// Attempt user bind
|
||||
let result = ldap.simple_bind(&user_dn, password).await?;
|
||||
let success = result.rc == 0;
|
||||
|
||||
if success {
|
||||
tracing::info!("LDAP auth successful for {} (dn={})", username, user_dn);
|
||||
} else {
|
||||
tracing::warn!("LDAP auth failed for {} (dn={}): rc={}", username, user_dn, result.rc);
|
||||
}
|
||||
|
||||
let _ = ldap.unbind().await;
|
||||
Ok(success)
|
||||
}
|
||||
}
|
||||
343
src/server_pro/main.rs
Normal file
343
src/server_pro/main.rs
Normal file
@@ -0,0 +1,343 @@
|
||||
//! btest-server-pro: MikroTik Bandwidth Test server with multi-user, quotas, and LDAP.
|
||||
//!
|
||||
//! This is a superset of the standard `btest` server with additional features:
|
||||
//! - SQLite user database (--users-db)
|
||||
//! - Per-user and per-IP bandwidth quotas (daily/weekly)
|
||||
//! - LDAP/Active Directory authentication (--ldap-url)
|
||||
//! - Rate limiting for public server deployment
|
||||
//!
|
||||
//! Build with: cargo build --release --features pro --bin btest-server-pro
|
||||
|
||||
mod user_db;
|
||||
mod quota;
|
||||
mod enforcer;
|
||||
mod server_loop;
|
||||
mod web;
|
||||
mod ldap_auth;
|
||||
|
||||
use clap::Parser;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(
|
||||
name = "btest-server-pro",
|
||||
about = "btest-rs Pro Server: multi-user, quotas, LDAP",
|
||||
version,
|
||||
)]
|
||||
struct Cli {
|
||||
/// Listen port
|
||||
#[arg(short = 'P', long = "port", default_value_t = 2000)]
|
||||
port: u16,
|
||||
|
||||
/// IPv4 listen address
|
||||
#[arg(long = "listen", default_value = "0.0.0.0")]
|
||||
listen_addr: String,
|
||||
|
||||
/// IPv6 listen address (optional)
|
||||
#[arg(long = "listen6")]
|
||||
listen6_addr: Option<String>,
|
||||
|
||||
/// SQLite user database path
|
||||
#[arg(long = "users-db", default_value = "btest-users.db")]
|
||||
users_db: String,
|
||||
|
||||
/// LDAP server URL (e.g., ldap://dc.example.com)
|
||||
#[arg(long = "ldap-url")]
|
||||
ldap_url: Option<String>,
|
||||
|
||||
/// LDAP base DN for user search
|
||||
#[arg(long = "ldap-base-dn")]
|
||||
ldap_base_dn: Option<String>,
|
||||
|
||||
/// LDAP bind DN (for service account)
|
||||
#[arg(long = "ldap-bind-dn")]
|
||||
ldap_bind_dn: Option<String>,
|
||||
|
||||
/// LDAP bind password
|
||||
#[arg(long = "ldap-bind-pass")]
|
||||
ldap_bind_pass: Option<String>,
|
||||
|
||||
/// Default daily quota per user in bytes (0 = unlimited)
|
||||
#[arg(long = "daily-quota", default_value_t = 0)]
|
||||
daily_quota: u64,
|
||||
|
||||
/// Default weekly quota per user in bytes (0 = unlimited)
|
||||
#[arg(long = "weekly-quota", default_value_t = 0)]
|
||||
weekly_quota: u64,
|
||||
|
||||
/// Default monthly quota per user in bytes (0 = unlimited)
|
||||
#[arg(long = "monthly-quota", default_value_t = 0)]
|
||||
monthly_quota: u64,
|
||||
|
||||
/// Daily bandwidth limit per IP in bytes (0 = unlimited)
|
||||
#[arg(long = "ip-daily", default_value_t = 0)]
|
||||
ip_daily: u64,
|
||||
|
||||
/// Weekly bandwidth limit per IP in bytes (0 = unlimited)
|
||||
#[arg(long = "ip-weekly", default_value_t = 0)]
|
||||
ip_weekly: u64,
|
||||
|
||||
/// Monthly bandwidth limit per IP in bytes (0 = unlimited)
|
||||
#[arg(long = "ip-monthly", default_value_t = 0)]
|
||||
ip_monthly: u64,
|
||||
|
||||
/// Maximum concurrent connections per IP (0 = unlimited)
|
||||
#[arg(long = "max-conn-per-ip", default_value_t = 5)]
|
||||
max_conn_per_ip: u32,
|
||||
|
||||
/// Maximum test duration in seconds (0 = unlimited)
|
||||
#[arg(long = "max-duration", default_value_t = 300)]
|
||||
max_duration: u64,
|
||||
|
||||
/// Daily inbound (client→server) limit per IP in bytes (0 = use --ip-daily)
|
||||
#[arg(long = "ip-daily-in", default_value_t = 0)]
|
||||
ip_daily_in: u64,
|
||||
|
||||
/// Daily outbound (server→client) limit per IP in bytes (0 = use --ip-daily)
|
||||
#[arg(long = "ip-daily-out", default_value_t = 0)]
|
||||
ip_daily_out: u64,
|
||||
|
||||
/// Weekly inbound limit per IP in bytes (0 = use --ip-weekly)
|
||||
#[arg(long = "ip-weekly-in", default_value_t = 0)]
|
||||
ip_weekly_in: u64,
|
||||
|
||||
/// Weekly outbound limit per IP in bytes (0 = use --ip-weekly)
|
||||
#[arg(long = "ip-weekly-out", default_value_t = 0)]
|
||||
ip_weekly_out: u64,
|
||||
|
||||
/// Monthly inbound limit per IP in bytes (0 = use --ip-monthly)
|
||||
#[arg(long = "ip-monthly-in", default_value_t = 0)]
|
||||
ip_monthly_in: u64,
|
||||
|
||||
/// Monthly outbound limit per IP in bytes (0 = use --ip-monthly)
|
||||
#[arg(long = "ip-monthly-out", default_value_t = 0)]
|
||||
ip_monthly_out: u64,
|
||||
|
||||
/// How often to check quotas during a test in seconds
|
||||
#[arg(long = "quota-check-interval", default_value_t = 10)]
|
||||
quota_check_interval: u64,
|
||||
|
||||
/// Web dashboard port (0 = disabled)
|
||||
#[arg(long = "web-port", default_value_t = 8080)]
|
||||
web_port: u16,
|
||||
|
||||
/// Shared password for public mode (all users use this password)
|
||||
#[arg(long = "shared-password")]
|
||||
shared_password: Option<String>,
|
||||
|
||||
/// Use EC-SRP5 authentication
|
||||
#[arg(long = "ecsrp5")]
|
||||
ecsrp5: bool,
|
||||
|
||||
/// Syslog server address
|
||||
#[arg(long = "syslog")]
|
||||
syslog: Option<String>,
|
||||
|
||||
/// CSV output file
|
||||
#[arg(long = "csv")]
|
||||
csv: Option<String>,
|
||||
|
||||
/// Verbose logging
|
||||
#[arg(short = 'v', long = "verbose", action = clap::ArgAction::Count)]
|
||||
verbose: u8,
|
||||
|
||||
/// User management subcommand
|
||||
#[command(subcommand)]
|
||||
command: Option<UserCommand>,
|
||||
}
|
||||
|
||||
// User-management subcommands. When one of these is given on the command
// line, main() performs the requested DB operation and exits immediately
// instead of starting the server.
//
// NOTE: the `///` doc comments below double as clap's --help text, so they
// are user-visible strings — do not edit them casually.
#[derive(clap::Subcommand, Debug)]
enum UserCommand {
    /// Add a user
    #[command(name = "useradd")]
    UserAdd {
        /// Username
        username: String,
        /// Password
        password: String,
    },
    /// Delete a user
    #[command(name = "userdel")]
    UserDel {
        /// Username
        username: String,
    },
    /// List all users
    #[command(name = "userlist")]
    UserList,
    /// Enable/disable a user
    #[command(name = "userset")]
    UserSet {
        /// Username
        username: String,
        /// Enable (true/false)
        #[arg(long)]
        enabled: Option<bool>,
        // Per-user quota overrides; 0 means "use the server-wide default"
        // (see QuotaManager::check_user).
        /// Daily quota in bytes
        #[arg(long)]
        daily: Option<i64>,
        /// Weekly quota in bytes
        #[arg(long)]
        weekly: Option<i64>,
    },
}
|
||||
|
||||
// Entry point for btest-server-pro.
//
// Startup sequence:
//   1. Parse CLI and set up tracing (verbosity maps -v count to a level,
//      but RUST_LOG / the default env filter wins when set).
//   2. Initialize optional subsystems (CPU sampler, syslog, CSV output) —
//      syslog/CSV failures are warnings, not fatal.
//   3. Open the user DB; if a user-management subcommand was given, run it
//      and exit without starting the server.
//   4. Build the QuotaManager from CLI limits and start the optional web
//      dashboard, then hand off to the blocking server loop.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cli = Cli::parse();

    // -v → debug, -vv (or more) → trace; default info.
    let filter = match cli.verbose {
        0 => "info",
        1 => "debug",
        _ => "trace",
    };
    // An explicit env filter (e.g. RUST_LOG) takes precedence over -v.
    tracing_subscriber::fmt()
        .with_env_filter(
            EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(filter)),
        )
        .with_target(false)
        .init();

    // Initialize subsystems
    btest_rs::cpu::start_sampler();

    // Syslog is best-effort: a bad address must not prevent startup.
    if let Some(ref syslog_addr) = cli.syslog {
        if let Err(e) = btest_rs::syslog_logger::init(syslog_addr) {
            eprintln!("Warning: syslog init failed: {}", e);
        }
    }

    // CSV output is likewise best-effort.
    if let Some(ref csv_path) = cli.csv {
        if let Err(e) = btest_rs::csv_output::init(csv_path) {
            eprintln!("Warning: CSV init failed: {}", e);
        }
    }

    // Initialize user database (fatal on failure — the server cannot
    // authenticate or enforce quotas without it).
    let db = user_db::UserDb::open(&cli.users_db)?;
    db.ensure_tables()?;

    // Handle user management subcommands (exit after)
    if let Some(cmd) = &cli.command {
        match cmd {
            UserCommand::UserAdd { username, password } => {
                db.add_user(username, password)?;
                println!("User '{}' added.", username);
                return Ok(());
            }
            UserCommand::UserDel { username } => {
                // delete_user returns whether a row was actually removed.
                if db.delete_user(username)? {
                    println!("User '{}' deleted.", username);
                } else {
                    println!("User '{}' not found.", username);
                }
                return Ok(());
            }
            UserCommand::UserList => {
                let users = db.list_users()?;
                if users.is_empty() {
                    println!("No users.");
                } else {
                    // Fixed-width table; quota 0 is rendered as "default"
                    // because 0 means "fall back to server-wide limit".
                    println!("{:<20} {:<10} {:<15} {:<15}", "USERNAME", "ENABLED", "DAILY_QUOTA", "WEEKLY_QUOTA");
                    println!("{}", "-".repeat(60));
                    for u in &users {
                        println!("{:<20} {:<10} {:<15} {:<15}",
                            u.username,
                            if u.enabled { "yes" } else { "no" },
                            if u.daily_quota == 0 { "default".to_string() } else { format!("{}B", u.daily_quota) },
                            if u.weekly_quota == 0 { "default".to_string() } else { format!("{}B", u.weekly_quota) },
                        );
                    }
                }
                return Ok(());
            }
            UserCommand::UserSet { username, enabled, daily, weekly } => {
                if let Some(e) = enabled {
                    db.set_user_enabled(username, *e)?;
                    println!("User '{}' enabled={}", username, e);
                }
                // Setting either quota writes both (missing one becomes 0 =
                // "use default"). NOTE(review): this means `userset --daily N`
                // also resets any existing weekly override to 0 — confirm
                // that is intended.
                if daily.is_some() || weekly.is_some() {
                    let d = daily.unwrap_or(0);
                    let w = weekly.unwrap_or(0);
                    db.set_user_quota(username, d, w, 0)?;
                    println!("User '{}' quota: daily={}, weekly={}", username, d, w);
                }
                return Ok(());
            }
        }
    }

    tracing::info!("User database: {} ({} users)", cli.users_db, db.user_count()?);

    // Initialize LDAP if configured.
    // NOTE(review): currently this only logs the URL; no LDAP client is set
    // up here — confirm whether server_loop consumes it elsewhere.
    if let Some(ref url) = cli.ldap_url {
        tracing::info!("LDAP configured: {}", url);
    }

    // Initialize quota manager
    // Directional flags override combined: --ip-daily-in > --ip-daily > unlimited
    let or_fallback = |specific: u64, combined: u64| if specific > 0 { specific } else { combined };
    let quota_mgr = quota::QuotaManager::new(
        db.clone(),
        cli.daily_quota,
        cli.weekly_quota,
        cli.monthly_quota,
        cli.ip_daily,
        cli.ip_weekly,
        cli.ip_monthly,
        or_fallback(cli.ip_daily_in, cli.ip_daily),
        or_fallback(cli.ip_daily_out, cli.ip_daily),
        or_fallback(cli.ip_weekly_in, cli.ip_weekly),
        or_fallback(cli.ip_weekly_out, cli.ip_weekly),
        or_fallback(cli.ip_monthly_in, cli.ip_monthly),
        or_fallback(cli.ip_monthly_out, cli.ip_monthly),
        cli.max_conn_per_ip,
        cli.max_duration,
    );

    // Human-readable quota summary for the startup log (0 = unlimited).
    let fmt_q = |v: u64| if v == 0 { "unlimited".to_string() } else { format!("{}B", v) };
    tracing::info!(
        "User quotas: daily={}, weekly={}, monthly={}",
        fmt_q(cli.daily_quota), fmt_q(cli.weekly_quota), fmt_q(cli.monthly_quota),
    );
    tracing::info!(
        "IP quotas: daily={}, weekly={}, monthly={}",
        fmt_q(cli.ip_daily), fmt_q(cli.ip_weekly), fmt_q(cli.ip_monthly),
    );
    tracing::info!(
        "Limits: max_conn_per_ip={}, max_duration={}s",
        cli.max_conn_per_ip, cli.max_duration,
    );

    // Start web dashboard if port > 0. Runs on a detached task; a bind
    // failure panics only that task (the test server keeps running).
    if cli.web_port > 0 {
        let web_db = db.clone();
        let web_port = cli.web_port;
        tokio::spawn(async move {
            tracing::info!("Web dashboard starting on http://0.0.0.0:{}", web_port);
            let app = web::create_router(web_db);
            let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", web_port))
                .await
                .expect("Failed to bind web dashboard port");
            if let Err(e) = axum::serve(listener, app).await {
                tracing::error!("Web dashboard error: {}", e);
            }
        });
    }

    tracing::info!("btest-server-pro starting on port {}", cli.port);

    // "--listen none" disables the IPv4 listener entirely.
    let v4 = if cli.listen_addr.eq_ignore_ascii_case("none") { None } else { Some(cli.listen_addr) };
    let v6 = cli.listen6_addr;

    // Runs until the accept loop errors out; never returns on success.
    server_loop::run_pro_server(
        cli.port,
        cli.ecsrp5,
        v4, v6,
        db,
        quota_mgr,
        cli.quota_check_interval,
    ).await?;

    Ok(())
}
|
||||
470
src/server_pro/quota.rs
Normal file
470
src/server_pro/quota.rs
Normal file
@@ -0,0 +1,470 @@
|
||||
//! Bandwidth quota management for btest-server-pro.
|
||||
//!
|
||||
//! Enforces per-user and per-IP bandwidth limits (daily/weekly/monthly),
|
||||
//! with separate tracking for inbound (client-to-server) and outbound
|
||||
//! (server-to-client) directions.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use super::user_db::UserDb;
|
||||
|
||||
/// Traffic direction for bandwidth tests.
///
/// From the **server's** perspective:
/// - `Inbound` = client sends data to us (client TX, server RX)
/// - `Outbound` = we send data to the client (server TX, client RX)
/// - `Both` = bidirectional test
///
/// Used by [`QuotaManager::check_ip`] to decide which directional IP
/// limits apply to an incoming test.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Direction {
    Inbound,
    Outbound,
    Both,
}
|
||||
|
||||
/// Central quota-enforcement state, shared (cheaply, via `Clone`) across
/// connection handler tasks. All byte limits follow the convention
/// 0 = unlimited; usage counters themselves live in the [`UserDb`].
#[derive(Clone)]
pub struct QuotaManager {
    // Handle to the usage/user database; assumed internally shareable
    // (cloned per connection in server_loop).
    db: UserDb,
    /// Per-user defaults (0 = unlimited)
    default_daily: u64,
    default_weekly: u64,
    default_monthly: u64,
    /// Per-IP combined (inbound + outbound) limits (0 = unlimited) — for abuse prevention
    ip_daily: u64,
    ip_weekly: u64,
    ip_monthly: u64,
    /// Per-IP directional limits (0 = unlimited)
    ip_daily_inbound: u64,
    ip_daily_outbound: u64,
    ip_weekly_inbound: u64,
    ip_weekly_outbound: u64,
    ip_monthly_inbound: u64,
    ip_monthly_outbound: u64,
    /// Max simultaneous connections from one IP
    max_conn_per_ip: u32,
    /// Max test duration in seconds
    max_duration: u64,
    // Live connection counts keyed by peer IP; shared across all clones so
    // every handler task sees the same counts. Maintained by
    // connect()/disconnect().
    active_connections: Arc<Mutex<HashMap<IpAddr, u32>>>,
}
|
||||
|
||||
/// Reasons a test is refused by quota enforcement. Each quota variant
/// carries the observed usage and the limit it exceeded (both in bytes),
/// so callers can log a precise "used/limit" message via `Display`.
#[derive(Debug)]
pub enum QuotaError {
    // Per-user limits (combined tx+rx).
    DailyExceeded { used: u64, limit: u64 },
    WeeklyExceeded { used: u64, limit: u64 },
    MonthlyExceeded { used: u64, limit: u64 },
    /// Combined (inbound + outbound) IP daily limit exceeded.
    IpDailyExceeded { used: u64, limit: u64 },
    /// Combined (inbound + outbound) IP weekly limit exceeded.
    IpWeeklyExceeded { used: u64, limit: u64 },
    /// Combined (inbound + outbound) IP monthly limit exceeded.
    IpMonthlyExceeded { used: u64, limit: u64 },
    /// Per-direction IP daily limits.
    IpInboundDailyExceeded { used: u64, limit: u64 },
    IpOutboundDailyExceeded { used: u64, limit: u64 },
    /// Per-direction IP weekly limits.
    IpInboundWeeklyExceeded { used: u64, limit: u64 },
    IpOutboundWeeklyExceeded { used: u64, limit: u64 },
    /// Per-direction IP monthly limits.
    IpInboundMonthlyExceeded { used: u64, limit: u64 },
    IpOutboundMonthlyExceeded { used: u64, limit: u64 },
    // Non-byte-quota rejections.
    TooManyConnections { current: u32, limit: u32 },
    UserDisabled,
    UserNotFound,
}
|
||||
|
||||
impl std::fmt::Display for QuotaError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::DailyExceeded { used, limit } =>
|
||||
write!(f, "User daily quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::WeeklyExceeded { used, limit } =>
|
||||
write!(f, "User weekly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::MonthlyExceeded { used, limit } =>
|
||||
write!(f, "User monthly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpDailyExceeded { used, limit } =>
|
||||
write!(f, "IP daily quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpWeeklyExceeded { used, limit } =>
|
||||
write!(f, "IP weekly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpMonthlyExceeded { used, limit } =>
|
||||
write!(f, "IP monthly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpInboundDailyExceeded { used, limit } =>
|
||||
write!(f, "IP inbound daily quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpOutboundDailyExceeded { used, limit } =>
|
||||
write!(f, "IP outbound daily quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpInboundWeeklyExceeded { used, limit } =>
|
||||
write!(f, "IP inbound weekly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpOutboundWeeklyExceeded { used, limit } =>
|
||||
write!(f, "IP outbound weekly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpInboundMonthlyExceeded { used, limit } =>
|
||||
write!(f, "IP inbound monthly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpOutboundMonthlyExceeded { used, limit } =>
|
||||
write!(f, "IP outbound monthly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::TooManyConnections { current, limit } =>
|
||||
write!(f, "Too many connections from this IP: {}/{}", current, limit),
|
||||
Self::UserDisabled => write!(f, "User account is disabled"),
|
||||
Self::UserNotFound => write!(f, "User not found"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl QuotaManager {
    /// Build a manager from server-wide limits. Every byte limit uses the
    /// 0 = unlimited convention; directional IP limits are expected to have
    /// already been resolved against the combined flags by the caller
    /// (see `or_fallback` in main).
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        db: UserDb,
        default_daily: u64,
        default_weekly: u64,
        default_monthly: u64,
        ip_daily: u64,
        ip_weekly: u64,
        ip_monthly: u64,
        ip_daily_inbound: u64,
        ip_daily_outbound: u64,
        ip_weekly_inbound: u64,
        ip_weekly_outbound: u64,
        ip_monthly_inbound: u64,
        ip_monthly_outbound: u64,
        max_conn_per_ip: u32,
        max_duration: u64,
    ) -> Self {
        Self {
            db,
            default_daily,
            default_weekly,
            default_monthly,
            ip_daily,
            ip_weekly,
            ip_monthly,
            ip_daily_inbound,
            ip_daily_outbound,
            ip_weekly_inbound,
            ip_weekly_outbound,
            ip_monthly_inbound,
            ip_monthly_outbound,
            max_conn_per_ip,
            max_duration,
            active_connections: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Check if a user is allowed to start a test.
    ///
    /// Verifies the account exists and is enabled, then checks combined
    /// (tx+rx) usage against daily, weekly, and monthly limits. Per-user
    /// daily/weekly quotas (> 0) override the server defaults.
    ///
    /// Usage reads that fail fall back to (0, 0) — i.e. a DB read error is
    /// treated as "no usage" and lets the test proceed.
    pub fn check_user(&self, username: &str) -> Result<(), QuotaError> {
        // DB errors and "no such row" both map to UserNotFound.
        let user = self.db.get_user(username)
            .map_err(|_| QuotaError::UserNotFound)?
            .ok_or(QuotaError::UserNotFound)?;

        if !user.enabled {
            return Err(QuotaError::UserDisabled);
        }

        // Daily
        let daily_limit = if user.daily_quota > 0 { user.daily_quota as u64 } else { self.default_daily };
        if daily_limit > 0 {
            let (tx, rx) = self.db.get_daily_usage(username).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= daily_limit {
                return Err(QuotaError::DailyExceeded { used, limit: daily_limit });
            }
        }

        // Weekly
        let weekly_limit = if user.weekly_quota > 0 { user.weekly_quota as u64 } else { self.default_weekly };
        if weekly_limit > 0 {
            let (tx, rx) = self.db.get_weekly_usage(username).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= weekly_limit {
                return Err(QuotaError::WeeklyExceeded { used, limit: weekly_limit });
            }
        }

        // Monthly
        // NOTE(review): unlike daily/weekly there is no per-user monthly
        // override here — only the server default is consulted. Confirm
        // whether the user record is meant to carry a monthly quota too.
        if self.default_monthly > 0 {
            let (tx, rx) = self.db.get_monthly_usage(username).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= self.default_monthly {
                return Err(QuotaError::MonthlyExceeded { used, limit: self.default_monthly });
            }
        }

        Ok(())
    }

    /// Check if an IP is allowed to connect, considering both combined and
    /// directional bandwidth quotas.
    ///
    /// The `direction` parameter indicates which direction the test will use.
    /// For `Direction::Both`, both inbound and outbound directional limits are
    /// checked. Combined (total) limits are always checked regardless of
    /// direction.
    ///
    /// NOTE: this only *checks* the connection count; the caller must still
    /// call [`Self::connect`] to register the connection. The two steps are
    /// not atomic, so concurrent connects can briefly overshoot the limit.
    pub fn check_ip(&self, ip: &IpAddr, direction: Direction) -> Result<(), QuotaError> {
        // Connection limit (0 = unlimited).
        if self.max_conn_per_ip > 0 {
            let conns = self.active_connections.lock().unwrap();
            let current = conns.get(ip).copied().unwrap_or(0);
            if current >= self.max_conn_per_ip {
                return Err(QuotaError::TooManyConnections {
                    current,
                    limit: self.max_conn_per_ip,
                });
            }
        }

        // DB usage tables are keyed by the IP's string form.
        let ip_str = ip.to_string();

        // --- Combined (inbound + outbound) limits ---
        self.check_ip_combined(&ip_str)?;

        // --- Directional limits ---
        let check_inbound = matches!(direction, Direction::Inbound | Direction::Both);
        let check_outbound = matches!(direction, Direction::Outbound | Direction::Both);

        if check_inbound {
            self.check_ip_inbound(&ip_str)?;
        }
        if check_outbound {
            self.check_ip_outbound(&ip_str)?;
        }

        Ok(())
    }

    /// Check combined (total inbound + outbound) IP limits.
    /// Failed usage reads fall back to (0, 0), i.e. fail-open.
    fn check_ip_combined(&self, ip_str: &str) -> Result<(), QuotaError> {
        // IP daily (combined)
        if self.ip_daily > 0 {
            let (tx, rx) = self.db.get_ip_daily_usage(ip_str).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= self.ip_daily {
                return Err(QuotaError::IpDailyExceeded { used, limit: self.ip_daily });
            }
        }

        // IP weekly (combined)
        if self.ip_weekly > 0 {
            let (tx, rx) = self.db.get_ip_weekly_usage(ip_str).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= self.ip_weekly {
                return Err(QuotaError::IpWeeklyExceeded { used, limit: self.ip_weekly });
            }
        }

        // IP monthly (combined)
        if self.ip_monthly > 0 {
            let (tx, rx) = self.db.get_ip_monthly_usage(ip_str).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= self.ip_monthly {
                return Err(QuotaError::IpMonthlyExceeded { used, limit: self.ip_monthly });
            }
        }

        Ok(())
    }

    /// Check inbound-only (client sends to us) IP limits.
    fn check_ip_inbound(&self, ip_str: &str) -> Result<(), QuotaError> {
        // Daily inbound
        if self.ip_daily_inbound > 0 {
            let used = self.db.get_ip_daily_inbound(ip_str).unwrap_or(0);
            if used >= self.ip_daily_inbound {
                return Err(QuotaError::IpInboundDailyExceeded {
                    used,
                    limit: self.ip_daily_inbound,
                });
            }
        }

        // Weekly inbound
        if self.ip_weekly_inbound > 0 {
            let used = self.db.get_ip_weekly_inbound(ip_str).unwrap_or(0);
            if used >= self.ip_weekly_inbound {
                return Err(QuotaError::IpInboundWeeklyExceeded {
                    used,
                    limit: self.ip_weekly_inbound,
                });
            }
        }

        // Monthly inbound
        if self.ip_monthly_inbound > 0 {
            let used = self.db.get_ip_monthly_inbound(ip_str).unwrap_or(0);
            if used >= self.ip_monthly_inbound {
                return Err(QuotaError::IpInboundMonthlyExceeded {
                    used,
                    limit: self.ip_monthly_inbound,
                });
            }
        }

        Ok(())
    }

    /// Check outbound-only (we send to client) IP limits.
    fn check_ip_outbound(&self, ip_str: &str) -> Result<(), QuotaError> {
        // Daily outbound
        if self.ip_daily_outbound > 0 {
            let used = self.db.get_ip_daily_outbound(ip_str).unwrap_or(0);
            if used >= self.ip_daily_outbound {
                return Err(QuotaError::IpOutboundDailyExceeded {
                    used,
                    limit: self.ip_daily_outbound,
                });
            }
        }

        // Weekly outbound
        if self.ip_weekly_outbound > 0 {
            let used = self.db.get_ip_weekly_outbound(ip_str).unwrap_or(0);
            if used >= self.ip_weekly_outbound {
                return Err(QuotaError::IpOutboundWeeklyExceeded {
                    used,
                    limit: self.ip_weekly_outbound,
                });
            }
        }

        // Monthly outbound
        if self.ip_monthly_outbound > 0 {
            let used = self.db.get_ip_monthly_outbound(ip_str).unwrap_or(0);
            if used >= self.ip_monthly_outbound {
                return Err(QuotaError::IpOutboundMonthlyExceeded {
                    used,
                    limit: self.ip_monthly_outbound,
                });
            }
        }

        Ok(())
    }

    /// Register an active connection for `ip` (increments its count).
    /// Must be balanced by exactly one [`Self::disconnect`] call.
    pub fn connect(&self, ip: &IpAddr) {
        let mut conns = self.active_connections.lock().unwrap();
        *conns.entry(*ip).or_insert(0) += 1;
    }

    /// Unregister a connection for `ip`. Saturating decrement; the entry is
    /// removed once the count reaches zero so the map does not grow
    /// unboundedly. A call without a matching connect() is a no-op only if
    /// the IP has no entry — otherwise it decrements someone else's count.
    pub fn disconnect(&self, ip: &IpAddr) {
        let mut conns = self.active_connections.lock().unwrap();
        if let Some(count) = conns.get_mut(ip) {
            *count = count.saturating_sub(1);
            if *count == 0 {
                conns.remove(ip);
            }
        }
    }

    /// Record usage after a test completes (both user and IP), with separate
    /// inbound and outbound byte counts.
    ///
    /// - `inbound_bytes`: bytes the client sent to us (server RX).
    /// - `outbound_bytes`: bytes we sent to the client (server TX).
    ///
    /// Both the combined user/IP usage and directional IP usage are recorded.
    /// DB write failures are logged and otherwise ignored (usage may be
    /// under-counted on error, never double-counted).
    pub fn record_usage(
        &self,
        username: &str,
        ip: &str,
        inbound_bytes: u64,
        outbound_bytes: u64,
    ) {
        // Record combined user usage (tx/rx from the server's perspective:
        // tx = outbound, rx = inbound).
        if let Err(e) = self.db.record_usage(username, outbound_bytes, inbound_bytes) {
            tracing::error!("Failed to record user usage for {}: {}", username, e);
        }

        // Record IP usage — record_ip_usage already writes both the
        // inbound_bytes and outbound_bytes columns in one operation.
        // Do NOT also call record_ip_inbound_usage/record_ip_outbound_usage
        // as they update the same columns and would double-count.
        if let Err(e) = self.db.record_ip_usage(ip, outbound_bytes, inbound_bytes) {
            tracing::error!("Failed to record IP usage for {}: {}", ip, e);
        }
    }

    /// Calculate the remaining byte budget for a user+IP combination.
    /// Returns the minimum remaining quota across all applicable limits.
    /// Used to set `BandwidthState::byte_budget` before a test starts,
    /// preventing overshoot beyond quota boundaries.
    ///
    /// Returns `u64::MAX` when no limit applies (all limits 0/unlimited).
    pub fn remaining_budget(&self, username: &str, ip: &IpAddr) -> u64 {
        let mut budget = u64::MAX;
        let ip_str = ip.to_string();

        // Helper: min that ignores 0 (unlimited)
        let cap = |budget: &mut u64, limit: u64, used: u64| {
            if limit > 0 {
                let remaining = limit.saturating_sub(used);
                *budget = (*budget).min(remaining);
            }
        };

        // User quotas (combined tx+rx)
        if let Ok(Some(user)) = self.db.get_user(username) {
            let daily_limit = if user.daily_quota > 0 { user.daily_quota as u64 } else { self.default_daily };
            if daily_limit > 0 {
                let (tx, rx) = self.db.get_daily_usage(username).unwrap_or((0, 0));
                cap(&mut budget, daily_limit, tx + rx);
            }

            let weekly_limit = if user.weekly_quota > 0 { user.weekly_quota as u64 } else { self.default_weekly };
            if weekly_limit > 0 {
                let (tx, rx) = self.db.get_weekly_usage(username).unwrap_or((0, 0));
                cap(&mut budget, weekly_limit, tx + rx);
            }

            if self.default_monthly > 0 {
                let (tx, rx) = self.db.get_monthly_usage(username).unwrap_or((0, 0));
                cap(&mut budget, self.default_monthly, tx + rx);
            }
        }

        // IP combined quotas
        if self.ip_daily > 0 {
            let (tx, rx) = self.db.get_ip_daily_usage(&ip_str).unwrap_or((0, 0));
            cap(&mut budget, self.ip_daily, tx + rx);
        }
        if self.ip_weekly > 0 {
            let (tx, rx) = self.db.get_ip_weekly_usage(&ip_str).unwrap_or((0, 0));
            cap(&mut budget, self.ip_weekly, tx + rx);
        }
        if self.ip_monthly > 0 {
            let (tx, rx) = self.db.get_ip_monthly_usage(&ip_str).unwrap_or((0, 0));
            cap(&mut budget, self.ip_monthly, tx + rx);
        }

        // IP directional quotas — each direction's remaining headroom caps
        // the overall budget independently, regardless of the test's actual
        // direction (conservative: may undercut a one-directional test).
        if self.ip_daily_inbound > 0 {
            let used = self.db.get_ip_daily_inbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_daily_inbound, used);
        }
        if self.ip_daily_outbound > 0 {
            let used = self.db.get_ip_daily_outbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_daily_outbound, used);
        }
        if self.ip_weekly_inbound > 0 {
            let used = self.db.get_ip_weekly_inbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_weekly_inbound, used);
        }
        if self.ip_weekly_outbound > 0 {
            let used = self.db.get_ip_weekly_outbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_weekly_outbound, used);
        }
        if self.ip_monthly_inbound > 0 {
            let used = self.db.get_ip_monthly_inbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_monthly_inbound, used);
        }
        if self.ip_monthly_outbound > 0 {
            let used = self.db.get_ip_monthly_outbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_monthly_outbound, used);
        }

        budget
    }

    /// Maximum allowed test duration in seconds (0 = unlimited).
    pub fn max_duration(&self) -> u64 {
        self.max_duration
    }

    /// Current number of registered active connections for `ip`.
    pub fn active_connections_count(&self, ip: &IpAddr) -> u32 {
        let conns = self.active_connections.lock().unwrap();
        conns.get(ip).copied().unwrap_or(0)
    }
}
|
||||
449
src/server_pro/server_loop.rs
Normal file
449
src/server_pro/server_loop.rs
Normal file
@@ -0,0 +1,449 @@
|
||||
//! Enhanced server loop with quota enforcement.
|
||||
//!
|
||||
//! Wraps the standard btest server connection handler with:
|
||||
//! - Pre-connection IP/user quota checks
|
||||
//! - MD5 challenge-response auth against user DB
|
||||
//! - TCP multi-connection session support
|
||||
//! - Mid-session quota enforcement via QuotaEnforcer
|
||||
//! - Post-session usage recording
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use btest_rs::protocol::*;
|
||||
use btest_rs::bandwidth::BandwidthState;
|
||||
|
||||
use super::enforcer::{QuotaEnforcer, StopReason};
|
||||
use super::quota::{Direction, QuotaManager};
|
||||
use super::user_db::UserDb;
|
||||
|
||||
/// Pending TCP multi-connection session.
///
/// Created by the primary connection; secondary connections from the same
/// IP join by presenting the session token until `streams` reaches
/// `expected`, at which point the test starts.
struct TcpSession {
    // IP the primary connection came from; secondaries must match it.
    peer_ip: std::net::IpAddr,
    // Authenticated username of the primary connection.
    username: String,
    // Test parameters sent by the client on the primary connection.
    cmd: Command,
    // Sockets collected so far (primary + joined secondaries).
    streams: Vec<TcpStream>,
    // Total number of connections the client announced (tcp_conn_count).
    expected: u8,
}

// Pending sessions keyed by the 16-bit session token; shared between all
// connection-handler tasks (async Mutex: held across awaits while joining).
type SessionMap = Arc<Mutex<HashMap<u16, TcpSession>>>;
|
||||
|
||||
/// Run the pro server with quota enforcement.
///
/// Binds IPv4 and/or IPv6 listeners on `port` (each address is optional,
/// but at least one must be given), then accepts connections forever and
/// spawns one handler task per connection.
///
/// NOTE(review): `_ecsrp5` is accepted but unused here — confirm whether
/// EC-SRP5 support is handled elsewhere or still pending.
///
/// NOTE(review): accept errors are propagated with `?`, which terminates
/// the entire server loop on a single transient accept failure (e.g.
/// EMFILE). Consider logging and continuing instead.
pub async fn run_pro_server(
    port: u16,
    _ecsrp5: bool,
    listen_v4: Option<String>,
    listen_v6: Option<String>,
    db: UserDb,
    quota_mgr: QuotaManager,
    quota_check_interval: u64,
) -> anyhow::Result<()> {
    let v4_listener = if let Some(ref addr) = listen_v4 {
        let bind_addr = format!("{}:{}", addr, port);
        Some(TcpListener::bind(&bind_addr).await?)
    } else {
        None
    };

    // IPv6 addresses must be bracketed in "addr:port" form.
    let v6_listener = if let Some(ref addr) = listen_v6 {
        let bind_addr = format!("[{}]:{}", addr, port);
        Some(TcpListener::bind(&bind_addr).await?)
    } else {
        None
    };

    if v4_listener.is_none() && v6_listener.is_none() {
        anyhow::bail!("No listeners bound");
    }

    // Shared map of pending TCP multi-connection sessions (see TcpSession).
    let sessions: SessionMap = Arc::new(Mutex::new(HashMap::new()));

    tracing::info!("btest-server-pro ready, accepting connections");

    loop {
        // Accept from whichever listener(s) exist; with both bound,
        // select! races the two accepts fairly.
        let (stream, peer) = match (&v4_listener, &v6_listener) {
            (Some(v4), Some(v6)) => {
                tokio::select! {
                    r = v4.accept() => r?,
                    r = v6.accept() => r?,
                }
            }
            (Some(v4), None) => v4.accept().await?,
            (None, Some(v6)) => v6.accept().await?,
            // Unreachable: the "both None" case bailed out above.
            _ => unreachable!(),
        };

        tracing::info!("New connection from {}", peer);

        // Per-task clones; UserDb/QuotaManager are designed to be cloned
        // per connection (shared state lives behind Arcs inside them).
        let db = db.clone();
        let qm = quota_mgr.clone();
        let interval = quota_check_interval;
        let sess = sessions.clone();

        tokio::spawn(async move {
            // handle_pro_connection returns:
            //   Ok(Some(..)) — primary connection that ran a test
            //   Ok(None)     — secondary join / pending multi-conn / rejected
            //   Err(..)      — protocol or I/O failure
            let is_primary = match handle_pro_connection(stream, peer, db, qm.clone(), interval, sess).await {
                Ok(Some((username, stop_reason, tx, rx))) => {
                    tracing::info!(
                        "Client {} (user '{}') finished: {} (tx={}, rx={})",
                        peer, username, stop_reason, tx, rx,
                    );
                    btest_rs::syslog_logger::test_end(
                        &peer.to_string(), "btest", &format!("{}", stop_reason),
                        tx, rx, 0, 0,
                    );
                    true
                }
                Ok(None) => false, // secondary connection or pending multi-conn
                Err(e) => {
                    tracing::error!("Client {} error: {}", peer, e);
                    true
                }
            };
            // Only decrement connection count for primary connections.
            // NOTE(review): the Err arm also reports is_primary=true, but
            // handle_pro_connection may fail *before* it calls qm.connect()
            // (e.g. during the initial read) — disconnect() then runs
            // without a matching connect(), which can under-count other
            // live connections from the same IP. Confirm the pairing.
            if is_primary {
                qm.disconnect(&peer.ip());
            }
        });
    }
}
|
||||
|
||||
/// Handle a single TCP connection. Returns None for secondary multi-conn joins.
|
||||
async fn handle_pro_connection(
|
||||
mut stream: TcpStream,
|
||||
peer: SocketAddr,
|
||||
db: UserDb,
|
||||
quota_mgr: QuotaManager,
|
||||
quota_check_interval: u64,
|
||||
sessions: SessionMap,
|
||||
) -> anyhow::Result<Option<(String, StopReason, u64, u64)>> {
|
||||
stream.set_nodelay(true)?;
|
||||
|
||||
// HELLO
|
||||
stream.write_all(&HELLO).await?;
|
||||
|
||||
// Read command (or session token for secondary connections)
|
||||
let mut cmd_buf = [0u8; 16];
|
||||
stream.read_exact(&mut cmd_buf).await?;
|
||||
|
||||
// Check if this is a secondary connection joining an existing TCP session
|
||||
// Secondary connections send [HI, LO, ...] matching an existing session token
|
||||
{
|
||||
let potential_token = u16::from_be_bytes([cmd_buf[0], cmd_buf[1]]);
|
||||
let mut map = sessions.lock().await;
|
||||
if let Some(session) = map.get_mut(&potential_token) {
|
||||
if session.peer_ip == peer.ip()
|
||||
&& session.streams.len() < session.expected as usize
|
||||
{
|
||||
tracing::info!(
|
||||
"Secondary connection from {} joining session (token={:04x}, {}/{})",
|
||||
peer, potential_token,
|
||||
session.streams.len() + 1, session.expected,
|
||||
);
|
||||
|
||||
// Auth the secondary connection with same token response
|
||||
let ok = [0x01, cmd_buf[0], cmd_buf[1], 0x00];
|
||||
stream.write_all(&ok).await?;
|
||||
stream.flush().await?;
|
||||
|
||||
session.streams.push(stream);
|
||||
|
||||
// If all connections have joined, start the test
|
||||
if session.streams.len() >= session.expected as usize {
|
||||
let session = map.remove(&potential_token).unwrap();
|
||||
let db2 = db.clone();
|
||||
let qm2 = quota_mgr.clone();
|
||||
tokio::spawn(async move {
|
||||
match run_pro_multiconn_test(
|
||||
session.streams, session.cmd, peer,
|
||||
&session.username, db2, qm2, quota_check_interval,
|
||||
).await {
|
||||
Ok((stop, tx, rx)) => {
|
||||
tracing::info!(
|
||||
"Multi-conn {} (user '{}') finished: {} (tx={}, rx={})",
|
||||
peer, session.username, stop, tx, rx,
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Multi-conn {} error: {}", peer, e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Primary connection — check IP quota/connection limit now
|
||||
if let Err(e) = quota_mgr.check_ip(&peer.ip(), Direction::Both) {
|
||||
tracing::warn!("Rejected {} — {}", peer, e);
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), "-", "-", &format!("{}", e),
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
quota_mgr.connect(&peer.ip());
|
||||
|
||||
let cmd = Command::deserialize(&cmd_buf);
|
||||
|
||||
tracing::info!(
|
||||
"Client {} command: proto={} dir={} conn_count={} tx_size={}",
|
||||
peer,
|
||||
if cmd.is_udp() { "UDP" } else { "TCP" },
|
||||
match cmd.direction { CMD_DIR_RX => "RX", CMD_DIR_TX => "TX", _ => "BOTH" },
|
||||
cmd.tcp_conn_count,
|
||||
cmd.tx_size,
|
||||
);
|
||||
|
||||
// Build auth OK response with session token for multi-connection
|
||||
let is_tcp_multi = !cmd.is_udp() && cmd.tcp_conn_count > 0;
|
||||
let session_token: u16 = if is_tcp_multi {
|
||||
rand::random::<u16>() | 0x0101 // ensure both bytes non-zero
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let ok_response: [u8; 4] = if is_tcp_multi {
|
||||
[0x01, (session_token >> 8) as u8, (session_token & 0xFF) as u8, 0x00]
|
||||
} else {
|
||||
AUTH_OK
|
||||
};
|
||||
|
||||
// Authenticate — MD5 challenge-response against DB
|
||||
stream.write_all(&AUTH_REQUIRED).await?;
|
||||
let challenge = btest_rs::auth::generate_challenge();
|
||||
stream.write_all(&challenge).await?;
|
||||
stream.flush().await?;
|
||||
|
||||
let mut response = [0u8; 48];
|
||||
stream.read_exact(&mut response).await?;
|
||||
|
||||
let received_hash = &response[0..16];
|
||||
let received_user = &response[16..48];
|
||||
|
||||
let user_end = received_user.iter().position(|&b| b == 0).unwrap_or(32);
|
||||
let username = std::str::from_utf8(&received_user[..user_end])
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
|
||||
// Verify against DB
|
||||
let user = db.get_user(&username)?;
|
||||
match user {
|
||||
None => {
|
||||
tracing::warn!("Auth failed: user '{}' not found", username);
|
||||
stream.write_all(&AUTH_FAILED).await?;
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), &username, "md5", "user not found",
|
||||
);
|
||||
anyhow::bail!("User not found");
|
||||
}
|
||||
Some(u) => {
|
||||
if !u.enabled {
|
||||
tracing::warn!("Auth failed: user '{}' is disabled", username);
|
||||
stream.write_all(&AUTH_FAILED).await?;
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), &username, "md5", "user disabled",
|
||||
);
|
||||
anyhow::bail!("User disabled");
|
||||
}
|
||||
|
||||
// Verify MD5 hash against stored raw password
|
||||
if let Ok(Some(raw_pass)) = db.get_password(&username) {
|
||||
let expected_hash = btest_rs::auth::compute_auth_hash(&raw_pass, &challenge);
|
||||
if received_hash != expected_hash {
|
||||
tracing::warn!("Auth failed: password mismatch for user '{}'", username);
|
||||
stream.write_all(&AUTH_FAILED).await?;
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), &username, "md5", "password mismatch",
|
||||
);
|
||||
anyhow::bail!("Auth failed");
|
||||
}
|
||||
}
|
||||
// If no raw password stored, accept (backwards compat with old DB entries)
|
||||
|
||||
stream.write_all(&ok_response).await?;
|
||||
stream.flush().await?;
|
||||
|
||||
tracing::info!("Auth successful for user '{}'", username);
|
||||
btest_rs::syslog_logger::auth_success(
|
||||
&peer.to_string(), &username, "md5",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Check user quota before starting test
|
||||
if let Err(e) = quota_mgr.check_user(&username) {
|
||||
tracing::warn!("Quota check failed for '{}': {}", username, e);
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), &username, "quota", &format!("{}", e),
|
||||
);
|
||||
return Ok(Some((username, StopReason::UserDailyQuota, 0, 0)));
|
||||
}
|
||||
|
||||
// TCP multi-connection: register session and wait for secondary connections
|
||||
if is_tcp_multi {
|
||||
tracing::info!(
|
||||
"TCP multi-connection: waiting for {} connections (token={:04x})",
|
||||
cmd.tcp_conn_count, session_token,
|
||||
);
|
||||
let mut map = sessions.lock().await;
|
||||
map.insert(session_token, TcpSession {
|
||||
peer_ip: peer.ip(),
|
||||
username: username.clone(),
|
||||
cmd: cmd.clone(),
|
||||
streams: vec![stream],
|
||||
expected: cmd.tcp_conn_count, // tcp_conn_count includes the primary
|
||||
});
|
||||
// The test will be started when all connections join (in the secondary handler above)
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Single-connection test
|
||||
run_pro_single_test(stream, cmd, peer, &username, db, quota_mgr, quota_check_interval).await
|
||||
.map(|(stop, tx, rx)| Some((username, stop, tx, rx)))
|
||||
}
|
||||
|
||||
/// Run a single-connection bandwidth test with quota enforcement.
|
||||
async fn run_pro_single_test(
|
||||
stream: TcpStream,
|
||||
cmd: Command,
|
||||
peer: SocketAddr,
|
||||
username: &str,
|
||||
db: UserDb,
|
||||
quota_mgr: QuotaManager,
|
||||
quota_check_interval: u64,
|
||||
) -> anyhow::Result<(StopReason, u64, u64)> {
|
||||
let proto_str = if cmd.is_udp() { "UDP" } else { "TCP" };
|
||||
let dir_str = match cmd.direction {
|
||||
CMD_DIR_RX => "RX", CMD_DIR_TX => "TX", _ => "BOTH"
|
||||
};
|
||||
let session_id = db.start_session(
|
||||
username, &peer.ip().to_string(), proto_str, dir_str,
|
||||
)?;
|
||||
|
||||
btest_rs::syslog_logger::test_start(
|
||||
&peer.to_string(), proto_str, dir_str, cmd.tcp_conn_count,
|
||||
);
|
||||
|
||||
let state = BandwidthState::new();
|
||||
|
||||
// Set byte budget
|
||||
let budget = quota_mgr.remaining_budget(username, &peer.ip());
|
||||
if budget < u64::MAX {
|
||||
state.set_budget(budget);
|
||||
tracing::info!("Byte budget for '{}' from {}: {} bytes", username, peer.ip(), budget);
|
||||
}
|
||||
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
quota_mgr.clone(),
|
||||
username.to_string(),
|
||||
peer.ip(),
|
||||
state.clone(),
|
||||
quota_check_interval,
|
||||
quota_mgr.max_duration(),
|
||||
);
|
||||
|
||||
let enforcer_state = state.clone();
|
||||
let enforcer_handle = tokio::spawn(async move {
|
||||
enforcer.run().await
|
||||
});
|
||||
|
||||
static UDP_PORT_OFFSET: std::sync::atomic::AtomicU16 = std::sync::atomic::AtomicU16::new(0);
|
||||
|
||||
let mut stream_mut = stream;
|
||||
let test_result = if cmd.is_udp() {
|
||||
let offset = UDP_PORT_OFFSET.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
|
||||
let udp_port = btest_rs::protocol::BTEST_UDP_PORT_START + offset;
|
||||
btest_rs::server::run_udp_test(
|
||||
&mut stream_mut, peer, &cmd, state.clone(), udp_port,
|
||||
).await
|
||||
} else {
|
||||
btest_rs::server::run_tcp_test(stream_mut, cmd.clone(), state.clone()).await
|
||||
};
|
||||
|
||||
enforcer_state.running.store(false, std::sync::atomic::Ordering::SeqCst);
|
||||
let stop_reason = enforcer_handle.await.unwrap_or(StopReason::ClientDisconnected);
|
||||
|
||||
let final_reason = match &test_result {
|
||||
Ok(_) => {
|
||||
if stop_reason == StopReason::ClientDisconnected {
|
||||
StopReason::ClientDisconnected
|
||||
} else {
|
||||
stop_reason
|
||||
}
|
||||
}
|
||||
Err(_) => StopReason::ClientDisconnected,
|
||||
};
|
||||
|
||||
let (total_tx, total_rx, _, _) = state.summary();
|
||||
quota_mgr.record_usage(username, &peer.ip().to_string(), total_tx, total_rx);
|
||||
db.end_session(session_id, total_tx, total_rx)?;
|
||||
|
||||
Ok((final_reason, total_tx, total_rx))
|
||||
}
|
||||
|
||||
/// Run a TCP multi-connection test with all streams collected.
/// Delegates to the standard multi-conn handler which correctly manages
/// TX+status injection for bidirectional mode.
///
/// Opens a DB session row, applies the caller's remaining byte budget,
/// spawns a `QuotaEnforcer` watchdog, then hands every collected stream to
/// `run_tcp_multiconn_test`. When the transfer ends, the enforcer is
/// stopped, aggregate usage is recorded for quota accounting, and the
/// session row is closed.
///
/// Returns the enforcer's stop reason plus total `(tx, rx)` byte counts.
async fn run_pro_multiconn_test(
    streams: Vec<TcpStream>,
    cmd: Command,
    peer: SocketAddr,
    username: &str,
    db: UserDb,
    quota_mgr: QuotaManager,
    quota_check_interval: u64,
) -> anyhow::Result<(StopReason, u64, u64)> {
    let dir_str = match cmd.direction {
        CMD_DIR_RX => "RX", CMD_DIR_TX => "TX", _ => "BOTH"
    };
    let session_id = db.start_session(
        username, &peer.ip().to_string(), "TCP", dir_str,
    )?;

    tracing::info!(
        "Starting TCP multi-conn test: {} streams, dir={}",
        streams.len(), dir_str,
    );

    let state = BandwidthState::new();

    // Cap the test at the remaining byte budget; u64::MAX means unlimited.
    let budget = quota_mgr.remaining_budget(username, &peer.ip());
    if budget < u64::MAX {
        state.set_budget(budget);
    }

    // Background watchdog re-checking quotas and the max test duration.
    let enforcer = QuotaEnforcer::new(
        quota_mgr.clone(),
        username.to_string(),
        peer.ip(),
        state.clone(),
        quota_check_interval,
        quota_mgr.max_duration(),
    );

    let enforcer_state = state.clone();
    let enforcer_handle = tokio::spawn(async move {
        enforcer.run().await
    });

    // Use the standard multi-connection handler which correctly handles
    // all direction modes (TX, RX, BOTH with status injection).
    // NOTE(review): the transfer result is deliberately discarded — a
    // transport error still falls through to usage accounting below.
    let _test_result = btest_rs::server::run_tcp_multiconn_test(
        streams, cmd, state.clone(),
    ).await;

    // Signal the enforcer to stop *before* awaiting its verdict; a panicked
    // or cancelled enforcer task counts as a plain client disconnect.
    enforcer_state.running.store(false, std::sync::atomic::Ordering::SeqCst);
    let stop_reason = enforcer_handle.await.unwrap_or(StopReason::ClientDisconnected);

    // Always account usage and close the session, even if the test errored.
    let (total_tx, total_rx, _, _) = state.summary();
    quota_mgr.record_usage(username, &peer.ip().to_string(), total_tx, total_rx);
    db.end_session(session_id, total_tx, total_rx)?;

    Ok((stop_reason, total_tx, total_rx))
}
|
||||
641
src/server_pro/user_db.rs
Normal file
641
src/server_pro/user_db.rs
Normal file
@@ -0,0 +1,641 @@
|
||||
//! SQLite-based user database for btest-server-pro.
|
||||
//!
|
||||
//! Stores users with credentials, quotas, and usage tracking.
|
||||
|
||||
use rusqlite::{Connection, params};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
/// Thread-safe handle to the SQLite user/usage database.
///
/// Cloning is cheap: all clones share the single connection behind an
/// `Arc<Mutex<_>>`, so database access is serialized through one lock.
#[derive(Clone)]
pub struct UserDb {
    // Single shared connection; rusqlite's Connection is not Sync on its own.
    conn: Arc<Mutex<Connection>>,
    // Filesystem path the connection was opened with (exposed via `path()`).
    path: Arc<String>,
}
|
||||
|
||||
/// A single account row from the `users` table.
#[derive(Debug, Clone)]
pub struct User {
    pub id: i64,
    pub username: String,
    pub password_hash: String, // stored as hex of SHA256(username:password)
    pub daily_quota: i64, // 0 = use default
    pub weekly_quota: i64, // 0 = use default
    pub enabled: bool, // disabled users fail auth and password lookup
}
|
||||
|
||||
/// One day's aggregated usage for a user (mirrors a `usage` table row).
///
/// NOTE(review): not referenced by any code visible in this file — confirm
/// it is consumed elsewhere (e.g. CLI reporting) before removing.
#[derive(Debug)]
pub struct UsageRecord {
    pub username: String,
    pub date: String, // YYYY-MM-DD
    pub tx_bytes: u64,
    pub rx_bytes: u64,
    pub test_count: u32,
}
|
||||
|
||||
/// Per-second bandwidth interval data for graphing.
#[derive(Debug, Clone)]
pub struct IntervalData {
    pub interval_num: i32, // ordering key within a session (see get_session_intervals)
    pub tx_mbps: f64,
    pub rx_mbps: f64,
    pub local_cpu: i32,
    pub remote_cpu: i32,
    pub lost: i64, // lost packets in this interval (UDP)
}
|
||||
|
||||
/// Summary of a single test session.
#[derive(Debug, Clone)]
pub struct SessionSummary {
    pub id: i64,
    pub started_at: String,
    pub ended_at: Option<String>, // None while the session is still running
    pub protocol: String,
    pub direction: String,
    pub tx_bytes: u64,
    pub rx_bytes: u64,
}
|
||||
|
||||
/// Aggregate statistics for an IP address.
#[derive(Debug, Clone)]
pub struct IpStats {
    pub total_tests: u64,
    pub total_inbound: u64,  // bytes received FROM the client (server rx)
    pub total_outbound: u64, // bytes sent TO the client (server tx)
    pub avg_tx_mbps: f64,
    pub avg_rx_mbps: f64,
}
|
||||
|
||||
impl UserDb {
|
||||
pub fn open(path: &str) -> anyhow::Result<Self> {
|
||||
let conn = Connection::open(path)?;
|
||||
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA busy_timeout=5000;")?;
|
||||
Ok(Self {
|
||||
conn: Arc::new(Mutex::new(conn)),
|
||||
path: Arc::new(path.to_string()),
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the database file path.
|
||||
pub fn path(&self) -> &str {
|
||||
&self.path
|
||||
}
|
||||
|
||||
pub fn ensure_tables(&self) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute_batch("
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
username TEXT UNIQUE NOT NULL,
|
||||
password_hash TEXT NOT NULL,
|
||||
daily_quota INTEGER DEFAULT 0,
|
||||
weekly_quota INTEGER DEFAULT 0,
|
||||
enabled INTEGER DEFAULT 1,
|
||||
created_at TEXT DEFAULT (datetime('now'))
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS usage (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
username TEXT NOT NULL,
|
||||
date TEXT NOT NULL,
|
||||
tx_bytes INTEGER DEFAULT 0,
|
||||
rx_bytes INTEGER DEFAULT 0,
|
||||
test_count INTEGER DEFAULT 0,
|
||||
UNIQUE(username, date)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ip_usage (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
ip TEXT NOT NULL,
|
||||
date TEXT NOT NULL,
|
||||
inbound_bytes INTEGER DEFAULT 0,
|
||||
outbound_bytes INTEGER DEFAULT 0,
|
||||
test_count INTEGER DEFAULT 0,
|
||||
UNIQUE(ip, date)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS sessions (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
username TEXT NOT NULL,
|
||||
peer_ip TEXT NOT NULL,
|
||||
started_at TEXT DEFAULT (datetime('now')),
|
||||
ended_at TEXT,
|
||||
tx_bytes INTEGER DEFAULT 0,
|
||||
rx_bytes INTEGER DEFAULT 0,
|
||||
protocol TEXT,
|
||||
direction TEXT
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS test_intervals (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
session_id INTEGER NOT NULL,
|
||||
interval_num INTEGER NOT NULL,
|
||||
tx_bytes INTEGER DEFAULT 0,
|
||||
rx_bytes INTEGER DEFAULT 0,
|
||||
tx_mbps REAL DEFAULT 0,
|
||||
rx_mbps REAL DEFAULT 0,
|
||||
local_cpu INTEGER DEFAULT 0,
|
||||
remote_cpu INTEGER DEFAULT 0,
|
||||
lost_packets INTEGER DEFAULT 0,
|
||||
FOREIGN KEY(session_id) REFERENCES sessions(id)
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_usage_user_date ON usage(username, date);
|
||||
CREATE INDEX IF NOT EXISTS idx_ip_usage_date ON ip_usage(ip, date);
|
||||
CREATE INDEX IF NOT EXISTS idx_sessions_peer ON sessions(peer_ip, started_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_intervals_session ON test_intervals(session_id);
|
||||
")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn user_count(&self) -> anyhow::Result<u64> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let count: i64 = conn.query_row("SELECT COUNT(*) FROM users", [], |r| r.get(0))?;
|
||||
Ok(count as u64)
|
||||
}
|
||||
|
||||
pub fn add_user(&self, username: &str, password: &str) -> anyhow::Result<()> {
|
||||
let hash = hash_password(username, password);
|
||||
let conn = self.conn.lock().unwrap();
|
||||
// Ensure password_raw column exists (migration for older databases)
|
||||
let _ = conn.execute("ALTER TABLE users ADD COLUMN password_raw TEXT DEFAULT ''", []);
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO users (username, password_hash, password_raw) VALUES (?1, ?2, ?3)",
|
||||
params![username, hash, password],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the raw password for MD5 challenge-response auth.
|
||||
pub fn get_password(&self, username: &str) -> anyhow::Result<Option<String>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result = conn.query_row(
|
||||
"SELECT password_raw FROM users WHERE username = ?1 AND enabled = 1",
|
||||
params![username],
|
||||
|row| row.get::<_, String>(0),
|
||||
).optional()?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn get_user(&self, username: &str) -> anyhow::Result<Option<User>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, username, password_hash, daily_quota, weekly_quota, enabled FROM users WHERE username = ?1"
|
||||
)?;
|
||||
let user = stmt.query_row(params![username], |row| {
|
||||
Ok(User {
|
||||
id: row.get(0)?,
|
||||
username: row.get(1)?,
|
||||
password_hash: row.get(2)?,
|
||||
daily_quota: row.get(3)?,
|
||||
weekly_quota: row.get(4)?,
|
||||
enabled: row.get::<_, i32>(5)? != 0,
|
||||
})
|
||||
}).optional()?;
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
pub fn verify_password(&self, username: &str, password: &str) -> anyhow::Result<bool> {
|
||||
let expected = hash_password(username, password);
|
||||
match self.get_user(username)? {
|
||||
Some(user) => Ok(user.enabled && user.password_hash == expected),
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn record_usage(&self, username: &str, tx_bytes: u64, rx_bytes: u64) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let today = chrono_date_today();
|
||||
conn.execute(
|
||||
"INSERT INTO usage (username, date, tx_bytes, rx_bytes, test_count)
|
||||
VALUES (?1, ?2, ?3, ?4, 1)
|
||||
ON CONFLICT(username, date) DO UPDATE SET
|
||||
tx_bytes = tx_bytes + ?3,
|
||||
rx_bytes = rx_bytes + ?4,
|
||||
test_count = test_count + 1",
|
||||
params![username, today, tx_bytes as i64, rx_bytes as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_daily_usage(&self, username: &str) -> anyhow::Result<(u64, u64)> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let today = chrono_date_today();
|
||||
let result = conn.query_row(
|
||||
"SELECT COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0) FROM usage WHERE username = ?1 AND date = ?2",
|
||||
params![username, today],
|
||||
|row| {
|
||||
let a: i64 = row.get(0)?;
|
||||
let b: i64 = row.get(1)?;
|
||||
Ok((a as u64, b as u64))
|
||||
},
|
||||
)?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn get_weekly_usage(&self, username: &str) -> anyhow::Result<(u64, u64)> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result = conn.query_row(
|
||||
"SELECT COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0) FROM usage
|
||||
WHERE username = ?1 AND date >= date('now', '-7 days')",
|
||||
params![username],
|
||||
|row| {
|
||||
let a: i64 = row.get(0)?;
|
||||
let b: i64 = row.get(1)?;
|
||||
Ok((a as u64, b as u64))
|
||||
},
|
||||
)?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn get_monthly_usage(&self, username: &str) -> anyhow::Result<(u64, u64)> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result = conn.query_row(
|
||||
"SELECT COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0) FROM usage
|
||||
WHERE username = ?1 AND date >= date('now', '-30 days')",
|
||||
params![username],
|
||||
|row| {
|
||||
let a: i64 = row.get(0)?;
|
||||
let b: i64 = row.get(1)?;
|
||||
Ok((a as u64, b as u64))
|
||||
},
|
||||
)?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
// --- Per-IP usage tracking ---
|
||||
|
||||
pub fn record_ip_usage(&self, ip: &str, tx_bytes: u64, rx_bytes: u64) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let today = chrono_date_today();
|
||||
// From the server's perspective: inbound = data coming FROM the client (rx),
|
||||
// outbound = data going TO the client (tx).
|
||||
let inbound = rx_bytes;
|
||||
let outbound = tx_bytes;
|
||||
conn.execute(
|
||||
"INSERT INTO ip_usage (ip, date, inbound_bytes, outbound_bytes, test_count)
|
||||
VALUES (?1, ?2, ?3, ?4, 1)
|
||||
ON CONFLICT(ip, date) DO UPDATE SET
|
||||
inbound_bytes = inbound_bytes + ?3,
|
||||
outbound_bytes = outbound_bytes + ?4,
|
||||
test_count = test_count + 1",
|
||||
params![ip, today, inbound as i64, outbound as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_ip_daily_usage(&self, ip: &str) -> anyhow::Result<(u64, u64)> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let today = chrono_date_today();
|
||||
let result = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes),0), COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date = ?2",
|
||||
params![ip, today],
|
||||
|row| {
|
||||
let inbound: i64 = row.get(0)?;
|
||||
let outbound: i64 = row.get(1)?;
|
||||
Ok((inbound as u64, outbound as u64))
|
||||
},
|
||||
)?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn get_ip_weekly_usage(&self, ip: &str) -> anyhow::Result<(u64, u64)> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes),0), COALESCE(SUM(outbound_bytes),0) FROM ip_usage
|
||||
WHERE ip = ?1 AND date >= date('now', '-7 days')",
|
||||
params![ip],
|
||||
|row| {
|
||||
let inbound: i64 = row.get(0)?;
|
||||
let outbound: i64 = row.get(1)?;
|
||||
Ok((inbound as u64, outbound as u64))
|
||||
},
|
||||
)?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn get_ip_monthly_usage(&self, ip: &str) -> anyhow::Result<(u64, u64)> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes),0), COALESCE(SUM(outbound_bytes),0) FROM ip_usage
|
||||
WHERE ip = ?1 AND date >= date('now', '-30 days')",
|
||||
params![ip],
|
||||
|row| {
|
||||
let inbound: i64 = row.get(0)?;
|
||||
let outbound: i64 = row.get(1)?;
|
||||
Ok((inbound as u64, outbound as u64))
|
||||
},
|
||||
)?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
// --- Per-IP directional usage (single-column queries) ---
|
||||
|
||||
/// Record inbound-only IP usage (data coming FROM the client).
|
||||
pub fn record_ip_inbound_usage(&self, ip: &str, bytes: u64) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let today = chrono_date_today();
|
||||
conn.execute(
|
||||
"INSERT INTO ip_usage (ip, date, inbound_bytes, test_count)
|
||||
VALUES (?1, ?2, ?3, 0)
|
||||
ON CONFLICT(ip, date) DO UPDATE SET
|
||||
inbound_bytes = inbound_bytes + ?3",
|
||||
params![ip, today, bytes as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Record outbound-only IP usage (data going TO the client).
|
||||
pub fn record_ip_outbound_usage(&self, ip: &str, bytes: u64) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let today = chrono_date_today();
|
||||
conn.execute(
|
||||
"INSERT INTO ip_usage (ip, date, outbound_bytes, test_count)
|
||||
VALUES (?1, ?2, ?3, 0)
|
||||
ON CONFLICT(ip, date) DO UPDATE SET
|
||||
outbound_bytes = outbound_bytes + ?3",
|
||||
params![ip, today, bytes as i64],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get daily inbound bytes for an IP.
|
||||
pub fn get_ip_daily_inbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let today = chrono_date_today();
|
||||
let result: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date = ?2",
|
||||
params![ip, today],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
Ok(result as u64)
|
||||
}
|
||||
|
||||
/// Get weekly inbound bytes for an IP.
|
||||
pub fn get_ip_weekly_inbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-7 days')",
|
||||
params![ip],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
Ok(result as u64)
|
||||
}
|
||||
|
||||
/// Get monthly inbound bytes for an IP.
|
||||
pub fn get_ip_monthly_inbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-30 days')",
|
||||
params![ip],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
Ok(result as u64)
|
||||
}
|
||||
|
||||
/// Get daily outbound bytes for an IP.
|
||||
pub fn get_ip_daily_outbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let today = chrono_date_today();
|
||||
let result: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date = ?2",
|
||||
params![ip, today],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
Ok(result as u64)
|
||||
}
|
||||
|
||||
/// Get weekly outbound bytes for an IP.
|
||||
pub fn get_ip_weekly_outbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-7 days')",
|
||||
params![ip],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
Ok(result as u64)
|
||||
}
|
||||
|
||||
/// Get monthly outbound bytes for an IP.
|
||||
pub fn get_ip_monthly_outbound(&self, ip: &str) -> anyhow::Result<u64> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-30 days')",
|
||||
params![ip],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
Ok(result as u64)
|
||||
}
|
||||
|
||||
// --- Session tracking ---
|
||||
|
||||
pub fn start_session(&self, username: &str, peer_ip: &str, protocol: &str, direction: &str) -> anyhow::Result<i64> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT INTO sessions (username, peer_ip, protocol, direction) VALUES (?1, ?2, ?3, ?4)",
|
||||
params![username, peer_ip, protocol, direction],
|
||||
)?;
|
||||
Ok(conn.last_insert_rowid())
|
||||
}
|
||||
|
||||
pub fn end_session(&self, session_id: i64, tx_bytes: u64, rx_bytes: u64) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"UPDATE sessions SET ended_at = datetime('now'), tx_bytes = ?1, rx_bytes = ?2 WHERE id = ?3",
|
||||
params![tx_bytes as i64, rx_bytes as i64, session_id],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// --- Per-second interval tracking ---
|
||||
|
||||
/// Record a single per-second interval data point for a session.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn record_test_interval(
|
||||
&self,
|
||||
session_id: i64,
|
||||
interval_num: i32,
|
||||
tx_bytes: u64,
|
||||
rx_bytes: u64,
|
||||
tx_mbps: f64,
|
||||
rx_mbps: f64,
|
||||
local_cpu: i32,
|
||||
remote_cpu: i32,
|
||||
lost: i64,
|
||||
) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT INTO test_intervals (session_id, interval_num, tx_bytes, rx_bytes, tx_mbps, rx_mbps, local_cpu, remote_cpu, lost_packets)
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
|
||||
params![
|
||||
session_id,
|
||||
interval_num,
|
||||
tx_bytes as i64,
|
||||
rx_bytes as i64,
|
||||
tx_mbps,
|
||||
rx_mbps,
|
||||
local_cpu,
|
||||
remote_cpu,
|
||||
lost,
|
||||
],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retrieve all interval data points for a given session, ordered by interval number.
|
||||
pub fn get_session_intervals(&self, session_id: i64) -> anyhow::Result<Vec<IntervalData>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT interval_num, tx_mbps, rx_mbps, local_cpu, remote_cpu, lost_packets
|
||||
FROM test_intervals WHERE session_id = ?1 ORDER BY interval_num"
|
||||
)?;
|
||||
let rows = stmt.query_map(params![session_id], |row| {
|
||||
Ok(IntervalData {
|
||||
interval_num: row.get(0)?,
|
||||
tx_mbps: row.get(1)?,
|
||||
rx_mbps: row.get(2)?,
|
||||
local_cpu: row.get(3)?,
|
||||
remote_cpu: row.get(4)?,
|
||||
lost: row.get(5)?,
|
||||
})
|
||||
})?.filter_map(|r| r.ok()).collect();
|
||||
Ok(rows)
|
||||
}
|
||||
|
||||
/// Return the last N sessions for a given IP address, most recent first.
|
||||
pub fn get_ip_sessions(&self, ip: &str, limit: u32) -> anyhow::Result<Vec<SessionSummary>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, started_at, ended_at, protocol, direction, tx_bytes, rx_bytes
|
||||
FROM sessions WHERE peer_ip = ?1 ORDER BY started_at DESC LIMIT ?2"
|
||||
)?;
|
||||
let rows = stmt.query_map(params![ip, limit], |row| {
|
||||
Ok(SessionSummary {
|
||||
id: row.get(0)?,
|
||||
started_at: row.get(1)?,
|
||||
ended_at: row.get(2)?,
|
||||
protocol: row.get::<_, Option<String>>(3)?.unwrap_or_default(),
|
||||
direction: row.get::<_, Option<String>>(4)?.unwrap_or_default(),
|
||||
tx_bytes: row.get::<_, i64>(5).map(|v| v as u64)?,
|
||||
rx_bytes: row.get::<_, i64>(6).map(|v| v as u64)?,
|
||||
})
|
||||
})?.filter_map(|r| r.ok()).collect();
|
||||
Ok(rows)
|
||||
}
|
||||
|
||||
/// Return aggregate statistics for an IP address across all sessions.
|
||||
pub fn get_ip_stats(&self, ip: &str) -> anyhow::Result<IpStats> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result = conn.query_row(
|
||||
"SELECT
|
||||
COUNT(*) as total_tests,
|
||||
COALESCE(SUM(inbound_bytes), 0) as total_inbound,
|
||||
COALESCE(SUM(outbound_bytes), 0) as total_outbound
|
||||
FROM ip_usage WHERE ip = ?1",
|
||||
params![ip],
|
||||
|row| {
|
||||
let total_tests: i64 = row.get(0)?;
|
||||
let total_inbound: i64 = row.get(1)?;
|
||||
let total_outbound: i64 = row.get(2)?;
|
||||
Ok((total_tests as u64, total_inbound as u64, total_outbound as u64))
|
||||
},
|
||||
)?;
|
||||
|
||||
// Compute average Mbps from test_intervals joined through sessions
|
||||
let (avg_tx, avg_rx) = conn.query_row(
|
||||
"SELECT
|
||||
COALESCE(AVG(ti.tx_mbps), 0.0),
|
||||
COALESCE(AVG(ti.rx_mbps), 0.0)
|
||||
FROM test_intervals ti
|
||||
INNER JOIN sessions s ON ti.session_id = s.id
|
||||
WHERE s.peer_ip = ?1",
|
||||
params![ip],
|
||||
|row| {
|
||||
let avg_tx: f64 = row.get(0)?;
|
||||
let avg_rx: f64 = row.get(1)?;
|
||||
Ok((avg_tx, avg_rx))
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(IpStats {
|
||||
total_tests: result.0,
|
||||
total_inbound: result.1,
|
||||
total_outbound: result.2,
|
||||
avg_tx_mbps: avg_tx,
|
||||
avg_rx_mbps: avg_rx,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn delete_user(&self, username: &str) -> anyhow::Result<bool> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let rows = conn.execute("DELETE FROM users WHERE username = ?1", params![username])?;
|
||||
Ok(rows > 0)
|
||||
}
|
||||
|
||||
pub fn set_user_enabled(&self, username: &str, enabled: bool) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"UPDATE users SET enabled = ?1 WHERE username = ?2",
|
||||
params![enabled as i32, username],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_user_quota(&self, username: &str, daily: i64, weekly: i64, monthly: i64) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"UPDATE users SET daily_quota = ?1, weekly_quota = ?2 WHERE username = ?3",
|
||||
params![daily, weekly, username],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn list_users(&self) -> anyhow::Result<Vec<User>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, username, password_hash, daily_quota, weekly_quota, enabled FROM users ORDER BY username"
|
||||
)?;
|
||||
let users = stmt.query_map([], |row| {
|
||||
Ok(User {
|
||||
id: row.get(0)?,
|
||||
username: row.get(1)?,
|
||||
password_hash: row.get(2)?,
|
||||
daily_quota: row.get(3)?,
|
||||
weekly_quota: row.get(4)?,
|
||||
enabled: row.get::<_, i32>(5)? != 0,
|
||||
})
|
||||
})?.filter_map(|r| r.ok()).collect();
|
||||
Ok(users)
|
||||
}
|
||||
}
|
||||
|
||||
fn hash_password(username: &str, password: &str) -> String {
|
||||
use sha2::{Sha256, Digest};
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(format!("{}:{}", username, password).as_bytes());
|
||||
let result = hasher.finalize();
|
||||
result.iter().map(|b| format!("{:02x}", b)).collect()
|
||||
}
|
||||
|
||||
/// Return today's UTC date as "YYYY-MM-DD" without pulling in the chrono crate.
fn chrono_date_today() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    let secs = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
    date_from_unix_secs(secs)
}

/// Convert seconds since the Unix epoch to a "YYYY-MM-DD" (UTC) string.
///
/// Walks forward from 1970 subtracting whole-year lengths, then whole-month
/// lengths; what remains is the zero-based day of the month.
fn date_from_unix_secs(secs: u64) -> String {
    let days = secs / 86400;

    // Consume whole years.
    let mut year = 1970u64;
    let mut remaining = days;
    loop {
        let year_len = if is_leap(year) { 366 } else { 365 };
        if remaining < year_len {
            break;
        }
        remaining -= year_len;
        year += 1;
    }

    // Consume whole months within the final year.
    let days_in_months = [
        31u64,
        if is_leap(year) { 29 } else { 28 },
        31, 30, 31, 30, 31, 31, 30, 31, 30, 31,
    ];
    let mut month = 0usize;
    for (i, &len) in days_in_months.iter().enumerate() {
        if remaining < len {
            month = i;
            break;
        }
        remaining -= len;
    }

    format!("{:04}-{:02}-{:02}", year, month + 1, remaining + 1)
}

/// Gregorian leap-year rule: divisible by 4, except centuries unless by 400.
fn is_leap(year: u64) -> bool {
    year % 4 == 0 && (year % 100 != 0 || year % 400 == 0)
}
|
||||
|
||||
// Re-export for use by rusqlite
|
||||
use rusqlite::OptionalExtension;
|
||||
811
src/server_pro/web/mod.rs
Normal file
811
src/server_pro/web/mod.rs
Normal file
@@ -0,0 +1,811 @@
|
||||
//! Web dashboard module for btest-server-pro.
|
||||
//!
|
||||
//! Provides an axum-based HTTP dashboard with:
|
||||
//! - Landing page with IP lookup
|
||||
//! - Per-IP session history and statistics
|
||||
//! - Chart.js throughput graphs
|
||||
//!
|
||||
//! # Feature gate
|
||||
//!
|
||||
//! This entire module is compiled only when the `pro` feature is active
|
||||
//! (it lives inside the `btest-server-pro` binary crate which already
|
||||
//! requires `--features pro`).
|
||||
//!
|
||||
//! # Template files
|
||||
//!
|
||||
//! The HTML source lives in `src/server_pro/web/templates/` as standalone
|
||||
//! `.html` files for easy editing. The Rust code embeds them via the askama
|
||||
//! `source` attribute so no `askama.toml` configuration is needed. If you
|
||||
//! prefer external template files, create `askama.toml` at the crate root:
|
||||
//!
|
||||
//! ```toml
|
||||
//! [[dirs]]
|
||||
//! path = "src/server_pro/web/templates"
|
||||
//! ```
|
||||
//!
|
||||
//! Then change `source = "..."` to `path = "index.html"` (etc.) in the
|
||||
//! template structs below.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use askama::Template;
|
||||
use axum::extract::{Path, State};
|
||||
use axum::http::StatusCode;
|
||||
use axum::response::{Html, IntoResponse, Response};
|
||||
use axum::routing::get;
|
||||
use axum::Router;
|
||||
use rusqlite::{params, Connection};
|
||||
use serde::Serialize;
|
||||
|
||||
use super::user_db::UserDb;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Shared state
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Shared application state passed to all handlers via axum's `State`.
pub struct WebState {
    /// Reference to the main user/session database.
    pub db: UserDb,
    /// Separate read-only connection for dashboard queries that are not
    /// exposed by [`UserDb`] (e.g. listing sessions, aggregate stats).
    /// Wrapped in a [`std::sync::Mutex`] because [`rusqlite::Connection`]
    /// is not `Send + Sync` on its own.
    ///
    /// NOTE(review): the handlers visible in this module only hold this
    /// lock around synchronous rusqlite calls — never across an `.await` —
    /// which is why a `std::sync::Mutex` (not a tokio one) is acceptable.
    pub query_conn: std::sync::Mutex<Connection>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Router constructor
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Default database filename used when `BTEST_DB_PATH` is not set.
///
/// NOTE(review): this constant is not referenced anywhere in the code
/// visible in this module — [`create_router`] takes its path from
/// `db.path()`. Presumably the env-var/default resolution happens where
/// the `UserDb` is constructed; confirm before removing.
const DEFAULT_DB_PATH: &str = "btest-users.db";
|
||||
|
||||
/// Build the axum [`Router`] for the web dashboard.
///
/// A second, read-only SQLite connection is opened against the same
/// database file as `db` (obtained from `db.path()`) and stored in
/// [`WebState`] for dashboard queries the main handle does not expose.
///
/// # Panics
///
/// Panics if the read-only database connection or the DDL for the
/// `session_intervals` table cannot be established. This is intentional:
/// the web module is optional and failure during startup should surface
/// loudly rather than silently serving broken pages.
pub fn create_router(db: UserDb) -> Router {
    // Dashboard queries read the same file the UserDb handle already uses.
    let db_path = db.path().to_string();

    let query_conn = Connection::open_with_flags(
        &db_path,
        // NO_MUTEX is safe here: the connection is guarded by the Mutex in
        // `WebState`, so rusqlite never sees concurrent access.
        rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
            | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX,
    )
    .expect("web: failed to open read-only database connection");
    // Wait up to 5s on a locked database instead of failing immediately.
    query_conn
        .execute_batch("PRAGMA busy_timeout=5000;")
        .expect("web: failed to set PRAGMA on query connection");

    // Ensure the `session_intervals` table exists. The server loop must
    // INSERT rows for the chart to have data; the table is created here so
    // the schema is ready.
    ensure_web_tables(&db_path).expect("web: failed to create session_intervals table");

    let state = Arc::new(WebState {
        db,
        query_conn: std::sync::Mutex::new(query_conn),
    });

    // axum 0.8 uses `{param}` syntax for path parameters.
    Router::new()
        .route("/", get(index_page))
        .route("/dashboard/{ip}", get(dashboard_page))
        .route("/api/ip/{ip}/sessions", get(api_sessions))
        .route("/api/ip/{ip}/stats", get(api_stats))
        .route("/api/ip/{ip}/export", get(api_export))
        .route("/api/ip/{ip}/quota", get(api_quota))
        .route("/api/session/{id}/intervals", get(api_intervals))
        .with_state(state)
}
|
||||
|
||||
/// Create additional tables the web dashboard depends on.
|
||||
///
|
||||
/// Opens a short-lived writable connection solely for DDL so it does not
|
||||
/// interfere with the main [`UserDb`] connection.
|
||||
fn ensure_web_tables(db_path: &str) -> anyhow::Result<()> {
|
||||
let conn = Connection::open(db_path)?;
|
||||
conn.execute_batch("PRAGMA busy_timeout=5000;")?;
|
||||
conn.execute_batch(
|
||||
"CREATE TABLE IF NOT EXISTS session_intervals (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
session_id INTEGER NOT NULL,
|
||||
second INTEGER NOT NULL,
|
||||
tx_bytes INTEGER NOT NULL DEFAULT 0,
|
||||
rx_bytes INTEGER NOT NULL DEFAULT 0,
|
||||
UNIQUE(session_id, second)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_intervals_session
|
||||
ON session_intervals(session_id, second);",
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Askama templates (embedded via `source`)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Landing / index page template.
///
/// Fully static: the struct has no fields and the HTML references no
/// template variables. The markup is embedded via askama's `source`
/// attribute (see the module docs for switching to external files). The
/// inline script auto-detects the visitor's IP via api.ipify.org and links
/// to `/dashboard/{ip}`.
#[derive(Template)]
#[template(
    source = r##"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>btest-rs — Free Public Bandwidth Test Server</title>
<style>
*{margin:0;padding:0;box-sizing:border-box}
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Helvetica,Arial,sans-serif;background:#0f1117;color:#e1e4e8;min-height:100vh;display:flex;flex-direction:column;align-items:center;padding:2rem 1rem}
.container{max-width:720px;width:100%;padding:1rem 0}
h1{font-size:2.2rem;margin-bottom:.25rem;color:#58a6ff;text-align:center}
.subtitle{color:#8b949e;margin-bottom:2.5rem;line-height:1.6;text-align:center;font-size:1.05rem}
.section{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1.5rem;margin-bottom:1.5rem;text-align:left;line-height:1.7;color:#c9d1d9}
.section h2{color:#e1e4e8;font-size:1.15rem;margin-bottom:.75rem}
.section h3{color:#e1e4e8;font-size:1rem;margin-bottom:.5rem;margin-top:1rem}
.section h3:first-child{margin-top:0}
.section p{margin-bottom:.5rem}
.section ul{margin:.5rem 0 .5rem 1.5rem;color:#8b949e}
.section li{margin-bottom:.35rem}
code{background:#0d1117;padding:.2rem .5rem;border-radius:4px;font-size:.85em;color:#58a6ff;word-break:break-all}
pre{background:#0d1117;border:1px solid #30363d;border-radius:6px;padding:1rem;overflow-x:auto;margin:.75rem 0;line-height:1.5}
pre code{padding:0;background:none;font-size:.85em}
.label-tag{display:inline-block;padding:.15rem .5rem;border-radius:4px;font-size:.75rem;font-weight:600;text-transform:uppercase;letter-spacing:.03em;margin-right:.5rem;vertical-align:middle}
.tag-tcp{background:rgba(63,185,80,0.15);color:#3fb950}
.tag-udp{background:rgba(210,153,34,0.15);color:#d29922}
.note{background:#1c1e26;border-left:3px solid #d29922;padding:.75rem 1rem;border-radius:0 6px 6px 0;margin:.75rem 0;font-size:.92rem;color:#8b949e}
.note strong{color:#d29922}
.search-section{text-align:center}
.search-section h2{text-align:center}
.search-box{display:flex;gap:.5rem;margin-bottom:1rem}
.search-box input{flex:1;padding:.75rem 1rem;border:1px solid #30363d;border-radius:6px;background:#161b22;color:#e1e4e8;font-size:1rem;outline:none}
.search-box input:focus{border-color:#58a6ff}
.search-box input::placeholder{color:#484f58}
.search-box button{padding:.75rem 1.5rem;background:#238636;color:#fff;border:none;border-radius:6px;font-size:1rem;cursor:pointer;white-space:nowrap}
.search-box button:hover{background:#2ea043}
.auto-link{font-size:.9rem;color:#8b949e}
.auto-link a{color:#58a6ff;text-decoration:none}
.auto-link a:hover{text-decoration:underline}
.footer{margin-top:2rem;color:#484f58;font-size:.8rem;text-align:center}
.footer a{color:#58a6ff;text-decoration:none}
.footer a:hover{text-decoration:underline}
</style>
</head>
<body>
<div class="container">
<h1>btest-rs</h1>
<p class="subtitle">Free public MikroTik-compatible bandwidth test server.<br>Test your link speed from any RouterOS device — no registration required.</p>

<div class="section">
<h2>Quick Start</h2>
<p>Open a terminal on your MikroTik router and run one of the following commands:</p>
<h3><span class="label-tag tag-tcp">TCP</span> Recommended</h3>
<pre><code>/tool bandwidth-test address=104.225.217.60 user=btest password=btest protocol=tcp direction=both</code></pre>
<h3><span class="label-tag tag-udp">UDP</span></h3>
<pre><code>/tool bandwidth-test address=104.225.217.60 user=btest password=btest protocol=udp direction=both</code></pre>
</div>

<div class="section">
<h2>Important Notes</h2>
<ul>
<li><strong style="color:#e1e4e8">Credentials:</strong> <code>user=btest</code> <code>password=btest</code></li>
<li><strong style="color:#e1e4e8">TCP is recommended</strong> for remote testing — it works reliably through any NAT or firewall</li>
<li><strong style="color:#e1e4e8">Per-IP daily quotas</strong> apply to keep the service fair for everyone</li>
<li><strong style="color:#e1e4e8">Maximum test duration:</strong> 120 seconds</li>
<li><strong style="color:#e1e4e8">Connection limit:</strong> 3 concurrent tests per IP</li>
</ul>
<div class="note">
<strong>UDP bidirectional may not work through NAT/firewall.</strong>
UDP <code>direction=both</code> requires the server to send packets to a pre-calculated client port, which NAT routers typically block. If you need UDP testing:<br>
• Forward UDP ports 2001–2100 on your router, or<br>
• Use <code>direction=send</code> or <code>direction=receive</code> (one-way works fine), or<br>
• Test from a device with a public IP
</div>
</div>

<div class="section search-section">
<h2>Check Your Results</h2>
<p style="margin-bottom:1rem;color:#8b949e">After running a test, enter your public IP to view throughput charts, session history, and statistics.</p>
<form class="search-box" id="ip-form" onsubmit="return goToDashboard()">
<input type="text" id="ip-input" placeholder="Enter your IP address (e.g. 203.0.113.5)" autocomplete="off">
<button type="submit">View Results</button>
</form>
<div class="auto-link" id="auto-detect">Detecting your IP...</div>
</div>

<div class="footer">Powered by <a href="https://github.com/manawenuz/btest-rs">btest-rs</a> — open source MikroTik bandwidth test server</div>
</div>
<script>
function goToDashboard(){var ip=document.getElementById('ip-input').value.trim();if(ip){window.location.href='/dashboard/'+encodeURIComponent(ip);}return false;}
fetch('https://api.ipify.org?format=json')
.then(function(r){return r.json();})
.then(function(d){if(d.ip){document.getElementById('ip-input').value=d.ip;document.getElementById('auto-detect').innerHTML='Detected IP: <a href="/dashboard/'+encodeURIComponent(d.ip)+'">'+d.ip+'</a> — click to view your dashboard';}})
.catch(function(){document.getElementById('auto-detect').textContent='';});
</script>
</body>
</html>"##,
    ext = "html"
)]
struct IndexTemplate;
|
||||
|
||||
/// Per-IP dashboard page template.
///
/// The only template variable is `ip` (interpolated into the title, header
/// and API URLs). All session, stats and quota data is fetched client-side
/// by the inline script from the `/api/ip/{ip}/...` JSON endpoints, and the
/// throughput chart is drawn with Chart.js loaded from the jsDelivr CDN.
#[derive(Template)]
#[template(
    source = r##"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Dashboard — {{ ip }} — btest-rs</title>
<style>
*{margin:0;padding:0;box-sizing:border-box}
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Helvetica,Arial,sans-serif;background:#0f1117;color:#e1e4e8;min-height:100vh;padding:1.5rem}
a{color:#58a6ff;text-decoration:none}a:hover{text-decoration:underline}
.header{display:flex;align-items:center;gap:1rem;margin-bottom:1.5rem;flex-wrap:wrap}
.header h1{font-size:1.5rem;color:#58a6ff}
.header .ip-label{font-size:1.1rem;color:#8b949e;font-family:monospace}
.header .home-link{margin-left:auto}
.btn{display:inline-block;padding:.5rem 1rem;border-radius:6px;font-size:.85rem;font-weight:500;cursor:pointer;border:1px solid #30363d;text-decoration:none}
.btn-json{background:#161b22;color:#3fb950}.btn-json:hover{background:#1c2128;text-decoration:none}
.stats{display:grid;grid-template-columns:repeat(auto-fit,minmax(160px,1fr));gap:1rem;margin-bottom:1.5rem}
.stat-card{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1rem}
.stat-card .label{color:#8b949e;font-size:.8rem;text-transform:uppercase;letter-spacing:.05em}
.stat-card .value{font-size:1.4rem;font-weight:600;margin-top:.25rem}
.table-wrap{overflow-x:auto;margin-bottom:1.5rem}
table{width:100%;border-collapse:collapse;background:#161b22;border-radius:8px;overflow:hidden}
th,td{padding:.6rem 1rem;text-align:left;border-bottom:1px solid #21262d;white-space:nowrap}
th{background:#0d1117;color:#8b949e;font-size:.8rem;text-transform:uppercase;letter-spacing:.04em}
tr{cursor:pointer}tr:hover td{background:#1c2128}tr.selected td{background:#1f3a5f}
.proto-tcp{color:#3fb950}.proto-udp{color:#d29922}
.dir-tx{color:#f78166}.dir-rx{color:#58a6ff}.dir-both{color:#bc8cff}
.chart-section{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1.5rem;margin-bottom:1.5rem}
.chart-section h2{font-size:1rem;color:#8b949e;margin-bottom:1rem}
.chart-container{position:relative;width:100%;max-height:360px}
.chart-placeholder{text-align:center;color:#484f58;padding:3rem 0}
.footer{text-align:center;color:#484f58;font-size:.8rem;margin-top:2rem}
.no-data{text-align:center;padding:3rem;color:#484f58}
.quota-section{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1.25rem;margin-bottom:1.5rem}
.quota-section h2{font-size:1rem;color:#8b949e;margin-bottom:1rem}
.quota-row{display:flex;align-items:center;gap:1rem;margin-bottom:.75rem}
.quota-row:last-child{margin-bottom:0}
.quota-label{min-width:70px;font-size:.85rem;color:#8b949e;text-transform:uppercase;letter-spacing:.04em}
.quota-bar-wrap{flex:1;background:#21262d;border-radius:4px;height:22px;position:relative;overflow:hidden}
.quota-bar{height:100%;border-radius:4px;transition:width .5s ease}
.quota-bar.low{background:#238636}.quota-bar.mid{background:#d29922}.quota-bar.high{background:#da3633}
.quota-text{min-width:180px;font-size:.85rem;color:#e1e4e8;text-align:right;font-family:monospace}
</style>
</head>
<body>
<div class="header">
<h1>btest-rs</h1>
<span class="ip-label">{{ ip }}</span>
<a class="btn btn-json" href="/api/ip/{{ ip }}/export" download>Export JSON</a>
<span class="home-link"><a href="/">Home</a></span>
</div>
<div class="stats" id="stats-grid">
<div class="stat-card"><div class="label">Total Tests</div><div class="value" id="stat-total-tests">—</div></div>
<div class="stat-card"><div class="label">Total TX</div><div class="value" id="stat-total-tx">—</div></div>
<div class="stat-card"><div class="label">Total RX</div><div class="value" id="stat-total-rx">—</div></div>
<div class="stat-card"><div class="label">Avg TX Mbps</div><div class="value" id="stat-avg-tx">—</div></div>
<div class="stat-card"><div class="label">Avg RX Mbps</div><div class="value" id="stat-avg-rx">—</div></div>
</div>
<div class="quota-section" id="quota-section">
<h2>Quota Usage</h2>
<div class="quota-row"><span class="quota-label">Daily</span><div class="quota-bar-wrap"><div class="quota-bar low" id="bar-daily" style="width:0%"></div></div><span class="quota-text" id="text-daily">—</span></div>
<div class="quota-row"><span class="quota-label">Weekly</span><div class="quota-bar-wrap"><div class="quota-bar low" id="bar-weekly" style="width:0%"></div></div><span class="quota-text" id="text-weekly">—</span></div>
<div class="quota-row"><span class="quota-label">Monthly</span><div class="quota-bar-wrap"><div class="quota-bar low" id="bar-monthly" style="width:0%"></div></div><span class="quota-text" id="text-monthly">—</span></div>
</div>
<div class="chart-section">
<h2 id="chart-title">Select a test below to view its throughput chart</h2>
<div class="chart-container">
<canvas id="throughput-chart"></canvas>
<div class="chart-placeholder" id="chart-placeholder">Click a row in the table to load the throughput graph for that session.</div>
</div>
</div>
<div class="table-wrap">
<table>
<thead><tr><th>#</th><th>Date</th><th>Protocol</th><th>Direction</th><th>TX Bytes</th><th>RX Bytes</th><th>Duration</th><th>Avg TX Mbps</th><th>Avg RX Mbps</th></tr></thead>
<tbody id="sessions-body"><tr><td colspan="9" class="no-data">Loading sessions...</td></tr></tbody>
</table>
</div>
<div class="footer">Powered by btest-rs</div>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
var currentIp="{{ ip }}";
var throughputChart=null;
function formatBytes(b){if(b===0)return'0 B';var u=['B','KB','MB','GB','TB'];var i=Math.floor(Math.log(b)/Math.log(1024));if(i>=u.length)i=u.length-1;return(b/Math.pow(1024,i)).toFixed(1)+' '+u[i];}
function formatMbps(bps){return(bps*8/1e6).toFixed(2);}
fetch('/api/ip/'+encodeURIComponent(currentIp)+'/quota').then(function(r){return r.json();}).then(function(q){
function upd(id,used,limit){
var pct=limit>0?Math.min(used/limit*100,100):0;
var bar=document.getElementById('bar-'+id);
var txt=document.getElementById('text-'+id);
bar.style.width=pct.toFixed(1)+'%';
bar.className='quota-bar '+(pct<50?'low':pct<80?'mid':'high');
txt.textContent=formatBytes(used)+' / '+formatBytes(limit)+' ('+pct.toFixed(1)+'%)';
}
upd('daily',q.daily_used,q.daily_limit);
upd('weekly',q.weekly_used,q.weekly_limit);
upd('monthly',q.monthly_used,q.monthly_limit);
}).catch(function(){});
function durationStr(s,e){if(!s||!e)return'--';var ms=new Date(e)-new Date(s);if(ms<0)return'--';var sec=Math.round(ms/1000);if(sec<60)return sec+'s';return Math.floor(sec/60)+'m '+(sec%60)+'s';}
function durationSec(s,e){if(!s||!e)return 0;return Math.max((new Date(e)-new Date(s))/1000,0.001);}
fetch('/api/ip/'+encodeURIComponent(currentIp)+'/stats').then(function(r){return r.json();}).then(function(d){
document.getElementById('stat-total-tests').textContent=d.total_sessions||0;
document.getElementById('stat-total-tx').textContent=formatBytes(d.total_tx_bytes||0);
document.getElementById('stat-total-rx').textContent=formatBytes(d.total_rx_bytes||0);
document.getElementById('stat-avg-tx').textContent=d.avg_tx_mbps?d.avg_tx_mbps.toFixed(2):'0.00';
document.getElementById('stat-avg-rx').textContent=d.avg_rx_mbps?d.avg_rx_mbps.toFixed(2):'0.00';
}).catch(function(){});
fetch('/api/ip/'+encodeURIComponent(currentIp)+'/sessions').then(function(r){return r.json();}).then(function(sessions){
var tbody=document.getElementById('sessions-body');
if(!sessions||sessions.length===0){tbody.innerHTML='<tr><td colspan="9" class="no-data">No test sessions found for this IP.</td></tr>';return;}
tbody.innerHTML='';
sessions.forEach(function(s,i){
var tr=document.createElement('tr');tr.dataset.sessionId=s.id;tr.onclick=function(){selectSession(s.id,tr);};
var dur=durationSec(s.started_at,s.ended_at);var avgTx=dur>0?formatMbps(s.tx_bytes/dur):'0.00';var avgRx=dur>0?formatMbps(s.rx_bytes/dur):'0.00';
var proto=(s.protocol||'TCP').toUpperCase();var dir=(s.direction||'BOTH').toUpperCase();
var pc=proto==='UDP'?'proto-udp':'proto-tcp';var dc=dir==='TX'?'dir-tx':dir==='RX'?'dir-rx':'dir-both';
tr.innerHTML='<td>'+(i+1)+'</td><td>'+(s.started_at||'--')+'</td><td class="'+pc+'">'+proto+'</td><td class="'+dc+'">'+dir+'</td><td>'+formatBytes(s.tx_bytes||0)+'</td><td>'+formatBytes(s.rx_bytes||0)+'</td><td>'+durationStr(s.started_at,s.ended_at)+'</td><td>'+avgTx+'</td><td>'+avgRx+'</td>';
tbody.appendChild(tr);
});
if(sessions.length>0){var fr=tbody.querySelector('tr');if(fr)selectSession(sessions[0].id,fr);}
}).catch(function(){document.getElementById('sessions-body').innerHTML='<tr><td colspan="9" class="no-data">Failed to load sessions.</td></tr>';});
function selectSession(sid,row){
document.querySelectorAll('#sessions-body tr').forEach(function(r){r.classList.remove('selected');});
row.classList.add('selected');
document.getElementById('chart-title').textContent='Throughput for session #'+sid;
document.getElementById('chart-placeholder').style.display='none';
fetch('/api/session/'+sid+'/intervals').then(function(r){return r.json();}).then(function(iv){renderChart(iv);}).catch(function(){
document.getElementById('chart-placeholder').style.display='block';
document.getElementById('chart-placeholder').textContent='Failed to load interval data.';
});
}
function renderChart(iv){
var canvas=document.getElementById('throughput-chart');
if(throughputChart)throughputChart.destroy();
if(!iv||iv.length===0){document.getElementById('chart-placeholder').style.display='block';document.getElementById('chart-placeholder').textContent='No interval data available for this session.';return;}
var labels=iv.map(function(d){return d.second+'s';});
var tx=iv.map(function(d){return(d.tx_bytes*8/1e6).toFixed(2);});
var rx=iv.map(function(d){return(d.rx_bytes*8/1e6).toFixed(2);});
throughputChart=new Chart(canvas,{type:'line',data:{labels:labels,datasets:[
{label:'TX Mbps',data:tx,borderColor:'#f78166',backgroundColor:'rgba(247,129,102,0.1)',borderWidth:2,fill:true,tension:0.3,pointRadius:1},
{label:'RX Mbps',data:rx,borderColor:'#58a6ff',backgroundColor:'rgba(88,166,255,0.1)',borderWidth:2,fill:true,tension:0.3,pointRadius:1}
]},options:{responsive:true,maintainAspectRatio:false,interaction:{intersect:false,mode:'index'},
scales:{x:{title:{display:true,text:'Time',color:'#8b949e'},ticks:{color:'#8b949e'},grid:{color:'#21262d'}},
y:{title:{display:true,text:'Mbps',color:'#8b949e'},ticks:{color:'#8b949e'},grid:{color:'#21262d'},beginAtZero:true}},
plugins:{legend:{labels:{color:'#e1e4e8'}},tooltip:{backgroundColor:'#161b22',borderColor:'#30363d',borderWidth:1,titleColor:'#e1e4e8',bodyColor:'#8b949e'}}}});
}
</script>
</body>
</html>"##,
    ext = "html"
)]
struct DashboardTemplate {
    /// IP address being inspected; interpolated (HTML-escaped by askama)
    /// into the page title, header label and API URLs.
    ip: String,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// JSON response types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single test session as returned by the sessions API.
///
/// Field names become JSON keys verbatim (no serde renames), so renaming
/// any field here is a breaking change for the dashboard JavaScript.
#[derive(Serialize)]
struct SessionJson {
    /// `sessions.id` primary key.
    id: i64,
    /// Authenticated btest username for the session.
    username: String,
    /// Remote peer address the session was recorded under.
    peer_ip: String,
    /// Session start timestamp as stored in SQLite; nullable in the schema.
    started_at: Option<String>,
    /// Session end timestamp; `None` for sessions without a recorded end.
    ended_at: Option<String>,
    /// TX byte counter for the session (direction relative to the server —
    /// assumed server-sent; confirm against the session writer).
    tx_bytes: i64,
    /// RX byte counter for the session.
    rx_bytes: i64,
    /// Test protocol (the dashboard treats a missing value as "TCP").
    protocol: Option<String>,
    /// Test direction (the dashboard treats a missing value as "BOTH").
    direction: Option<String>,
}
|
||||
|
||||
/// Aggregate statistics for an IP address.
#[derive(Serialize)]
struct StatsJson {
    /// Number of sessions recorded for the IP.
    total_sessions: i64,
    /// Sum of `tx_bytes` across all of the IP's sessions.
    total_tx_bytes: i64,
    /// Sum of `rx_bytes` across all of the IP's sessions.
    total_rx_bytes: i64,
    /// total TX bits / total completed-session seconds / 1e6;
    /// 0.0 when no session has both timestamps.
    avg_tx_mbps: f64,
    /// Same computation for the RX direction.
    avg_rx_mbps: f64,
}
|
||||
|
||||
/// One second of throughput data within a session.
#[derive(Serialize)]
struct IntervalJson {
    /// Second index within the session (assumed to be an offset from the
    /// session start — TODO confirm against the `session_intervals` writer).
    second: i64,
    /// Bytes counted in the TX direction during this second.
    tx_bytes: i64,
    /// Bytes counted in the RX direction during this second.
    rx_bytes: i64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Error helper
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Uniform error wrapper so handlers can use `?` freely.
///
/// All errors are rendered as `500 Internal Server Error` with a plain-text
/// body. The full error chain is logged via [`tracing`].
///
/// NOTE(review): the response body echoes `self.0.to_string()` to the
/// client, which can leak internal details (file paths, SQL text). Consider
/// a generic body if this service faces the public internet.
struct AppError(anyhow::Error);
|
||||
|
||||
impl IntoResponse for AppError {
|
||||
fn into_response(self) -> Response {
|
||||
tracing::error!("web handler error: {:#}", self.0);
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, self.0.to_string()).into_response()
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: Into<anyhow::Error>> From<E> for AppError {
|
||||
fn from(err: E) -> Self {
|
||||
Self(err.into())
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Handlers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// `GET /` -- render the landing page.
|
||||
async fn index_page() -> Result<Html<String>, AppError> {
|
||||
let rendered = IndexTemplate
|
||||
.render()
|
||||
.map_err(|e| anyhow::anyhow!("template render: {}", e))?;
|
||||
Ok(Html(rendered))
|
||||
}
|
||||
|
||||
/// `GET /dashboard/{ip}` -- render the per-IP dashboard.
|
||||
async fn dashboard_page(Path(ip): Path<String>) -> Result<Html<String>, AppError> {
|
||||
let rendered = DashboardTemplate { ip }
|
||||
.render()
|
||||
.map_err(|e| anyhow::anyhow!("template render: {}", e))?;
|
||||
Ok(Html(rendered))
|
||||
}
|
||||
|
||||
/// `GET /api/ip/{ip}/sessions` -- return the most recent 100 sessions for
|
||||
/// the given peer IP as a JSON array.
|
||||
async fn api_sessions(
|
||||
State(state): State<Arc<WebState>>,
|
||||
Path(ip): Path<String>,
|
||||
) -> Result<axum::Json<Vec<SessionJson>>, AppError> {
|
||||
let sessions = {
|
||||
let conn = state
|
||||
.query_conn
|
||||
.lock()
|
||||
.map_err(|e| anyhow::anyhow!("lock: {}", e))?;
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, username, peer_ip, started_at, ended_at,
|
||||
tx_bytes, rx_bytes, protocol, direction
|
||||
FROM sessions
|
||||
WHERE peer_ip = ?1
|
||||
ORDER BY started_at DESC
|
||||
LIMIT 100",
|
||||
)?;
|
||||
let rows = stmt.query_map(params![ip], |row| {
|
||||
Ok(SessionJson {
|
||||
id: row.get(0)?,
|
||||
username: row.get(1)?,
|
||||
peer_ip: row.get(2)?,
|
||||
started_at: row.get(3)?,
|
||||
ended_at: row.get(4)?,
|
||||
tx_bytes: row.get(5)?,
|
||||
rx_bytes: row.get(6)?,
|
||||
protocol: row.get(7)?,
|
||||
direction: row.get(8)?,
|
||||
})
|
||||
})?;
|
||||
rows.filter_map(Result::ok).collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
Ok(axum::Json(sessions))
|
||||
}
|
||||
|
||||
/// `GET /api/ip/{ip}/stats` -- return aggregate statistics (total bytes,
/// session count, average throughput) for the given IP.
///
/// Averages are computed over the summed duration of sessions that have
/// both a start and an end timestamp; with no completed sessions both
/// averages are reported as 0.0.
async fn api_stats(
    State(state): State<Arc<WebState>>,
    Path(ip): Path<String>,
) -> Result<axum::Json<StatsJson>, AppError> {
    // Inner block scopes the Mutex guard so it is released before the
    // response is serialized.
    let stats = {
        let conn = state
            .query_conn
            .lock()
            .map_err(|e| anyhow::anyhow!("lock: {}", e))?;
        conn.query_row(
            // julianday() yields days, hence * 86400.0 to get seconds; only
            // sessions with both timestamps contribute to total_seconds.
            "SELECT
                COUNT(*) AS total_sessions,
                COALESCE(SUM(tx_bytes), 0) AS total_tx,
                COALESCE(SUM(rx_bytes), 0) AS total_rx,
                COALESCE(SUM(
                    CASE WHEN ended_at IS NOT NULL AND started_at IS NOT NULL
                    THEN (julianday(ended_at) - julianday(started_at)) * 86400.0
                    ELSE 0 END
                ), 0) AS total_seconds
             FROM sessions
             WHERE peer_ip = ?1",
            params![ip],
            |row| {
                let total_sessions: i64 = row.get(0)?;
                let total_tx: i64 = row.get(1)?;
                let total_rx: i64 = row.get(2)?;
                let total_seconds: f64 = row.get(3)?;

                // Mbps = bytes * 8 bits / seconds / 1e6, guarding the
                // divide-by-zero case of no completed sessions.
                let avg_tx_mbps = if total_seconds > 0.0 {
                    (total_tx as f64) * 8.0 / total_seconds / 1_000_000.0
                } else {
                    0.0
                };
                let avg_rx_mbps = if total_seconds > 0.0 {
                    (total_rx as f64) * 8.0 / total_seconds / 1_000_000.0
                } else {
                    0.0
                };

                Ok(StatsJson {
                    total_sessions,
                    total_tx_bytes: total_tx,
                    total_rx_bytes: total_rx,
                    avg_tx_mbps,
                    avg_rx_mbps,
                })
            },
        )?
    };

    Ok(axum::Json(stats))
}
|
||||
|
||||
/// Quota usage for an IP — daily/weekly/monthly with limits.
///
/// All fields are byte counts. The `*_limit` values are hard-coded in
/// [`api_quota`]; the dashboard renders each pair as a used/limit
/// progress bar.
#[derive(Serialize)]
struct QuotaUsageJson {
    /// Bytes consumed today (`ip_usage` rows dated `date('now')`).
    daily_used: i64,
    daily_limit: i64,
    /// Bytes consumed over the trailing 7 days.
    weekly_used: i64,
    weekly_limit: i64,
    /// Bytes consumed over the trailing 30 days.
    monthly_used: i64,
    monthly_limit: i64,
}
|
||||
|
||||
/// `GET /api/ip/{ip}/quota` -- return current quota usage for the IP.
|
||||
async fn api_quota(
|
||||
State(state): State<Arc<WebState>>,
|
||||
Path(ip): Path<String>,
|
||||
) -> Result<axum::Json<QuotaUsageJson>, AppError> {
|
||||
let conn = state.query_conn.lock().map_err(|e| anyhow::anyhow!("lock: {}", e))?;
|
||||
|
||||
let daily: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage WHERE ip = ?1 AND date = date('now')",
|
||||
params![ip], |row| row.get(0),
|
||||
).unwrap_or(0);
|
||||
|
||||
let weekly: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-7 days')",
|
||||
params![ip], |row| row.get(0),
|
||||
).unwrap_or(0);
|
||||
|
||||
let monthly: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-30 days')",
|
||||
params![ip], |row| row.get(0),
|
||||
).unwrap_or(0);
|
||||
|
||||
// Limits: 2GB daily, 8GB weekly, 24GB monthly
|
||||
Ok(axum::Json(QuotaUsageJson {
|
||||
daily_used: daily,
|
||||
daily_limit: 2_147_483_648,
|
||||
weekly_used: weekly,
|
||||
weekly_limit: 8_589_934_592,
|
||||
monthly_used: monthly,
|
||||
monthly_limit: 25_769_803_776,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Full export of all data for an IP — stats + sessions with human-readable fields.
#[derive(Serialize)]
struct ExportJson {
    /// The IP address the export was generated for.
    ip: String,
    /// Timestamp the export was produced (exact format set by the producer
    /// in `api_export`, whose tail is outside this view — confirm there).
    exported_at: String,
    /// Aggregate statistics, same shape as `/api/ip/{ip}/stats`.
    stats: StatsJson,
    /// Daily quota snapshot at export time.
    quota: QuotaJson,
    /// Up to 100 most recent sessions with derived per-session fields.
    sessions: Vec<ExportSessionJson>,
}
|
||||
|
||||
/// Daily quota snapshot embedded in the export payload.
#[derive(Serialize)]
struct QuotaJson {
    /// Bytes consumed today according to `ip_usage`.
    daily_used_bytes: i64,
    /// `daily_used_bytes` rendered via [`human_bytes`].
    daily_used_human: String,
    /// NOTE(review): despite the `_bytes` suffix this is a `String` — the
    /// producer fills it with the placeholder "see server config". Renaming
    /// it would break the exported JSON contract; fix producer-side instead.
    daily_limit_bytes: String,
}
|
||||
|
||||
/// One session row in the export payload, with derived human-readable fields.
#[derive(Serialize)]
struct ExportSessionJson {
    /// `sessions.id` primary key.
    id: i64,
    started_at: Option<String>,
    ended_at: Option<String>,
    protocol: Option<String>,
    direction: Option<String>,
    tx_bytes: i64,
    rx_bytes: i64,
    /// `tx_bytes` rendered via [`human_bytes`].
    tx_human: String,
    /// `rx_bytes` rendered via [`human_bytes`].
    rx_human: String,
    /// Wall-clock duration computed by SQLite; 0 when either timestamp is NULL.
    duration_secs: f64,
    /// Average throughput over `duration_secs`; 0.0 when the duration is 0.
    avg_tx_mbps: f64,
    avg_rx_mbps: f64,
}
|
||||
|
||||
/// Format a byte count for humans using binary (1024-based) units.
///
/// GB values get two decimal places, MB/KB one, and anything below one
/// KiB is printed as a whole number of bytes.
fn human_bytes(b: i64) -> String {
    // (scale, decimal places, suffix), largest unit first.
    const UNITS: [(f64, usize, &str); 3] = [
        (1_073_741_824.0, 2, "GB"),
        (1_048_576.0, 1, "MB"),
        (1024.0, 1, "KB"),
    ];
    let value = b as f64;
    for &(scale, decimals, suffix) in UNITS.iter() {
        if value >= scale {
            return format!("{:.*} {}", decimals, value / scale, suffix);
        }
    }
    format!("{} B", value as i64)
}
|
||||
|
||||
/// `GET /api/ip/{ip}/export` -- return a comprehensive JSON export of all
/// sessions, stats, and quota usage for an IP. Suitable for download/archival.
///
/// Response is `application/json` with a `Content-Disposition: attachment`
/// header so browsers download it as `btest-{ip}.json`.
async fn api_export(
    State(state): State<Arc<WebState>>,
    Path(ip): Path<String>,
) -> Result<impl IntoResponse, AppError> {
    // Single shared read connection; a poisoned lock surfaces as an error.
    let conn = state
        .query_conn
        .lock()
        .map_err(|e| anyhow::anyhow!("lock: {}", e))?;

    // Stats: totals plus summed measured seconds. Only sessions with both
    // timestamps contribute to the duration sum; COALESCE guards the
    // zero-row case.
    let stats = conn.query_row(
        "SELECT COUNT(*), COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0),
                COALESCE(SUM(CASE WHEN ended_at IS NOT NULL AND started_at IS NOT NULL
                    THEN (julianday(ended_at)-julianday(started_at))*86400.0 ELSE 0 END),0)
         FROM sessions WHERE peer_ip = ?1",
        params![ip],
        |row| {
            let n: i64 = row.get(0)?;
            let tx: i64 = row.get(1)?;
            let rx: i64 = row.get(2)?;
            let secs: f64 = row.get(3)?;
            Ok(StatsJson {
                total_sessions: n,
                total_tx_bytes: tx,
                total_rx_bytes: rx,
                // bytes -> megabits/second, guarded against divide-by-zero.
                avg_tx_mbps: if secs > 0.0 { tx as f64 * 8.0 / secs / 1e6 } else { 0.0 },
                avg_rx_mbps: if secs > 0.0 { rx as f64 * 8.0 / secs / 1e6 } else { 0.0 },
            })
        },
    )?;

    // Quota: today's combined traffic. Any query error (e.g. missing table)
    // is treated as zero usage rather than failing the whole export.
    let daily_used: i64 = conn.query_row(
        "SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage
         WHERE ip = ?1 AND date = date('now')",
        params![ip],
        |row| row.get(0),
    ).unwrap_or(0);

    let quota = QuotaJson {
        daily_used_bytes: daily_used,
        daily_used_human: human_bytes(daily_used),
        // The actual limit lives in server config, not the database.
        daily_limit_bytes: "see server config".to_string(),
    };

    // Sessions with computed fields (duration computed by SQLite).
    // Capped at the 100 most recent by start time.
    let mut stmt = conn.prepare(
        "SELECT id, started_at, ended_at, protocol, direction, tx_bytes, rx_bytes,
                CASE WHEN ended_at IS NOT NULL AND started_at IS NOT NULL
                    THEN (julianday(ended_at) - julianday(started_at)) * 86400.0
                    ELSE 0 END AS dur_secs
         FROM sessions WHERE peer_ip = ?1 ORDER BY started_at DESC LIMIT 100",
    )?;
    // NOTE(review): filter_map(Result::ok) silently drops rows whose mapping
    // fails — confirm best-effort is intended rather than propagating the error.
    let sessions: Vec<ExportSessionJson> = stmt.query_map(params![ip], |row| {
        let tx: i64 = row.get(5)?;
        let rx: i64 = row.get(6)?;
        let dur: f64 = row.get(7)?;
        Ok(ExportSessionJson {
            id: row.get(0)?,
            started_at: row.get(1)?,
            ended_at: row.get(2)?,
            protocol: row.get(3)?,
            direction: row.get(4)?,
            tx_bytes: tx,
            rx_bytes: rx,
            tx_human: human_bytes(tx),
            rx_human: human_bytes(rx),
            duration_secs: dur,
            avg_tx_mbps: if dur > 0.0 { tx as f64 * 8.0 / dur / 1e6 } else { 0.0 },
            avg_rx_mbps: if dur > 0.0 { rx as f64 * 8.0 / dur / 1e6 } else { 0.0 },
        })
    })?.filter_map(Result::ok).collect();

    let export = ExportJson {
        ip: ip.clone(),
        exported_at: {
            // Simple UTC timestamp without chrono
            use std::time::{SystemTime, UNIX_EPOCH};
            let secs = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
            format!("{}", secs) // Unix timestamp — universally parseable
        },
        stats,
        quota,
        sessions,
    };

    let json_string = serde_json::to_string_pretty(&export)
        .map_err(|e| anyhow::anyhow!("json serialize: {}", e))?;

    // NOTE(review): `ip` comes from the URL path and is interpolated into the
    // Content-Disposition value unescaped; a segment containing quotes or
    // other invalid header bytes would yield a malformed header — consider
    // sanitizing. TODO confirm whether routing already restricts the segment.
    Ok((
        StatusCode::OK,
        [
            (axum::http::header::CONTENT_TYPE, "application/json".to_string()),
            (axum::http::header::CONTENT_DISPOSITION,
             format!("attachment; filename=\"btest-{}.json\"", ip)),
        ],
        json_string,
    ))
}
|
||||
|
||||
/// `GET /api/session/{id}/intervals` -- return per-second throughput data
/// for a session.
///
/// If the `session_intervals` table does not exist or contains no rows for
/// the requested session, an empty JSON array is returned.
async fn api_intervals(
    State(state): State<Arc<WebState>>,
    Path(id): Path<i64>,
) -> Result<axum::Json<Vec<IntervalJson>>, AppError> {
    // Scope the lock so the connection guard is dropped before the response
    // value is constructed.
    let intervals = {
        let conn = state
            .query_conn
            .lock()
            .map_err(|e| anyhow::anyhow!("lock: {}", e))?;

        // Guard against the table not existing (e.g. first run before
        // `ensure_web_tables` was ever called on this database file).
        let table_exists: bool = conn
            .query_row(
                "SELECT COUNT(*) FROM sqlite_master \
                 WHERE type = 'table' AND name = 'session_intervals'",
                [],
                |row| row.get::<_, i64>(0),
            )
            .map(|c| c > 0)
            .unwrap_or(false);

        if !table_exists {
            Vec::new()
        } else {
            let mut stmt = conn.prepare(
                "SELECT second, tx_bytes, rx_bytes
                 FROM session_intervals
                 WHERE session_id = ?1
                 ORDER BY second ASC",
            )?;
            let rows = stmt.query_map(params![id], |row| {
                Ok(IntervalJson {
                    second: row.get(0)?,
                    tx_bytes: row.get(1)?,
                    rx_bytes: row.get(2)?,
                })
            })?;
            // NOTE(review): rows that fail to map are silently skipped;
            // confirm this best-effort behavior is intended.
            rows.filter_map(Result::ok).collect::<Vec<_>>()
        }
    };

    Ok(axum::Json(intervals))
}
|
||||
387
src/server_pro/web/templates/dashboard.html
Normal file
387
src/server_pro/web/templates/dashboard.html
Normal file
@@ -0,0 +1,387 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<title>Dashboard — {{ ip }} — btest-rs</title>
|
||||
<style>
|
||||
* { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
background: #0f1117;
|
||||
color: #e1e4e8;
|
||||
min-height: 100vh;
|
||||
padding: 1.5rem;
|
||||
}
|
||||
a { color: #58a6ff; text-decoration: none; }
|
||||
a:hover { text-decoration: underline; }
|
||||
|
||||
.header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1rem;
|
||||
margin-bottom: 1.5rem;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
.header h1 { font-size: 1.5rem; color: #58a6ff; }
|
||||
.header .ip-label {
|
||||
font-size: 1.1rem;
|
||||
color: #8b949e;
|
||||
font-family: monospace;
|
||||
}
|
||||
.header .home-link { margin-left: auto; }
|
||||
|
||||
/* Stats cards */
|
||||
.stats {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
|
||||
gap: 1rem;
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
.stat-card {
|
||||
background: #161b22;
|
||||
border: 1px solid #30363d;
|
||||
border-radius: 8px;
|
||||
padding: 1rem;
|
||||
}
|
||||
.stat-card .label {
|
||||
color: #8b949e;
|
||||
font-size: 0.8rem;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.05em;
|
||||
}
|
||||
.stat-card .value {
|
||||
font-size: 1.4rem;
|
||||
font-weight: 600;
|
||||
margin-top: 0.25rem;
|
||||
}
|
||||
|
||||
/* Table */
|
||||
.table-wrap {
|
||||
overflow-x: auto;
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
background: #161b22;
|
||||
border-radius: 8px;
|
||||
overflow: hidden;
|
||||
}
|
||||
th, td {
|
||||
padding: 0.6rem 1rem;
|
||||
text-align: left;
|
||||
border-bottom: 1px solid #21262d;
|
||||
white-space: nowrap;
|
||||
}
|
||||
th {
|
||||
background: #0d1117;
|
||||
color: #8b949e;
|
||||
font-size: 0.8rem;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.04em;
|
||||
}
|
||||
tr { cursor: pointer; }
|
||||
tr:hover td { background: #1c2128; }
|
||||
tr.selected td { background: #1f3a5f; }
|
||||
|
||||
.proto-tcp { color: #3fb950; }
|
||||
.proto-udp { color: #d29922; }
|
||||
.dir-tx { color: #f78166; }
|
||||
.dir-rx { color: #58a6ff; }
|
||||
.dir-both { color: #bc8cff; }
|
||||
|
||||
/* Chart area */
|
||||
.chart-section {
|
||||
background: #161b22;
|
||||
border: 1px solid #30363d;
|
||||
border-radius: 8px;
|
||||
padding: 1.5rem;
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
.chart-section h2 {
|
||||
font-size: 1rem;
|
||||
color: #8b949e;
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
.chart-container {
|
||||
position: relative;
|
||||
width: 100%;
|
||||
max-height: 360px;
|
||||
}
|
||||
.chart-placeholder {
|
||||
text-align: center;
|
||||
color: #484f58;
|
||||
padding: 3rem 0;
|
||||
}
|
||||
|
||||
.footer {
|
||||
text-align: center;
|
||||
color: #484f58;
|
||||
font-size: 0.8rem;
|
||||
margin-top: 2rem;
|
||||
}
|
||||
.no-data {
|
||||
text-align: center;
|
||||
padding: 3rem;
|
||||
color: #484f58;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<div class="header">
|
||||
<h1>btest-rs</h1>
|
||||
<span class="ip-label">{{ ip }}</span>
|
||||
<span class="home-link"><a href="/">Home</a></span>
|
||||
</div>
|
||||
|
||||
<!-- Stats summary (filled via API) -->
|
||||
<div class="stats" id="stats-grid">
|
||||
<div class="stat-card">
|
||||
<div class="label">Total Tests</div>
|
||||
<div class="value" id="stat-total-tests">—</div>
|
||||
</div>
|
||||
<div class="stat-card">
|
||||
<div class="label">Total TX</div>
|
||||
<div class="value" id="stat-total-tx">—</div>
|
||||
</div>
|
||||
<div class="stat-card">
|
||||
<div class="label">Total RX</div>
|
||||
<div class="value" id="stat-total-rx">—</div>
|
||||
</div>
|
||||
<div class="stat-card">
|
||||
<div class="label">Avg TX Mbps</div>
|
||||
<div class="value" id="stat-avg-tx">—</div>
|
||||
</div>
|
||||
<div class="stat-card">
|
||||
<div class="label">Avg RX Mbps</div>
|
||||
<div class="value" id="stat-avg-rx">—</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Chart for selected session -->
|
||||
<div class="chart-section">
|
||||
<h2 id="chart-title">Select a test below to view its throughput chart</h2>
|
||||
<div class="chart-container">
|
||||
<canvas id="throughput-chart"></canvas>
|
||||
<div class="chart-placeholder" id="chart-placeholder">Click a row in the table to load the throughput graph for that session.</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Sessions table -->
|
||||
<div class="table-wrap">
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>#</th>
|
||||
<th>Date</th>
|
||||
<th>Protocol</th>
|
||||
<th>Direction</th>
|
||||
<th>TX Bytes</th>
|
||||
<th>RX Bytes</th>
|
||||
<th>Duration</th>
|
||||
<th>Avg TX Mbps</th>
|
||||
<th>Avg RX Mbps</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="sessions-body">
|
||||
<tr><td colspan="9" class="no-data">Loading sessions...</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="footer">Powered by btest-rs</div>
|
||||
|
||||
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
|
||||
<script>
|
||||
var currentIp = "{{ ip }}";
|
||||
var throughputChart = null;
|
||||
|
||||
// Human-readable byte count using 1024-based units. Exactly zero renders
// as "0 B"; everything else gets one decimal place, clamped at TB.
function formatBytes(b) {
    if (b === 0) { return '0 B'; }
    var UNITS = ['B', 'KB', 'MB', 'GB', 'TB'];
    var idx = Math.min(
        Math.floor(Math.log(b) / Math.log(1024)),
        UNITS.length - 1
    );
    var scaled = b / Math.pow(1024, idx);
    return scaled.toFixed(1) + ' ' + UNITS[idx];
}
|
||||
|
||||
// Convert a bytes/second rate to megabits/second, formatted to 2 decimals.
function formatMbps(bytesPerSec) {
    var mbps = (bytesPerSec * 8) / 1e6;
    return mbps.toFixed(2);
}
|
||||
|
||||
// Compact elapsed-time label ("42s" or "3m 17s"). Returns '--' when either
// timestamp is missing or the interval comes out negative.
function durationStr(startedAt, endedAt) {
    if (!startedAt || !endedAt) { return '--'; }
    var elapsedMs = new Date(endedAt) - new Date(startedAt);
    if (elapsedMs < 0) { return '--'; }
    var totalSec = Math.round(elapsedMs / 1000);
    return totalSec < 60
        ? totalSec + 's'
        : Math.floor(totalSec / 60) + 'm ' + (totalSec % 60) + 's';
}
|
||||
|
||||
// Elapsed seconds between two timestamps, floored at 0.001 so callers can
// divide by the result safely. Returns 0 when either timestamp is missing.
function durationSec(startedAt, endedAt) {
    if (!startedAt || !endedAt) { return 0; }
    var seconds = (new Date(endedAt) - new Date(startedAt)) / 1000;
    return Math.max(seconds, 0.001);
}
|
||||
|
||||
// Load summary stats for this IP and fill the five stat cards.
// Any network or parse failure is swallowed — the cards keep their
// placeholder em-dashes.
fetch('/api/ip/' + encodeURIComponent(currentIp) + '/stats')
    .then(function(r) { return r.json(); })
    .then(function(data) {
        document.getElementById('stat-total-tests').textContent = data.total_sessions || 0;
        document.getElementById('stat-total-tx').textContent = formatBytes(data.total_tx_bytes || 0);
        document.getElementById('stat-total-rx').textContent = formatBytes(data.total_rx_bytes || 0);
        // toFixed only when truthy: 0 / undefined / null all render '0.00'.
        document.getElementById('stat-avg-tx').textContent = data.avg_tx_mbps ? data.avg_tx_mbps.toFixed(2) : '0.00';
        document.getElementById('stat-avg-rx').textContent = data.avg_rx_mbps ? data.avg_rx_mbps.toFixed(2) : '0.00';
    })
    .catch(function() {});
|
||||
|
||||
// Load sessions list and build one table row per session. On success the
// most recent session is auto-selected so its chart renders immediately.
fetch('/api/ip/' + encodeURIComponent(currentIp) + '/sessions')
    .then(function(r) { return r.json(); })
    .then(function(sessions) {
        var tbody = document.getElementById('sessions-body');
        if (!sessions || sessions.length === 0) {
            tbody.innerHTML = '<tr><td colspan="9" class="no-data">No test sessions found for this IP.</td></tr>';
            return;
        }
        tbody.innerHTML = '';
        sessions.forEach(function(s, i) {
            var tr = document.createElement('tr');
            tr.dataset.sessionId = s.id;
            tr.onclick = function() { selectSession(s.id, tr); };

            // Derived display values; durationSec floors at 0.001 so dur > 0
            // whenever both timestamps are present.
            var dur = durationSec(s.started_at, s.ended_at);
            var avgTx = dur > 0 ? formatMbps(s.tx_bytes / dur) : '0.00';
            var avgRx = dur > 0 ? formatMbps(s.rx_bytes / dur) : '0.00';
            var proto = (s.protocol || 'TCP').toUpperCase();
            var dir = (s.direction || 'BOTH').toUpperCase();
            var protoClass = proto === 'UDP' ? 'proto-udp' : 'proto-tcp';
            var dirClass = dir === 'TX' ? 'dir-tx' : dir === 'RX' ? 'dir-rx' : 'dir-both';

            // NOTE(review): server-provided strings (started_at, protocol,
            // direction) are interpolated into innerHTML unescaped — confirm
            // the API can never return markup in these fields.
            tr.innerHTML =
                '<td>' + (i + 1) + '</td>' +
                '<td>' + (s.started_at || '--') + '</td>' +
                '<td class="' + protoClass + '">' + proto + '</td>' +
                '<td class="' + dirClass + '">' + dir + '</td>' +
                '<td>' + formatBytes(s.tx_bytes || 0) + '</td>' +
                '<td>' + formatBytes(s.rx_bytes || 0) + '</td>' +
                '<td>' + durationStr(s.started_at, s.ended_at) + '</td>' +
                '<td>' + avgTx + '</td>' +
                '<td>' + avgRx + '</td>';
            tbody.appendChild(tr);
        });

        // Auto-select the first (most recent) session
        if (sessions.length > 0) {
            var firstRow = tbody.querySelector('tr');
            if (firstRow) selectSession(sessions[0].id, firstRow);
        }
    })
    .catch(function() {
        document.getElementById('sessions-body').innerHTML =
            '<tr><td colspan="9" class="no-data">Failed to load sessions.</td></tr>';
    });
|
||||
|
||||
// Mark a table row as the active session and load its per-second
// throughput intervals into the chart area.
function selectSession(sessionId, rowEl) {
    // Move the 'selected' highlight to the clicked row.
    document.querySelectorAll('#sessions-body tr').forEach(function (r) {
        r.classList.remove('selected');
    });
    rowEl.classList.add('selected');

    var placeholder = document.getElementById('chart-placeholder');
    document.getElementById('chart-title').textContent =
        'Throughput for session #' + sessionId;
    placeholder.style.display = 'none';

    fetch('/api/session/' + sessionId + '/intervals')
        .then(function (resp) { return resp.json(); })
        .then(function (intervals) { renderChart(intervals); })
        .catch(function () {
            placeholder.style.display = 'block';
            placeholder.textContent = 'Failed to load interval data.';
        });
}
|
||||
|
||||
// Render (or re-render) the TX/RX throughput line chart from per-second
// interval data. Any previous Chart instance is destroyed first so the
// canvas is never bound twice. Empty/missing data restores the placeholder.
function renderChart(intervals) {
    var canvas = document.getElementById('throughput-chart');
    if (throughputChart) {
        throughputChart.destroy();
    }

    if (!intervals || intervals.length === 0) {
        document.getElementById('chart-placeholder').style.display = 'block';
        document.getElementById('chart-placeholder').textContent = 'No interval data available for this session.';
        return;
    }

    var labels = intervals.map(function(d) { return d.second + 's'; });
    // bytes/second -> Mbps, as numeric strings (Chart.js coerces them).
    var txData = intervals.map(function(d) { return (d.tx_bytes * 8 / 1e6).toFixed(2); });
    var rxData = intervals.map(function(d) { return (d.rx_bytes * 8 / 1e6).toFixed(2); });

    throughputChart = new Chart(canvas, {
        type: 'line',
        data: {
            labels: labels,
            datasets: [
                {
                    label: 'TX Mbps',
                    data: txData,
                    borderColor: '#f78166',
                    backgroundColor: 'rgba(247, 129, 102, 0.1)',
                    borderWidth: 2,
                    fill: true,
                    tension: 0.3,
                    pointRadius: 1
                },
                {
                    label: 'RX Mbps',
                    data: rxData,
                    borderColor: '#58a6ff',
                    backgroundColor: 'rgba(88, 166, 255, 0.1)',
                    borderWidth: 2,
                    fill: true,
                    tension: 0.3,
                    pointRadius: 1
                }
            ]
        },
        options: {
            responsive: true,
            maintainAspectRatio: false,
            // Hover anywhere on an x position to see both datasets' values.
            interaction: {
                intersect: false,
                mode: 'index'
            },
            scales: {
                x: {
                    title: { display: true, text: 'Time', color: '#8b949e' },
                    ticks: { color: '#8b949e' },
                    grid: { color: '#21262d' }
                },
                y: {
                    title: { display: true, text: 'Mbps', color: '#8b949e' },
                    ticks: { color: '#8b949e' },
                    grid: { color: '#21262d' },
                    beginAtZero: true
                }
            },
            plugins: {
                legend: {
                    labels: { color: '#e1e4e8' }
                },
                // Dark tooltip matching the page theme.
                tooltip: {
                    backgroundColor: '#161b22',
                    borderColor: '#30363d',
                    borderWidth: 1,
                    titleColor: '#e1e4e8',
                    bodyColor: '#8b949e'
                }
            }
        }
    });
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
160
src/server_pro/web/templates/index.html
Normal file
160
src/server_pro/web/templates/index.html
Normal file
@@ -0,0 +1,160 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<title>btest-rs Public Bandwidth Test Server</title>
|
||||
<style>
|
||||
* { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
background: #0f1117;
|
||||
color: #e1e4e8;
|
||||
min-height: 100vh;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
.container {
|
||||
max-width: 560px;
|
||||
width: 90%;
|
||||
text-align: center;
|
||||
padding: 2rem;
|
||||
}
|
||||
h1 {
|
||||
font-size: 2rem;
|
||||
margin-bottom: 0.5rem;
|
||||
color: #58a6ff;
|
||||
}
|
||||
.subtitle {
|
||||
color: #8b949e;
|
||||
margin-bottom: 2rem;
|
||||
line-height: 1.5;
|
||||
}
|
||||
.search-box {
|
||||
display: flex;
|
||||
gap: 0.5rem;
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
.search-box input {
|
||||
flex: 1;
|
||||
padding: 0.75rem 1rem;
|
||||
border: 1px solid #30363d;
|
||||
border-radius: 6px;
|
||||
background: #161b22;
|
||||
color: #e1e4e8;
|
||||
font-size: 1rem;
|
||||
outline: none;
|
||||
}
|
||||
.search-box input:focus {
|
||||
border-color: #58a6ff;
|
||||
}
|
||||
.search-box input::placeholder {
|
||||
color: #484f58;
|
||||
}
|
||||
.search-box button {
|
||||
padding: 0.75rem 1.5rem;
|
||||
background: #238636;
|
||||
color: #fff;
|
||||
border: none;
|
||||
border-radius: 6px;
|
||||
font-size: 1rem;
|
||||
cursor: pointer;
|
||||
white-space: nowrap;
|
||||
}
|
||||
.search-box button:hover {
|
||||
background: #2ea043;
|
||||
}
|
||||
.info {
|
||||
background: #161b22;
|
||||
border: 1px solid #30363d;
|
||||
border-radius: 8px;
|
||||
padding: 1.5rem;
|
||||
text-align: left;
|
||||
line-height: 1.6;
|
||||
color: #8b949e;
|
||||
}
|
||||
.info h3 {
|
||||
color: #e1e4e8;
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
.info code {
|
||||
background: #0d1117;
|
||||
padding: 0.15rem 0.4rem;
|
||||
border-radius: 4px;
|
||||
font-size: 0.9em;
|
||||
color: #58a6ff;
|
||||
}
|
||||
.auto-link {
|
||||
margin-top: 1rem;
|
||||
font-size: 0.9rem;
|
||||
}
|
||||
.auto-link a {
|
||||
color: #58a6ff;
|
||||
text-decoration: none;
|
||||
}
|
||||
.auto-link a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
.footer {
|
||||
margin-top: 2rem;
|
||||
color: #484f58;
|
||||
font-size: 0.8rem;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<h1>btest-rs</h1>
|
||||
<p class="subtitle">Public MikroTik Bandwidth Test Server — view your test results and history.</p>
|
||||
|
||||
<form class="search-box" id="ip-form" onsubmit="return goToDashboard()">
|
||||
<input type="text" id="ip-input" placeholder="Enter your IP address (e.g. 203.0.113.5)" autocomplete="off">
|
||||
<button type="submit">View Results</button>
|
||||
</form>
|
||||
|
||||
<div class="auto-link" id="auto-detect">
|
||||
Detecting your IP...
|
||||
</div>
|
||||
|
||||
<div class="info">
|
||||
<h3>How it works</h3>
|
||||
<p>
|
||||
Run a bandwidth test from your MikroTik router targeting this server.
|
||||
After the test completes, enter your public IP above to see
|
||||
throughput charts, session history, and aggregate statistics.
|
||||
</p>
|
||||
<p style="margin-top: 0.5rem;">
|
||||
Example: <code>/tool bandwidth-test address=this-server protocol=tcp direction=both</code>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div class="footer">Powered by btest-rs</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Form submit handler: navigate to the dashboard for the typed IP.
// Always returns false so the browser never performs a real form submit.
function goToDashboard() {
    var entered = document.getElementById('ip-input').value.trim();
    if (entered) {
        window.location.href = '/dashboard/' + encodeURIComponent(entered);
    }
    return false;
}
|
||||
|
||||
// Auto-detect visitor IP and offer a direct link.
// NOTE(review): this calls a third-party service (api.ipify.org), which
// discloses the visitor's address to an external host and yields the
// visitor's *public* IP — which may not match the IP the server recorded
// behind NAT/proxies. Confirm this is acceptable for the deployment.
fetch('https://api.ipify.org?format=json')
    .then(function(r) { return r.json(); })
    .then(function(data) {
        if (data.ip) {
            // Pre-fill the form and show a one-click dashboard link.
            document.getElementById('ip-input').value = data.ip;
            document.getElementById('auto-detect').innerHTML =
                'Detected IP: <a href="/dashboard/' + encodeURIComponent(data.ip) + '">' + data.ip + '</a> — click to view your dashboard';
        }
    })
    .catch(function() {
        // On failure just clear the "Detecting your IP..." placeholder.
        document.getElementById('auto-detect').textContent = '';
    });
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -235,7 +235,7 @@ async fn test_csv_created_client() {
|
||||
// Initialize CSV
|
||||
btest_rs::csv_output::init(&csv_path).unwrap();
|
||||
|
||||
let (tx, rx, lost, intervals) = run_client_test(
|
||||
let (tx, rx, lost, _intervals) = run_client_test(
|
||||
"127.0.0.1", port, false, true, false, None, None,
|
||||
).await;
|
||||
|
||||
@@ -336,3 +336,67 @@ async fn test_bandwidth_state_running_flag() {
|
||||
state.running.store(false, Ordering::SeqCst);
|
||||
assert!(!state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
// --- CPU Reporting Tests ---
|
||||
|
||||
/// Helper that returns the full BandwidthState (not just summary) so we can check remote_cpu.
///
/// Spawns `run_client` against `host:port`, lets it run for `secs` seconds,
/// clears the shared `running` flag, waits 500 ms for the client to wind
/// down, then aborts the task. The returned state reflects whatever the
/// client recorded up to that point.
///
/// Panics if neither `transmit` nor `receive` is set.
async fn run_client_with_state(
    host: &str, port: u16, transmit: bool, receive: bool, udp: bool,
    secs: u64,
) -> std::sync::Arc<btest_rs::bandwidth::BandwidthState> {
    // Map the (transmit, receive) flags onto the protocol direction command.
    // NOTE(review): transmit-only maps to CMD_DIR_RX and receive-only to
    // CMD_DIR_TX — presumably the constants are named from the server's
    // perspective; confirm against the protocol module.
    let direction = match (transmit, receive) {
        (true, false) => btest_rs::protocol::CMD_DIR_RX,
        (false, true) => btest_rs::protocol::CMD_DIR_TX,
        (true, true) => btest_rs::protocol::CMD_DIR_BOTH,
        _ => panic!("must specify direction"),
    };
    let state = btest_rs::bandwidth::BandwidthState::new();
    let state_clone = state.clone();
    // Own the host string so the spawned task can be 'static.
    let host = host.to_string();

    let handle = tokio::spawn(async move {
        btest_rs::client::run_client(
            &host, port, direction, udp,
            0, 0, None, None, false, state_clone,
        ).await
    });

    // Let the test traffic flow for the requested duration...
    tokio::time::sleep(Duration::from_secs(secs)).await;
    // ...then signal the client loop to stop and give it time to settle
    // before forcibly aborting the task.
    state.running.store(false, Ordering::SeqCst);
    tokio::time::sleep(Duration::from_millis(500)).await;
    handle.abort();

    state
}
|
||||
|
||||
/// Sanity-check the CPU sampler: start it, let it warm up for two seconds,
/// and verify the reading stays in range without panicking.
#[test]
fn test_local_cpu_nonzero() {
    // CPU sampler should return > 0 on supported platforms after warming up
    btest_rs::cpu::start_sampler();
    // Give the sampler time to collect at least one measurement interval.
    std::thread::sleep(Duration::from_secs(2));
    let cpu = btest_rs::cpu::get();
    // On CI or idle machines, CPU may genuinely be 0, so just check it doesn't panic
    // and returns a value in range
    assert!(cpu <= 100, "CPU should be 0-100, got {}", cpu);
}
|
||||
|
||||
/// End-to-end: with TCP bidirectional traffic on loopback, the client should
/// pick up a nonzero server CPU reading from the in-stream status messages.
#[tokio::test]
async fn test_tcp_remote_cpu_both() {
    // Unique offset from BASE_PORT so concurrent tests don't collide.
    let port = BASE_PORT + 20;
    start_server_noauth(port).await;
    let state = run_client_with_state("127.0.0.1", port, true, true, false, 3).await;
    let remote_cpu = state.remote_cpu.load(Ordering::Relaxed);
    // On loopback with bidirectional traffic, server CPU should be > 0
    // The status messages are interleaved in the TCP data stream
    assert!(remote_cpu > 0, "TCP BOTH: remote CPU should be > 0 on loopback, got {}", remote_cpu);
}
|
||||
|
||||
/// End-to-end: in TX-only mode the server's status messages (carrying its
/// CPU figure) must still reach and be parsed by the client's status reader.
#[tokio::test]
async fn test_tcp_remote_cpu_tx_only() {
    // Unique offset from BASE_PORT so concurrent tests don't collide.
    let port = BASE_PORT + 21;
    start_server_noauth(port).await;
    let state = run_client_with_state("127.0.0.1", port, true, false, false, 3).await;
    let remote_cpu = state.remote_cpu.load(Ordering::Relaxed);
    // TX-only: server sends status messages that the status reader should parse
    assert!(remote_cpu > 0, "TCP TX-only: remote CPU should be > 0 on loopback, got {}", remote_cpu);
}
|
||||
|
||||
Reference in New Issue
Block a user