Merge feature/server-pro into main
Adds btest-server-pro: multi-user bandwidth test server with SQLite DB, per-IP quotas (daily/weekly/monthly), inline byte budget enforcement, TCP multi-connection support, MD5 auth, web dashboard with Chart.js graphs, quota progress bars, and JSON export. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
1434
Cargo.lock
generated
1434
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
16
Cargo.toml
16
Cargo.toml
@@ -24,6 +24,15 @@ path = "src/bin/client_only.rs"
|
||||
name = "btest-server"
|
||||
path = "src/bin/server_only.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "btest-server-pro"
|
||||
path = "src/server_pro/main.rs"
|
||||
required-features = ["pro"]
|
||||
|
||||
[features]
|
||||
default = []
|
||||
pro = ["dep:rusqlite", "dep:ldap3", "dep:axum", "dep:tower-http", "dep:serde", "dep:serde_json", "dep:askama"]
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
@@ -40,6 +49,13 @@ num-traits = "0.2.19"
|
||||
num-integer = "0.1.46"
|
||||
sha2 = "0.11.0"
|
||||
hostname = "0.4.2"
|
||||
rusqlite = { version = "0.39.0", features = ["bundled"], optional = true }
|
||||
ldap3 = { version = "0.12.1", optional = true }
|
||||
axum = { version = "0.8.8", features = ["tokio"], optional = true }
|
||||
tower-http = { version = "0.6.8", features = ["fs", "cors"], optional = true }
|
||||
serde = { version = "1.0.228", features = ["derive"], optional = true }
|
||||
serde_json = { version = "1.0.149", optional = true }
|
||||
askama = { version = "0.15.6", optional = true }
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
|
||||
@@ -20,6 +20,9 @@ pub struct BandwidthState {
|
||||
pub intervals: AtomicU32,
|
||||
/// Remote peer's CPU usage (received via status messages)
|
||||
pub remote_cpu: AtomicU8,
|
||||
/// Remaining byte budget (TX + RX combined). When this reaches 0 the test
|
||||
/// stops immediately. u64::MAX means unlimited (default for non-pro server).
|
||||
pub byte_budget: AtomicU64,
|
||||
}
|
||||
|
||||
impl BandwidthState {
|
||||
@@ -38,6 +41,7 @@ impl BandwidthState {
|
||||
total_lost_packets: AtomicU64::new(0),
|
||||
intervals: AtomicU32::new(0),
|
||||
remote_cpu: AtomicU8::new(0),
|
||||
byte_budget: AtomicU64::new(u64::MAX),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -50,6 +54,29 @@ impl BandwidthState {
|
||||
self.intervals.fetch_add(1, Relaxed);
|
||||
}
|
||||
|
||||
/// Try to spend `amount` bytes from the budget. Returns `true` if allowed,
|
||||
/// `false` if the budget is exhausted (and sets `running = false`).
|
||||
#[inline]
|
||||
pub fn spend_budget(&self, amount: u64) -> bool {
|
||||
use std::sync::atomic::Ordering::{Relaxed, SeqCst};
|
||||
// Fast path: unlimited budget (non-pro server)
|
||||
let current = self.byte_budget.load(Relaxed);
|
||||
if current == u64::MAX {
|
||||
return true;
|
||||
}
|
||||
if current < amount {
|
||||
self.running.store(false, SeqCst);
|
||||
return false;
|
||||
}
|
||||
self.byte_budget.fetch_sub(amount, Relaxed);
|
||||
true
|
||||
}
|
||||
|
||||
/// Set the byte budget (total bytes allowed for the entire test).
|
||||
pub fn set_budget(&self, budget: u64) {
|
||||
self.byte_budget.store(budget, std::sync::atomic::Ordering::SeqCst);
|
||||
}
|
||||
|
||||
/// Get summary for syslog reporting.
|
||||
pub fn summary(&self) -> (u64, u64, u64, u32) {
|
||||
use std::sync::atomic::Ordering::Relaxed;
|
||||
|
||||
@@ -366,8 +366,40 @@ async fn handle_client(
|
||||
|
||||
// --- TCP Test Server ---
|
||||
|
||||
/// Public TX task for multi-connection use by server_pro.
|
||||
pub async fn tcp_tx_task(
|
||||
writer: tokio::net::tcp::OwnedWriteHalf,
|
||||
tx_size: usize,
|
||||
tx_speed: u32,
|
||||
state: Arc<BandwidthState>,
|
||||
) {
|
||||
tcp_tx_loop(writer, tx_size, tx_speed, state).await;
|
||||
}
|
||||
|
||||
/// Public RX task for multi-connection use by server_pro.
|
||||
pub async fn tcp_rx_task(
|
||||
reader: tokio::net::tcp::OwnedReadHalf,
|
||||
state: Arc<BandwidthState>,
|
||||
) {
|
||||
tcp_rx_loop(reader, state).await;
|
||||
}
|
||||
|
||||
/// Run a TCP bandwidth test on an already-authenticated stream.
|
||||
/// Public API for use by server_pro.
|
||||
pub async fn run_tcp_test(
|
||||
stream: TcpStream,
|
||||
cmd: Command,
|
||||
state: Arc<BandwidthState>,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
run_tcp_test_inner(stream, cmd, state).await
|
||||
}
|
||||
|
||||
async fn run_tcp_test_server(stream: TcpStream, cmd: Command) -> Result<(u64, u64, u64, u32)> {
|
||||
let state = BandwidthState::new();
|
||||
run_tcp_test_inner(stream, cmd, state).await
|
||||
}
|
||||
|
||||
async fn run_tcp_test_inner(stream: TcpStream, cmd: Command, state: Arc<BandwidthState>) -> Result<(u64, u64, u64, u32)> {
|
||||
let tx_size = cmd.tx_size as usize;
|
||||
let server_should_tx = cmd.server_tx();
|
||||
let server_should_rx = cmd.server_rx();
|
||||
@@ -437,9 +469,22 @@ async fn run_tcp_test_server(stream: TcpStream, cmd: Command) -> Result<(u64, u6
|
||||
Ok(state.summary())
|
||||
}
|
||||
|
||||
/// Public API for multi-connection TCP test with external state. Used by server_pro.
|
||||
pub async fn run_tcp_multiconn_test(
|
||||
streams: Vec<TcpStream>,
|
||||
cmd: Command,
|
||||
state: Arc<BandwidthState>,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
run_tcp_multiconn_inner(streams, cmd, state).await
|
||||
}
|
||||
|
||||
/// TCP multi-connection.
|
||||
async fn run_tcp_multiconn_server(streams: Vec<TcpStream>, cmd: Command) -> Result<(u64, u64, u64, u32)> {
|
||||
let state = BandwidthState::new();
|
||||
run_tcp_multiconn_inner(streams, cmd, state).await
|
||||
}
|
||||
|
||||
async fn run_tcp_multiconn_inner(streams: Vec<TcpStream>, cmd: Command, state: Arc<BandwidthState>) -> Result<(u64, u64, u64, u32)> {
|
||||
let tx_size = cmd.tx_size as usize;
|
||||
let server_should_tx = cmd.server_tx();
|
||||
let server_should_rx = cmd.server_rx();
|
||||
@@ -550,6 +595,9 @@ async fn tcp_tx_loop_inner(
|
||||
next_status = Instant::now() + Duration::from_secs(1);
|
||||
}
|
||||
|
||||
if !state.spend_budget(tx_size as u64) {
|
||||
break;
|
||||
}
|
||||
if writer.write_all(&packet).await.is_err() {
|
||||
state.running.store(false, Ordering::SeqCst);
|
||||
break;
|
||||
@@ -586,6 +634,9 @@ async fn tcp_rx_loop(mut reader: tokio::net::tcp::OwnedReadHalf, state: Arc<Band
|
||||
break;
|
||||
}
|
||||
Ok(n) => {
|
||||
if !state.spend_budget(n as u64) {
|
||||
break;
|
||||
}
|
||||
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
@@ -633,6 +684,18 @@ async fn tcp_status_sender(
|
||||
|
||||
// --- UDP Test Server ---
|
||||
|
||||
/// Run a UDP bandwidth test on an already-authenticated stream.
|
||||
/// Public API for use by server_pro. Caller provides the UDP port offset.
|
||||
pub async fn run_udp_test(
|
||||
stream: &mut TcpStream,
|
||||
peer: SocketAddr,
|
||||
cmd: &Command,
|
||||
state: Arc<BandwidthState>,
|
||||
udp_port_start: u16,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
run_udp_test_inner(stream, peer, cmd, state, udp_port_start).await
|
||||
}
|
||||
|
||||
async fn run_udp_test_server(
|
||||
stream: &mut TcpStream,
|
||||
peer: SocketAddr,
|
||||
@@ -640,7 +703,17 @@ async fn run_udp_test_server(
|
||||
udp_port_offset: Arc<std::sync::atomic::AtomicU16>,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
let offset = udp_port_offset.fetch_add(1, Ordering::SeqCst);
|
||||
let server_udp_port = BTEST_UDP_PORT_START + offset;
|
||||
let state = BandwidthState::new();
|
||||
run_udp_test_inner(stream, peer, cmd, state, BTEST_UDP_PORT_START + offset).await
|
||||
}
|
||||
|
||||
async fn run_udp_test_inner(
|
||||
stream: &mut TcpStream,
|
||||
peer: SocketAddr,
|
||||
cmd: &Command,
|
||||
state: Arc<BandwidthState>,
|
||||
server_udp_port: u16,
|
||||
) -> Result<(u64, u64, u64, u32)> {
|
||||
let client_udp_port = server_udp_port + BTEST_PORT_CLIENT_OFFSET;
|
||||
|
||||
stream.write_all(&server_udp_port.to_be_bytes()).await?;
|
||||
@@ -707,7 +780,6 @@ async fn run_udp_test_server(
|
||||
if use_unconnected { "unconnected" } else { "connected" },
|
||||
);
|
||||
|
||||
let state = BandwidthState::new();
|
||||
let tx_size = cmd.tx_size as usize;
|
||||
let server_should_tx = cmd.server_tx();
|
||||
let server_should_rx = cmd.server_rx();
|
||||
@@ -761,6 +833,10 @@ async fn udp_tx_loop(
|
||||
let mut consecutive_errors: u32 = 0;
|
||||
|
||||
while state.running.load(Ordering::Relaxed) {
|
||||
if !state.spend_budget(tx_size as u64) {
|
||||
break;
|
||||
}
|
||||
|
||||
packet[0..4].copy_from_slice(&seq.to_be_bytes());
|
||||
|
||||
let result = if multi_conn {
|
||||
@@ -836,6 +912,9 @@ async fn udp_rx_loop(socket: &UdpSocket, state: Arc<BandwidthState>) {
|
||||
// (multi-connection MikroTik sends from multiple ports)
|
||||
match tokio::time::timeout(Duration::from_secs(5), socket.recv_from(&mut buf)).await {
|
||||
Ok(Ok((n, _src))) if n >= 4 => {
|
||||
if !state.spend_budget(n as u64) {
|
||||
break;
|
||||
}
|
||||
state.rx_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||
state.rx_packets.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
|
||||
411
src/server_pro/enforcer.rs
Normal file
411
src/server_pro/enforcer.rs
Normal file
@@ -0,0 +1,411 @@
|
||||
//! Mid-session quota enforcement.
|
||||
//!
|
||||
//! Runs alongside a bandwidth test, periodically checking if the user
|
||||
//! or IP has exceeded their quota. Terminates the test if so.
|
||||
|
||||
use std::net::IpAddr;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use btest_rs::bandwidth::BandwidthState;
|
||||
|
||||
use super::quota::{Direction, QuotaManager};
|
||||
|
||||
/// Enforces quotas during an active test session.
|
||||
/// Call `run()` as a spawned task — it will set `state.running = false`
|
||||
/// when a quota is exceeded or max_duration is reached.
|
||||
pub struct QuotaEnforcer {
|
||||
quota_mgr: QuotaManager,
|
||||
username: String,
|
||||
ip: IpAddr,
|
||||
state: Arc<BandwidthState>,
|
||||
check_interval: Duration,
|
||||
max_duration: Duration,
|
||||
}
|
||||
|
||||
/// Why an enforced test session stopped (or `Running` if it has not).
#[derive(Debug, PartialEq)]
pub enum StopReason {
    /// Test still running (not stopped)
    Running,
    /// Max duration reached
    MaxDuration,
    /// User daily quota exceeded
    UserDailyQuota,
    /// User weekly quota exceeded
    UserWeeklyQuota,
    /// User monthly quota exceeded
    UserMonthlyQuota,
    /// IP daily quota exceeded
    IpDailyQuota,
    /// IP weekly quota exceeded
    IpWeeklyQuota,
    /// IP monthly quota exceeded
    IpMonthlyQuota,
    /// Client disconnected normally
    ClientDisconnected,
}

impl std::fmt::Display for StopReason {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Stable snake_case tokens; these strings appear in logs, so each
        // label is reproduced exactly as before.
        let label = match self {
            Self::Running => "running",
            Self::MaxDuration => "max_duration_reached",
            Self::UserDailyQuota => "user_daily_quota_exceeded",
            Self::UserWeeklyQuota => "user_weekly_quota_exceeded",
            Self::UserMonthlyQuota => "user_monthly_quota_exceeded",
            Self::IpDailyQuota => "ip_daily_quota_exceeded",
            Self::IpWeeklyQuota => "ip_weekly_quota_exceeded",
            Self::IpMonthlyQuota => "ip_monthly_quota_exceeded",
            Self::ClientDisconnected => "client_disconnected",
        };
        f.write_str(label)
    }
}
|
||||
|
||||
impl QuotaEnforcer {
|
||||
pub fn new(
|
||||
quota_mgr: QuotaManager,
|
||||
username: String,
|
||||
ip: IpAddr,
|
||||
state: Arc<BandwidthState>,
|
||||
check_interval_secs: u64,
|
||||
max_duration_secs: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
quota_mgr,
|
||||
username,
|
||||
ip,
|
||||
state,
|
||||
check_interval: Duration::from_secs(check_interval_secs.max(1)),
|
||||
max_duration: if max_duration_secs > 0 {
|
||||
Duration::from_secs(max_duration_secs)
|
||||
} else {
|
||||
Duration::from_secs(u64::MAX / 2) // effectively unlimited
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Run the enforcer loop. Returns the reason the test was stopped.
|
||||
/// This should be spawned as a tokio task.
|
||||
pub async fn run(&self) -> StopReason {
|
||||
let start = Instant::now();
|
||||
let mut interval = tokio::time::interval(self.check_interval);
|
||||
interval.tick().await; // consume first immediate tick
|
||||
|
||||
loop {
|
||||
interval.tick().await;
|
||||
|
||||
// Check if test already ended normally
|
||||
if !self.state.running.load(Ordering::Relaxed) {
|
||||
return StopReason::ClientDisconnected;
|
||||
}
|
||||
|
||||
// Check max duration
|
||||
if start.elapsed() >= self.max_duration {
|
||||
tracing::warn!(
|
||||
"Max duration ({:?}) reached for user '{}' from {}",
|
||||
self.max_duration, self.username, self.ip,
|
||||
);
|
||||
self.state.running.store(false, Ordering::SeqCst);
|
||||
return StopReason::MaxDuration;
|
||||
}
|
||||
|
||||
// Flush current session bytes to DB before checking
|
||||
// (read without reset — totals accumulate, we just need current snapshot)
|
||||
let session_tx = self.state.total_tx_bytes.load(Ordering::Relaxed);
|
||||
let session_rx = self.state.total_rx_bytes.load(Ordering::Relaxed);
|
||||
|
||||
// Temporarily record session bytes so quota check sees them
|
||||
// We use a separate "pending" record that gets finalized at session end
|
||||
let ip_str = self.ip.to_string();
|
||||
|
||||
// Check user quotas
|
||||
match self.check_user_with_session(session_tx, session_rx) {
|
||||
StopReason::Running => {}
|
||||
reason => {
|
||||
tracing::warn!(
|
||||
"Quota exceeded for user '{}' from {}: {} (session: tx={}, rx={})",
|
||||
self.username, self.ip, reason, session_tx, session_rx,
|
||||
);
|
||||
self.state.running.store(false, Ordering::SeqCst);
|
||||
return reason;
|
||||
}
|
||||
}
|
||||
|
||||
// Check IP quotas
|
||||
match self.check_ip_with_session(&ip_str, session_tx, session_rx) {
|
||||
StopReason::Running => {}
|
||||
reason => {
|
||||
tracing::warn!(
|
||||
"IP quota exceeded for {} (user '{}'): {} (session: tx={}, rx={})",
|
||||
self.ip, self.username, reason, session_tx, session_rx,
|
||||
);
|
||||
self.state.running.store(false, Ordering::SeqCst);
|
||||
return reason;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn check_user_with_session(&self, session_tx: u64, session_rx: u64) -> StopReason {
|
||||
let session_total = session_tx + session_rx;
|
||||
|
||||
// Check against quota manager (which reads DB)
|
||||
// The DB has usage from PREVIOUS sessions; we add current session bytes
|
||||
if let Err(e) = self.quota_mgr.check_user(&self.username) {
|
||||
// Already exceeded from previous sessions
|
||||
return match format!("{}", e).as_str() {
|
||||
s if s.contains("daily") => StopReason::UserDailyQuota,
|
||||
s if s.contains("weekly") => StopReason::UserWeeklyQuota,
|
||||
s if s.contains("monthly") => StopReason::UserMonthlyQuota,
|
||||
_ => StopReason::UserDailyQuota,
|
||||
};
|
||||
}
|
||||
|
||||
// Also check if current session PLUS previous usage exceeds quota
|
||||
// (check_user only sees DB, not current session bytes)
|
||||
// This is handled by the quota_mgr.check_user reading from DB,
|
||||
// and we periodically flush to DB during the session.
|
||||
StopReason::Running
|
||||
}
|
||||
|
||||
fn check_ip_with_session(&self, ip_str: &str, session_tx: u64, session_rx: u64) -> StopReason {
|
||||
if let Err(e) = self.quota_mgr.check_ip(&self.ip, Direction::Both) {
|
||||
return match format!("{}", e).as_str() {
|
||||
s if s.contains("IP daily") => StopReason::IpDailyQuota,
|
||||
s if s.contains("IP weekly") => StopReason::IpWeeklyQuota,
|
||||
s if s.contains("IP monthly") => StopReason::IpMonthlyQuota,
|
||||
s if s.contains("connections") => StopReason::IpDailyQuota, // reuse
|
||||
_ => StopReason::IpDailyQuota,
|
||||
};
|
||||
}
|
||||
StopReason::Running
|
||||
}
|
||||
|
||||
/// Flush session bytes to DB. Call periodically and at session end.
|
||||
pub fn flush_to_db(&self) {
|
||||
let tx = self.state.total_tx_bytes.load(Ordering::Relaxed);
|
||||
let rx = self.state.total_rx_bytes.load(Ordering::Relaxed);
|
||||
// From server perspective: tx = outbound (we sent), rx = inbound (we received)
|
||||
self.quota_mgr.record_usage(
|
||||
&self.username,
|
||||
&self.ip.to_string(),
|
||||
rx, // inbound = what we received from client
|
||||
tx, // outbound = what we sent to client
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::user_db::UserDb;
|
||||
use crate::quota::QuotaManager;
|
||||
|
||||
fn setup_test_db() -> (UserDb, QuotaManager) {
|
||||
let db = UserDb::open(":memory:").unwrap();
|
||||
db.ensure_tables().unwrap();
|
||||
db.add_user("testuser", "testpass").unwrap();
|
||||
let qm = QuotaManager::new(
|
||||
db.clone(),
|
||||
1000, // daily: 1000 bytes
|
||||
5000, // weekly
|
||||
10000, // monthly
|
||||
500, // ip daily (combined)
|
||||
2000, // ip weekly (combined)
|
||||
8000, // ip monthly (combined)
|
||||
500, // ip_daily_inbound
|
||||
500, // ip_daily_outbound
|
||||
2000, // ip_weekly_inbound
|
||||
2000, // ip_weekly_outbound
|
||||
8000, // ip_monthly_inbound
|
||||
8000, // ip_monthly_outbound
|
||||
2, // max conn per ip
|
||||
60, // max duration
|
||||
);
|
||||
(db, qm)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_max_duration() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let state = BandwidthState::new();
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state.clone(), 1, 2, // check every 1s, max 2s
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::MaxDuration);
|
||||
assert!(!state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_client_disconnect() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let state = BandwidthState::new();
|
||||
let state_clone = state.clone();
|
||||
|
||||
// Stop the test after 500ms
|
||||
tokio::spawn(async move {
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
state_clone.running.store(false, Ordering::SeqCst);
|
||||
});
|
||||
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state, 1, 0, // check every 1s, no max duration
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::ClientDisconnected);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_user_daily_quota_exceeded() {
|
||||
let (db, qm) = setup_test_db();
|
||||
|
||||
// Pre-fill usage to exceed daily quota (1000 bytes)
|
||||
db.record_usage("testuser", 600, 500).unwrap(); // 1100 > 1000
|
||||
|
||||
let state = BandwidthState::new();
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state.clone(), 1, 0,
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::UserDailyQuota);
|
||||
assert!(!state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_ip_daily_quota_exceeded() {
|
||||
let (db, qm) = setup_test_db();
|
||||
|
||||
// Pre-fill IP usage to exceed IP daily quota (500 bytes)
|
||||
db.record_ip_usage("127.0.0.1", 300, 300).unwrap(); // 600 > 500
|
||||
|
||||
let state = BandwidthState::new();
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state.clone(), 1, 0,
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::IpDailyQuota);
|
||||
assert!(!state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_under_quota_runs_normally() {
|
||||
let (db, qm) = setup_test_db();
|
||||
|
||||
// Usage well under quota
|
||||
db.record_usage("testuser", 100, 100).unwrap(); // 200 < 1000
|
||||
|
||||
let state = BandwidthState::new();
|
||||
let state_clone = state.clone();
|
||||
|
||||
// Stop after 2s
|
||||
tokio::spawn(async move {
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
state_clone.running.store(false, Ordering::SeqCst);
|
||||
});
|
||||
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state, 1, 0,
|
||||
);
|
||||
let reason = enforcer.run().await;
|
||||
assert_eq!(reason, StopReason::ClientDisconnected);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_enforcer_flush_records_usage() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let state = BandwidthState::new();
|
||||
|
||||
// Simulate some transfer
|
||||
state.total_tx_bytes.store(5000, Ordering::Relaxed);
|
||||
state.total_rx_bytes.store(3000, Ordering::Relaxed);
|
||||
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
qm, "testuser".into(), "127.0.0.1".parse().unwrap(),
|
||||
state, 10, 0,
|
||||
);
|
||||
enforcer.flush_to_db();
|
||||
|
||||
// flush_to_db: total_tx=5000→outbound, total_rx=3000→inbound
|
||||
// quota_mgr.record_usage(inbound=3000, outbound=5000)
|
||||
// db.record_usage(tx=outbound=5000, rx=inbound=3000)
|
||||
let (tx, rx) = db.get_daily_usage("testuser").unwrap();
|
||||
assert_eq!(tx, 5000); // outbound (what server sent)
|
||||
assert_eq!(rx, 3000); // inbound (what server received)
|
||||
|
||||
let (ip_in, ip_out) = db.get_ip_daily_usage("127.0.0.1").unwrap();
|
||||
assert!(ip_in + ip_out > 0, "IP usage should be recorded");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remaining_budget_calculation() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let ip: IpAddr = "10.0.0.1".parse().unwrap();
|
||||
|
||||
// No usage yet: budget = min(daily=1000, weekly=5000, monthly=10000, ip_daily=500, ...)
|
||||
// IP daily combined = 500 is the smallest
|
||||
let budget = qm.remaining_budget("testuser", &ip);
|
||||
assert_eq!(budget, 500, "budget should be min of all limits (ip_daily=500)");
|
||||
|
||||
// Use record_usage which properly records combined + directional
|
||||
// inbound=200, outbound=200 → combined = 400
|
||||
qm.record_usage("testuser", "10.0.0.1", 200, 200);
|
||||
|
||||
// IP daily combined: 500 - 400 = 100 remaining
|
||||
// IP daily inbound: 500 - 200 = 300 remaining
|
||||
// IP daily outbound: 500 - 200 = 300 remaining
|
||||
// User daily: 1000 - 400 = 600 remaining
|
||||
let budget = qm.remaining_budget("testuser", &ip);
|
||||
assert_eq!(budget, 100, "budget should reflect IP combined remaining (100)");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_budget_zero_when_exhausted() {
|
||||
let (db, qm) = setup_test_db();
|
||||
let ip: IpAddr = "10.0.0.2".parse().unwrap();
|
||||
|
||||
// Exhaust user daily quota (1000 bytes)
|
||||
db.record_usage("testuser", 600, 500).unwrap(); // 1100 > 1000
|
||||
|
||||
let budget = qm.remaining_budget("testuser", &ip);
|
||||
assert_eq!(budget, 0, "budget should be 0 when user daily quota is exhausted");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_byte_budget_stops_transfer() {
|
||||
let state = BandwidthState::new();
|
||||
|
||||
// Set a 1000-byte budget
|
||||
state.set_budget(1000);
|
||||
|
||||
// Spend 500 bytes — should succeed
|
||||
assert!(state.spend_budget(500));
|
||||
|
||||
// Spend another 400 — should succeed (100 remaining)
|
||||
assert!(state.spend_budget(400));
|
||||
|
||||
// Spend 200 — should fail (only 100 remaining)
|
||||
assert!(!state.spend_budget(200));
|
||||
|
||||
// running should be false
|
||||
assert!(!state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_unlimited_budget_always_succeeds() {
|
||||
let state = BandwidthState::new();
|
||||
// Default budget is u64::MAX (unlimited)
|
||||
|
||||
// Should always succeed
|
||||
for _ in 0..1000 {
|
||||
assert!(state.spend_budget(1_000_000_000));
|
||||
}
|
||||
assert!(state.running.load(Ordering::Relaxed));
|
||||
}
|
||||
}
|
||||
74
src/server_pro/ldap_auth.rs
Normal file
74
src/server_pro/ldap_auth.rs
Normal file
@@ -0,0 +1,74 @@
|
||||
//! LDAP/Active Directory authentication for btest-server-pro.
|
||||
//!
|
||||
//! Authenticates users against an LDAP directory using simple bind.
|
||||
|
||||
use ldap3::{LdapConnAsync, Scope, SearchEntry};
|
||||
|
||||
/// Connection and bind settings for the LDAP directory.
pub struct LdapConfig {
    /// LDAP server URL, e.g. `ldap://dc.example.com`.
    pub url: String,
    /// Base DN under which users are searched (or appended for direct bind).
    pub base_dn: String,
    /// Optional service-account DN used to locate the user's DN.
    pub bind_dn: Option<String>,
    /// Password for the service account, if any.
    pub bind_pass: Option<String>,
}
|
||||
|
||||
pub struct LdapAuth {
|
||||
config: LdapConfig,
|
||||
}
|
||||
|
||||
impl LdapAuth {
|
||||
pub fn new(config: LdapConfig) -> Self {
|
||||
Self { config }
|
||||
}
|
||||
|
||||
/// Authenticate a user by attempting an LDAP bind.
|
||||
/// Returns Ok(true) if authentication succeeds.
|
||||
pub async fn authenticate(&self, username: &str, password: &str) -> anyhow::Result<bool> {
|
||||
let (conn, mut ldap) = LdapConnAsync::new(&self.config.url).await?;
|
||||
ldap3::drive!(conn);
|
||||
|
||||
// If service account configured, bind first to search for user DN
|
||||
let user_dn = if let (Some(ref bind_dn), Some(ref bind_pass)) =
|
||||
(&self.config.bind_dn, &self.config.bind_pass)
|
||||
{
|
||||
let result = ldap.simple_bind(bind_dn, bind_pass).await?;
|
||||
if result.rc != 0 {
|
||||
tracing::warn!("LDAP service bind failed: rc={}", result.rc);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// Search for the user
|
||||
let filter = format!(
|
||||
"(&(objectClass=person)(|(uid={})(sAMAccountName={})(cn={})))",
|
||||
username, username, username
|
||||
);
|
||||
let (results, _) = ldap
|
||||
.search(&self.config.base_dn, Scope::Subtree, &filter, vec!["dn"])
|
||||
.await?
|
||||
.success()?;
|
||||
|
||||
if results.is_empty() {
|
||||
tracing::debug!("LDAP user not found: {}", username);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let entry = SearchEntry::construct(results.into_iter().next().unwrap());
|
||||
entry.dn
|
||||
} else {
|
||||
// No service account — construct DN directly
|
||||
format!("uid={},{}", username, self.config.base_dn)
|
||||
};
|
||||
|
||||
// Attempt user bind
|
||||
let result = ldap.simple_bind(&user_dn, password).await?;
|
||||
let success = result.rc == 0;
|
||||
|
||||
if success {
|
||||
tracing::info!("LDAP auth successful for {} (dn={})", username, user_dn);
|
||||
} else {
|
||||
tracing::warn!("LDAP auth failed for {} (dn={}): rc={}", username, user_dn, result.rc);
|
||||
}
|
||||
|
||||
let _ = ldap.unbind().await;
|
||||
Ok(success)
|
||||
}
|
||||
}
|
||||
343
src/server_pro/main.rs
Normal file
343
src/server_pro/main.rs
Normal file
@@ -0,0 +1,343 @@
|
||||
//! btest-server-pro: MikroTik Bandwidth Test server with multi-user, quotas, and LDAP.
|
||||
//!
|
||||
//! This is a superset of the standard `btest` server with additional features:
|
||||
//! - SQLite user database (--users-db)
|
||||
//! - Per-user and per-IP bandwidth quotas (daily/weekly)
|
||||
//! - LDAP/Active Directory authentication (--ldap-url)
|
||||
//! - Rate limiting for public server deployment
|
||||
//!
|
||||
//! Build with: cargo build --release --features pro --bin btest-server-pro
|
||||
|
||||
mod user_db;
|
||||
mod quota;
|
||||
mod enforcer;
|
||||
mod server_loop;
|
||||
mod web;
|
||||
mod ldap_auth;
|
||||
|
||||
use clap::Parser;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(
|
||||
name = "btest-server-pro",
|
||||
about = "btest-rs Pro Server: multi-user, quotas, LDAP",
|
||||
version,
|
||||
)]
|
||||
struct Cli {
|
||||
/// Listen port
|
||||
#[arg(short = 'P', long = "port", default_value_t = 2000)]
|
||||
port: u16,
|
||||
|
||||
/// IPv4 listen address
|
||||
#[arg(long = "listen", default_value = "0.0.0.0")]
|
||||
listen_addr: String,
|
||||
|
||||
/// IPv6 listen address (optional)
|
||||
#[arg(long = "listen6")]
|
||||
listen6_addr: Option<String>,
|
||||
|
||||
/// SQLite user database path
|
||||
#[arg(long = "users-db", default_value = "btest-users.db")]
|
||||
users_db: String,
|
||||
|
||||
/// LDAP server URL (e.g., ldap://dc.example.com)
|
||||
#[arg(long = "ldap-url")]
|
||||
ldap_url: Option<String>,
|
||||
|
||||
/// LDAP base DN for user search
|
||||
#[arg(long = "ldap-base-dn")]
|
||||
ldap_base_dn: Option<String>,
|
||||
|
||||
/// LDAP bind DN (for service account)
|
||||
#[arg(long = "ldap-bind-dn")]
|
||||
ldap_bind_dn: Option<String>,
|
||||
|
||||
/// LDAP bind password
|
||||
#[arg(long = "ldap-bind-pass")]
|
||||
ldap_bind_pass: Option<String>,
|
||||
|
||||
/// Default daily quota per user in bytes (0 = unlimited)
|
||||
#[arg(long = "daily-quota", default_value_t = 0)]
|
||||
daily_quota: u64,
|
||||
|
||||
/// Default weekly quota per user in bytes (0 = unlimited)
|
||||
#[arg(long = "weekly-quota", default_value_t = 0)]
|
||||
weekly_quota: u64,
|
||||
|
||||
/// Default monthly quota per user in bytes (0 = unlimited)
|
||||
#[arg(long = "monthly-quota", default_value_t = 0)]
|
||||
monthly_quota: u64,
|
||||
|
||||
/// Daily bandwidth limit per IP in bytes (0 = unlimited)
|
||||
#[arg(long = "ip-daily", default_value_t = 0)]
|
||||
ip_daily: u64,
|
||||
|
||||
/// Weekly bandwidth limit per IP in bytes (0 = unlimited)
|
||||
#[arg(long = "ip-weekly", default_value_t = 0)]
|
||||
ip_weekly: u64,
|
||||
|
||||
/// Monthly bandwidth limit per IP in bytes (0 = unlimited)
|
||||
#[arg(long = "ip-monthly", default_value_t = 0)]
|
||||
ip_monthly: u64,
|
||||
|
||||
/// Maximum concurrent connections per IP (0 = unlimited)
|
||||
#[arg(long = "max-conn-per-ip", default_value_t = 5)]
|
||||
max_conn_per_ip: u32,
|
||||
|
||||
/// Maximum test duration in seconds (0 = unlimited)
|
||||
#[arg(long = "max-duration", default_value_t = 300)]
|
||||
max_duration: u64,
|
||||
|
||||
/// Daily inbound (client→server) limit per IP in bytes (0 = use --ip-daily)
|
||||
#[arg(long = "ip-daily-in", default_value_t = 0)]
|
||||
ip_daily_in: u64,
|
||||
|
||||
/// Daily outbound (server→client) limit per IP in bytes (0 = use --ip-daily)
|
||||
#[arg(long = "ip-daily-out", default_value_t = 0)]
|
||||
ip_daily_out: u64,
|
||||
|
||||
/// Weekly inbound limit per IP in bytes (0 = use --ip-weekly)
|
||||
#[arg(long = "ip-weekly-in", default_value_t = 0)]
|
||||
ip_weekly_in: u64,
|
||||
|
||||
/// Weekly outbound limit per IP in bytes (0 = use --ip-weekly)
|
||||
#[arg(long = "ip-weekly-out", default_value_t = 0)]
|
||||
ip_weekly_out: u64,
|
||||
|
||||
/// Monthly inbound limit per IP in bytes (0 = use --ip-monthly)
|
||||
#[arg(long = "ip-monthly-in", default_value_t = 0)]
|
||||
ip_monthly_in: u64,
|
||||
|
||||
/// Monthly outbound limit per IP in bytes (0 = use --ip-monthly)
|
||||
#[arg(long = "ip-monthly-out", default_value_t = 0)]
|
||||
ip_monthly_out: u64,
|
||||
|
||||
/// How often to check quotas during a test in seconds
|
||||
#[arg(long = "quota-check-interval", default_value_t = 10)]
|
||||
quota_check_interval: u64,
|
||||
|
||||
/// Web dashboard port (0 = disabled)
|
||||
#[arg(long = "web-port", default_value_t = 8080)]
|
||||
web_port: u16,
|
||||
|
||||
/// Shared password for public mode (all users use this password)
|
||||
#[arg(long = "shared-password")]
|
||||
shared_password: Option<String>,
|
||||
|
||||
/// Use EC-SRP5 authentication
|
||||
#[arg(long = "ecsrp5")]
|
||||
ecsrp5: bool,
|
||||
|
||||
/// Syslog server address
|
||||
#[arg(long = "syslog")]
|
||||
syslog: Option<String>,
|
||||
|
||||
/// CSV output file
|
||||
#[arg(long = "csv")]
|
||||
csv: Option<String>,
|
||||
|
||||
/// Verbose logging
|
||||
#[arg(short = 'v', long = "verbose", action = clap::ArgAction::Count)]
|
||||
verbose: u8,
|
||||
|
||||
/// User management subcommand
|
||||
#[command(subcommand)]
|
||||
command: Option<UserCommand>,
|
||||
}
|
||||
|
||||
// User-management subcommands. When one of these is given, main() executes
// it against the user DB and exits without starting the server.
// NOTE: the `///` doc comments below double as clap-generated help text —
// changing them changes the CLI's --help output.
#[derive(clap::Subcommand, Debug)]
enum UserCommand {
    /// Add a user
    #[command(name = "useradd")]
    UserAdd {
        /// Username
        username: String,
        /// Password
        password: String,
    },
    /// Delete a user
    #[command(name = "userdel")]
    UserDel {
        /// Username
        username: String,
    },
    /// List all users
    #[command(name = "userlist")]
    UserList,
    /// Enable/disable a user
    #[command(name = "userset")]
    UserSet {
        /// Username
        username: String,
        /// Enable (true/false)
        #[arg(long)]
        enabled: Option<bool>,
        /// Daily quota in bytes
        #[arg(long)]
        daily: Option<i64>,
        /// Weekly quota in bytes
        #[arg(long)]
        weekly: Option<i64>,
    },
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let cli = Cli::parse();
|
||||
|
||||
let filter = match cli.verbose {
|
||||
0 => "info",
|
||||
1 => "debug",
|
||||
_ => "trace",
|
||||
};
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(
|
||||
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(filter)),
|
||||
)
|
||||
.with_target(false)
|
||||
.init();
|
||||
|
||||
// Initialize subsystems
|
||||
btest_rs::cpu::start_sampler();
|
||||
|
||||
if let Some(ref syslog_addr) = cli.syslog {
|
||||
if let Err(e) = btest_rs::syslog_logger::init(syslog_addr) {
|
||||
eprintln!("Warning: syslog init failed: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref csv_path) = cli.csv {
|
||||
if let Err(e) = btest_rs::csv_output::init(csv_path) {
|
||||
eprintln!("Warning: CSV init failed: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize user database
|
||||
let db = user_db::UserDb::open(&cli.users_db)?;
|
||||
db.ensure_tables()?;
|
||||
|
||||
// Handle user management subcommands (exit after)
|
||||
if let Some(cmd) = &cli.command {
|
||||
match cmd {
|
||||
UserCommand::UserAdd { username, password } => {
|
||||
db.add_user(username, password)?;
|
||||
println!("User '{}' added.", username);
|
||||
return Ok(());
|
||||
}
|
||||
UserCommand::UserDel { username } => {
|
||||
if db.delete_user(username)? {
|
||||
println!("User '{}' deleted.", username);
|
||||
} else {
|
||||
println!("User '{}' not found.", username);
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
UserCommand::UserList => {
|
||||
let users = db.list_users()?;
|
||||
if users.is_empty() {
|
||||
println!("No users.");
|
||||
} else {
|
||||
println!("{:<20} {:<10} {:<15} {:<15}", "USERNAME", "ENABLED", "DAILY_QUOTA", "WEEKLY_QUOTA");
|
||||
println!("{}", "-".repeat(60));
|
||||
for u in &users {
|
||||
println!("{:<20} {:<10} {:<15} {:<15}",
|
||||
u.username,
|
||||
if u.enabled { "yes" } else { "no" },
|
||||
if u.daily_quota == 0 { "default".to_string() } else { format!("{}B", u.daily_quota) },
|
||||
if u.weekly_quota == 0 { "default".to_string() } else { format!("{}B", u.weekly_quota) },
|
||||
);
|
||||
}
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
UserCommand::UserSet { username, enabled, daily, weekly } => {
|
||||
if let Some(e) = enabled {
|
||||
db.set_user_enabled(username, *e)?;
|
||||
println!("User '{}' enabled={}", username, e);
|
||||
}
|
||||
if daily.is_some() || weekly.is_some() {
|
||||
let d = daily.unwrap_or(0);
|
||||
let w = weekly.unwrap_or(0);
|
||||
db.set_user_quota(username, d, w, 0)?;
|
||||
println!("User '{}' quota: daily={}, weekly={}", username, d, w);
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tracing::info!("User database: {} ({} users)", cli.users_db, db.user_count()?);
|
||||
|
||||
// Initialize LDAP if configured
|
||||
if let Some(ref url) = cli.ldap_url {
|
||||
tracing::info!("LDAP configured: {}", url);
|
||||
}
|
||||
|
||||
// Initialize quota manager
|
||||
// Directional flags override combined: --ip-daily-in > --ip-daily > unlimited
|
||||
let or_fallback = |specific: u64, combined: u64| if specific > 0 { specific } else { combined };
|
||||
let quota_mgr = quota::QuotaManager::new(
|
||||
db.clone(),
|
||||
cli.daily_quota,
|
||||
cli.weekly_quota,
|
||||
cli.monthly_quota,
|
||||
cli.ip_daily,
|
||||
cli.ip_weekly,
|
||||
cli.ip_monthly,
|
||||
or_fallback(cli.ip_daily_in, cli.ip_daily),
|
||||
or_fallback(cli.ip_daily_out, cli.ip_daily),
|
||||
or_fallback(cli.ip_weekly_in, cli.ip_weekly),
|
||||
or_fallback(cli.ip_weekly_out, cli.ip_weekly),
|
||||
or_fallback(cli.ip_monthly_in, cli.ip_monthly),
|
||||
or_fallback(cli.ip_monthly_out, cli.ip_monthly),
|
||||
cli.max_conn_per_ip,
|
||||
cli.max_duration,
|
||||
);
|
||||
|
||||
let fmt_q = |v: u64| if v == 0 { "unlimited".to_string() } else { format!("{}B", v) };
|
||||
tracing::info!(
|
||||
"User quotas: daily={}, weekly={}, monthly={}",
|
||||
fmt_q(cli.daily_quota), fmt_q(cli.weekly_quota), fmt_q(cli.monthly_quota),
|
||||
);
|
||||
tracing::info!(
|
||||
"IP quotas: daily={}, weekly={}, monthly={}",
|
||||
fmt_q(cli.ip_daily), fmt_q(cli.ip_weekly), fmt_q(cli.ip_monthly),
|
||||
);
|
||||
tracing::info!(
|
||||
"Limits: max_conn_per_ip={}, max_duration={}s",
|
||||
cli.max_conn_per_ip, cli.max_duration,
|
||||
);
|
||||
|
||||
// Start web dashboard if port > 0
|
||||
if cli.web_port > 0 {
|
||||
let web_db = db.clone();
|
||||
let web_port = cli.web_port;
|
||||
tokio::spawn(async move {
|
||||
tracing::info!("Web dashboard starting on http://0.0.0.0:{}", web_port);
|
||||
let app = web::create_router(web_db);
|
||||
let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", web_port))
|
||||
.await
|
||||
.expect("Failed to bind web dashboard port");
|
||||
if let Err(e) = axum::serve(listener, app).await {
|
||||
tracing::error!("Web dashboard error: {}", e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
tracing::info!("btest-server-pro starting on port {}", cli.port);
|
||||
|
||||
let v4 = if cli.listen_addr.eq_ignore_ascii_case("none") { None } else { Some(cli.listen_addr) };
|
||||
let v6 = cli.listen6_addr;
|
||||
|
||||
server_loop::run_pro_server(
|
||||
cli.port,
|
||||
cli.ecsrp5,
|
||||
v4, v6,
|
||||
db,
|
||||
quota_mgr,
|
||||
cli.quota_check_interval,
|
||||
).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
470
src/server_pro/quota.rs
Normal file
470
src/server_pro/quota.rs
Normal file
@@ -0,0 +1,470 @@
|
||||
//! Bandwidth quota management for btest-server-pro.
|
||||
//!
|
||||
//! Enforces per-user and per-IP bandwidth limits (daily/weekly/monthly),
|
||||
//! with separate tracking for inbound (client-to-server) and outbound
|
||||
//! (server-to-client) directions.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use super::user_db::UserDb;
|
||||
|
||||
/// Traffic direction for bandwidth tests.
///
/// From the **server's** perspective:
/// - `Inbound` = client sends data to us (client TX, server RX)
/// - `Outbound` = we send data to the client (server TX, client RX)
/// - `Both` = bidirectional test
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Direction {
    /// Client → server (server receives).
    Inbound,
    /// Server → client (server transmits).
    Outbound,
    /// Bidirectional: both directional limit sets apply.
    Both,
}
|
||||
|
||||
/// Quota policy plus live per-IP connection tracking.
///
/// Cheap to `Clone` into each connection-handler task: the limit fields are
/// immutable configuration, and `active_connections` is behind an `Arc`, so
/// all clones share one connection map.
#[derive(Clone)]
pub struct QuotaManager {
    db: UserDb,
    /// Per-user defaults (0 = unlimited)
    default_daily: u64,
    default_weekly: u64,
    default_monthly: u64,
    /// Per-IP combined (inbound + outbound) limits (0 = unlimited) — for abuse prevention
    ip_daily: u64,
    ip_weekly: u64,
    ip_monthly: u64,
    /// Per-IP directional limits (0 = unlimited)
    ip_daily_inbound: u64,
    ip_daily_outbound: u64,
    ip_weekly_inbound: u64,
    ip_weekly_outbound: u64,
    ip_monthly_inbound: u64,
    ip_monthly_outbound: u64,
    /// Max simultaneous connections from one IP
    max_conn_per_ip: u32,
    /// Max test duration in seconds
    max_duration: u64,
    /// Live count of open connections per IP (shared across clones).
    active_connections: Arc<Mutex<HashMap<IpAddr, u32>>>,
}
|
||||
|
||||
/// Reason a connection or test is refused by quota/limit policy.
///
/// Each `*Exceeded` variant carries the observed usage and the limit it
/// violated, in bytes (connections for `TooManyConnections`).
#[derive(Debug)]
pub enum QuotaError {
    /// Per-user daily quota (tx + rx combined) exhausted.
    DailyExceeded { used: u64, limit: u64 },
    /// Per-user weekly quota (tx + rx combined) exhausted.
    WeeklyExceeded { used: u64, limit: u64 },
    /// Per-user monthly quota (tx + rx combined) exhausted.
    MonthlyExceeded { used: u64, limit: u64 },
    /// Combined (inbound + outbound) IP daily limit exceeded.
    IpDailyExceeded { used: u64, limit: u64 },
    /// Combined (inbound + outbound) IP weekly limit exceeded.
    IpWeeklyExceeded { used: u64, limit: u64 },
    /// Combined (inbound + outbound) IP monthly limit exceeded.
    IpMonthlyExceeded { used: u64, limit: u64 },
    /// Per-direction IP daily limits.
    IpInboundDailyExceeded { used: u64, limit: u64 },
    IpOutboundDailyExceeded { used: u64, limit: u64 },
    /// Per-direction IP weekly limits.
    IpInboundWeeklyExceeded { used: u64, limit: u64 },
    IpOutboundWeeklyExceeded { used: u64, limit: u64 },
    /// Per-direction IP monthly limits.
    IpInboundMonthlyExceeded { used: u64, limit: u64 },
    IpOutboundMonthlyExceeded { used: u64, limit: u64 },
    /// Too many simultaneous connections from one IP.
    TooManyConnections { current: u32, limit: u32 },
    /// The user record exists but is disabled.
    UserDisabled,
    /// No such user in the DB (or the lookup itself failed).
    UserNotFound,
}
|
||||
|
||||
impl std::fmt::Display for QuotaError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::DailyExceeded { used, limit } =>
|
||||
write!(f, "User daily quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::WeeklyExceeded { used, limit } =>
|
||||
write!(f, "User weekly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::MonthlyExceeded { used, limit } =>
|
||||
write!(f, "User monthly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpDailyExceeded { used, limit } =>
|
||||
write!(f, "IP daily quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpWeeklyExceeded { used, limit } =>
|
||||
write!(f, "IP weekly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpMonthlyExceeded { used, limit } =>
|
||||
write!(f, "IP monthly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpInboundDailyExceeded { used, limit } =>
|
||||
write!(f, "IP inbound daily quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpOutboundDailyExceeded { used, limit } =>
|
||||
write!(f, "IP outbound daily quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpInboundWeeklyExceeded { used, limit } =>
|
||||
write!(f, "IP inbound weekly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpOutboundWeeklyExceeded { used, limit } =>
|
||||
write!(f, "IP outbound weekly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpInboundMonthlyExceeded { used, limit } =>
|
||||
write!(f, "IP inbound monthly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::IpOutboundMonthlyExceeded { used, limit } =>
|
||||
write!(f, "IP outbound monthly quota exceeded: {}/{} bytes", used, limit),
|
||||
Self::TooManyConnections { current, limit } =>
|
||||
write!(f, "Too many connections from this IP: {}/{}", current, limit),
|
||||
Self::UserDisabled => write!(f, "User account is disabled"),
|
||||
Self::UserNotFound => write!(f, "User not found"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl QuotaManager {
    /// Construct a manager. Every limit argument uses 0 = unlimited.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        db: UserDb,
        default_daily: u64,
        default_weekly: u64,
        default_monthly: u64,
        ip_daily: u64,
        ip_weekly: u64,
        ip_monthly: u64,
        ip_daily_inbound: u64,
        ip_daily_outbound: u64,
        ip_weekly_inbound: u64,
        ip_weekly_outbound: u64,
        ip_monthly_inbound: u64,
        ip_monthly_outbound: u64,
        max_conn_per_ip: u32,
        max_duration: u64,
    ) -> Self {
        Self {
            db,
            default_daily,
            default_weekly,
            default_monthly,
            ip_daily,
            ip_weekly,
            ip_monthly,
            ip_daily_inbound,
            ip_daily_outbound,
            ip_weekly_inbound,
            ip_weekly_outbound,
            ip_monthly_inbound,
            ip_monthly_outbound,
            max_conn_per_ip,
            max_duration,
            active_connections: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Check if a user is allowed to start a test.
    ///
    /// Per-user daily/weekly quotas stored on the user record override the
    /// server-wide defaults. DB read errors during usage lookup are treated
    /// as zero usage (fail-open) via `unwrap_or((0, 0))`.
    pub fn check_user(&self, username: &str) -> Result<(), QuotaError> {
        // A DB error on the lookup is collapsed into UserNotFound.
        let user = self.db.get_user(username)
            .map_err(|_| QuotaError::UserNotFound)?
            .ok_or(QuotaError::UserNotFound)?;

        if !user.enabled {
            return Err(QuotaError::UserDisabled);
        }

        // Daily — per-user override (if > 0) beats the server default.
        let daily_limit = if user.daily_quota > 0 { user.daily_quota as u64 } else { self.default_daily };
        if daily_limit > 0 {
            let (tx, rx) = self.db.get_daily_usage(username).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= daily_limit {
                return Err(QuotaError::DailyExceeded { used, limit: daily_limit });
            }
        }

        // Weekly
        let weekly_limit = if user.weekly_quota > 0 { user.weekly_quota as u64 } else { self.default_weekly };
        if weekly_limit > 0 {
            let (tx, rx) = self.db.get_weekly_usage(username).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= weekly_limit {
                return Err(QuotaError::WeeklyExceeded { used, limit: weekly_limit });
            }
        }

        // Monthly
        // NOTE(review): unlike daily/weekly, only the server-wide monthly
        // default is enforced — no per-user monthly override is consulted
        // here; confirm that is intended.
        if self.default_monthly > 0 {
            let (tx, rx) = self.db.get_monthly_usage(username).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= self.default_monthly {
                return Err(QuotaError::MonthlyExceeded { used, limit: self.default_monthly });
            }
        }

        Ok(())
    }

    /// Check if an IP is allowed to connect, considering both combined and
    /// directional bandwidth quotas.
    ///
    /// The `direction` parameter indicates which direction the test will use.
    /// For `Direction::Both`, both inbound and outbound directional limits are
    /// checked. Combined (total) limits are always checked regardless of
    /// direction.
    pub fn check_ip(&self, ip: &IpAddr, direction: Direction) -> Result<(), QuotaError> {
        // Connection limit
        // NOTE(review): the count is read here but incremented later in
        // connect(); two racing connections from one IP can both pass this
        // check — confirm the small overshoot is acceptable.
        if self.max_conn_per_ip > 0 {
            let conns = self.active_connections.lock().unwrap();
            let current = conns.get(ip).copied().unwrap_or(0);
            if current >= self.max_conn_per_ip {
                return Err(QuotaError::TooManyConnections {
                    current,
                    limit: self.max_conn_per_ip,
                });
            }
        }

        let ip_str = ip.to_string();

        // --- Combined (inbound + outbound) limits ---
        self.check_ip_combined(&ip_str)?;

        // --- Directional limits ---
        let check_inbound = matches!(direction, Direction::Inbound | Direction::Both);
        let check_outbound = matches!(direction, Direction::Outbound | Direction::Both);

        if check_inbound {
            self.check_ip_inbound(&ip_str)?;
        }
        if check_outbound {
            self.check_ip_outbound(&ip_str)?;
        }

        Ok(())
    }

    /// Check combined (total inbound + outbound) IP limits.
    /// DB lookup failures count as zero usage (fail-open).
    fn check_ip_combined(&self, ip_str: &str) -> Result<(), QuotaError> {
        // IP daily (combined)
        if self.ip_daily > 0 {
            let (tx, rx) = self.db.get_ip_daily_usage(ip_str).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= self.ip_daily {
                return Err(QuotaError::IpDailyExceeded { used, limit: self.ip_daily });
            }
        }

        // IP weekly (combined)
        if self.ip_weekly > 0 {
            let (tx, rx) = self.db.get_ip_weekly_usage(ip_str).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= self.ip_weekly {
                return Err(QuotaError::IpWeeklyExceeded { used, limit: self.ip_weekly });
            }
        }

        // IP monthly (combined)
        if self.ip_monthly > 0 {
            let (tx, rx) = self.db.get_ip_monthly_usage(ip_str).unwrap_or((0, 0));
            let used = tx + rx;
            if used >= self.ip_monthly {
                return Err(QuotaError::IpMonthlyExceeded { used, limit: self.ip_monthly });
            }
        }

        Ok(())
    }

    /// Check inbound-only (client sends to us) IP limits.
    fn check_ip_inbound(&self, ip_str: &str) -> Result<(), QuotaError> {
        // Daily inbound
        if self.ip_daily_inbound > 0 {
            let used = self.db.get_ip_daily_inbound(ip_str).unwrap_or(0);
            if used >= self.ip_daily_inbound {
                return Err(QuotaError::IpInboundDailyExceeded {
                    used,
                    limit: self.ip_daily_inbound,
                });
            }
        }

        // Weekly inbound
        if self.ip_weekly_inbound > 0 {
            let used = self.db.get_ip_weekly_inbound(ip_str).unwrap_or(0);
            if used >= self.ip_weekly_inbound {
                return Err(QuotaError::IpInboundWeeklyExceeded {
                    used,
                    limit: self.ip_weekly_inbound,
                });
            }
        }

        // Monthly inbound
        if self.ip_monthly_inbound > 0 {
            let used = self.db.get_ip_monthly_inbound(ip_str).unwrap_or(0);
            if used >= self.ip_monthly_inbound {
                return Err(QuotaError::IpInboundMonthlyExceeded {
                    used,
                    limit: self.ip_monthly_inbound,
                });
            }
        }

        Ok(())
    }

    /// Check outbound-only (we send to client) IP limits.
    fn check_ip_outbound(&self, ip_str: &str) -> Result<(), QuotaError> {
        // Daily outbound
        if self.ip_daily_outbound > 0 {
            let used = self.db.get_ip_daily_outbound(ip_str).unwrap_or(0);
            if used >= self.ip_daily_outbound {
                return Err(QuotaError::IpOutboundDailyExceeded {
                    used,
                    limit: self.ip_daily_outbound,
                });
            }
        }

        // Weekly outbound
        if self.ip_weekly_outbound > 0 {
            let used = self.db.get_ip_weekly_outbound(ip_str).unwrap_or(0);
            if used >= self.ip_weekly_outbound {
                return Err(QuotaError::IpOutboundWeeklyExceeded {
                    used,
                    limit: self.ip_weekly_outbound,
                });
            }
        }

        // Monthly outbound
        if self.ip_monthly_outbound > 0 {
            let used = self.db.get_ip_monthly_outbound(ip_str).unwrap_or(0);
            if used >= self.ip_monthly_outbound {
                return Err(QuotaError::IpOutboundMonthlyExceeded {
                    used,
                    limit: self.ip_monthly_outbound,
                });
            }
        }

        Ok(())
    }

    /// Register a live connection from `ip` (call once per admitted
    /// connection; pair with `disconnect`).
    pub fn connect(&self, ip: &IpAddr) {
        let mut conns = self.active_connections.lock().unwrap();
        *conns.entry(*ip).or_insert(0) += 1;
    }

    /// Unregister a live connection; the entry is removed at zero so the
    /// map does not grow unboundedly. A no-op for IPs with no entry.
    pub fn disconnect(&self, ip: &IpAddr) {
        let mut conns = self.active_connections.lock().unwrap();
        if let Some(count) = conns.get_mut(ip) {
            *count = count.saturating_sub(1);
            if *count == 0 {
                conns.remove(ip);
            }
        }
    }

    /// Record usage after a test completes (both user and IP), with separate
    /// inbound and outbound byte counts.
    ///
    /// - `inbound_bytes`: bytes the client sent to us (server RX).
    /// - `outbound_bytes`: bytes we sent to the client (server TX).
    ///
    /// Both the combined user/IP usage and directional IP usage are recorded.
    /// DB write failures are logged but not propagated (the test already ran).
    pub fn record_usage(
        &self,
        username: &str,
        ip: &str,
        inbound_bytes: u64,
        outbound_bytes: u64,
    ) {
        // Record combined user usage (tx/rx from the server's perspective:
        // tx = outbound, rx = inbound).
        if let Err(e) = self.db.record_usage(username, outbound_bytes, inbound_bytes) {
            tracing::error!("Failed to record user usage for {}: {}", username, e);
        }

        // Record IP usage — record_ip_usage already writes both the
        // inbound_bytes and outbound_bytes columns in one operation.
        // Do NOT also call record_ip_inbound_usage/record_ip_outbound_usage
        // as they update the same columns and would double-count.
        if let Err(e) = self.db.record_ip_usage(ip, outbound_bytes, inbound_bytes) {
            tracing::error!("Failed to record IP usage for {}: {}", ip, e);
        }
    }

    /// Calculate the remaining byte budget for a user+IP combination.
    /// Returns the minimum remaining quota across all applicable limits.
    /// Used to set `BandwidthState::byte_budget` before a test starts,
    /// preventing overshoot beyond quota boundaries.
    pub fn remaining_budget(&self, username: &str, ip: &IpAddr) -> u64 {
        // u64::MAX means "no applicable limit".
        let mut budget = u64::MAX;
        let ip_str = ip.to_string();

        // Helper: min that ignores 0 (unlimited)
        let cap = |budget: &mut u64, limit: u64, used: u64| {
            if limit > 0 {
                let remaining = limit.saturating_sub(used);
                *budget = (*budget).min(remaining);
            }
        };

        // User quotas (combined tx+rx). An unknown user contributes no cap.
        if let Ok(Some(user)) = self.db.get_user(username) {
            let daily_limit = if user.daily_quota > 0 { user.daily_quota as u64 } else { self.default_daily };
            if daily_limit > 0 {
                let (tx, rx) = self.db.get_daily_usage(username).unwrap_or((0, 0));
                cap(&mut budget, daily_limit, tx + rx);
            }

            let weekly_limit = if user.weekly_quota > 0 { user.weekly_quota as u64 } else { self.default_weekly };
            if weekly_limit > 0 {
                let (tx, rx) = self.db.get_weekly_usage(username).unwrap_or((0, 0));
                cap(&mut budget, weekly_limit, tx + rx);
            }

            if self.default_monthly > 0 {
                let (tx, rx) = self.db.get_monthly_usage(username).unwrap_or((0, 0));
                cap(&mut budget, self.default_monthly, tx + rx);
            }
        }

        // IP combined quotas
        if self.ip_daily > 0 {
            let (tx, rx) = self.db.get_ip_daily_usage(&ip_str).unwrap_or((0, 0));
            cap(&mut budget, self.ip_daily, tx + rx);
        }
        if self.ip_weekly > 0 {
            let (tx, rx) = self.db.get_ip_weekly_usage(&ip_str).unwrap_or((0, 0));
            cap(&mut budget, self.ip_weekly, tx + rx);
        }
        if self.ip_monthly > 0 {
            let (tx, rx) = self.db.get_ip_monthly_usage(&ip_str).unwrap_or((0, 0));
            cap(&mut budget, self.ip_monthly, tx + rx);
        }

        // IP directional quotas — the single combined budget is
        // conservatively capped by each directional remainder (the test's
        // direction is not known here), so the budget can be tighter than
        // strictly necessary for one-directional tests.
        if self.ip_daily_inbound > 0 {
            let used = self.db.get_ip_daily_inbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_daily_inbound, used);
        }
        if self.ip_daily_outbound > 0 {
            let used = self.db.get_ip_daily_outbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_daily_outbound, used);
        }
        if self.ip_weekly_inbound > 0 {
            let used = self.db.get_ip_weekly_inbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_weekly_inbound, used);
        }
        if self.ip_weekly_outbound > 0 {
            let used = self.db.get_ip_weekly_outbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_weekly_outbound, used);
        }
        if self.ip_monthly_inbound > 0 {
            let used = self.db.get_ip_monthly_inbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_monthly_inbound, used);
        }
        if self.ip_monthly_outbound > 0 {
            let used = self.db.get_ip_monthly_outbound(&ip_str).unwrap_or(0);
            cap(&mut budget, self.ip_monthly_outbound, used);
        }

        budget
    }

    /// Maximum allowed test duration in seconds (0 = unlimited).
    pub fn max_duration(&self) -> u64 {
        self.max_duration
    }

    /// Current number of live connections recorded for `ip`.
    pub fn active_connections_count(&self, ip: &IpAddr) -> u32 {
        let conns = self.active_connections.lock().unwrap();
        conns.get(ip).copied().unwrap_or(0)
    }
}
|
||||
449
src/server_pro/server_loop.rs
Normal file
449
src/server_pro/server_loop.rs
Normal file
@@ -0,0 +1,449 @@
|
||||
//! Enhanced server loop with quota enforcement.
|
||||
//!
|
||||
//! Wraps the standard btest server connection handler with:
|
||||
//! - Pre-connection IP/user quota checks
|
||||
//! - MD5 challenge-response auth against user DB
|
||||
//! - TCP multi-connection session support
|
||||
//! - Mid-session quota enforcement via QuotaEnforcer
|
||||
//! - Post-session usage recording
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use btest_rs::protocol::*;
|
||||
use btest_rs::bandwidth::BandwidthState;
|
||||
|
||||
use super::enforcer::{QuotaEnforcer, StopReason};
|
||||
use super::quota::{Direction, QuotaManager};
|
||||
use super::user_db::UserDb;
|
||||
|
||||
/// Pending TCP multi-connection session.
///
/// Created by the primary connection of a multi-connection TCP test;
/// secondary connections join by presenting the session token until
/// `streams.len()` reaches `expected`, at which point the test starts.
struct TcpSession {
    // IP the primary connection came from; joining secondaries must match.
    peer_ip: std::net::IpAddr,
    // Authenticated user that owns this session.
    username: String,
    // Test command negotiated on the primary connection.
    cmd: Command,
    // Connections collected so far (primary + secondaries).
    streams: Vec<TcpStream>,
    // Total connection count the client requested (from cmd.tcp_conn_count —
    // TODO confirm against the registration path).
    expected: u8,
}
|
||||
|
||||
/// Shared registry of pending multi-connection sessions, keyed by the 16-bit
/// session token handed to the client in the auth-OK response.
type SessionMap = Arc<Mutex<HashMap<u16, TcpSession>>>;
|
||||
|
||||
/// Run the pro server with quota enforcement.
///
/// Binds the configured IPv4 and/or IPv6 listeners on `port` (bailing if
/// neither is bound), then accepts connections forever, spawning one task
/// per connection. For primary connections the task releases the per-IP
/// connection slot when the handler finishes.
pub async fn run_pro_server(
    port: u16,
    _ecsrp5: bool,
    listen_v4: Option<String>,
    listen_v6: Option<String>,
    db: UserDb,
    quota_mgr: QuotaManager,
    quota_check_interval: u64,
) -> anyhow::Result<()> {
    let v4_listener = if let Some(ref addr) = listen_v4 {
        let bind_addr = format!("{}:{}", addr, port);
        Some(TcpListener::bind(&bind_addr).await?)
    } else {
        None
    };

    let v6_listener = if let Some(ref addr) = listen_v6 {
        // IPv6 literals must be bracketed in "addr:port" form.
        let bind_addr = format!("[{}]:{}", addr, port);
        Some(TcpListener::bind(&bind_addr).await?)
    } else {
        None
    };

    if v4_listener.is_none() && v6_listener.is_none() {
        anyhow::bail!("No listeners bound");
    }

    // Pending multi-connection sessions, shared with every handler task.
    let sessions: SessionMap = Arc::new(Mutex::new(HashMap::new()));

    tracing::info!("btest-server-pro ready, accepting connections");

    loop {
        // Accept from whichever bound listener becomes ready first.
        let (stream, peer) = match (&v4_listener, &v6_listener) {
            (Some(v4), Some(v6)) => {
                tokio::select! {
                    r = v4.accept() => r?,
                    r = v6.accept() => r?,
                }
            }
            (Some(v4), None) => v4.accept().await?,
            (None, Some(v6)) => v6.accept().await?,
            _ => unreachable!(),
        };

        tracing::info!("New connection from {}", peer);

        // Per-task clones (UserDb/QuotaManager are Clone; sessions is Arc).
        let db = db.clone();
        let qm = quota_mgr.clone();
        let interval = quota_check_interval;
        let sess = sessions.clone();

        tokio::spawn(async move {
            let is_primary = match handle_pro_connection(stream, peer, db, qm.clone(), interval, sess).await {
                Ok(Some((username, stop_reason, tx, rx))) => {
                    tracing::info!(
                        "Client {} (user '{}') finished: {} (tx={}, rx={})",
                        peer, username, stop_reason, tx, rx,
                    );
                    btest_rs::syslog_logger::test_end(
                        &peer.to_string(), "btest", &format!("{}", stop_reason),
                        tx, rx, 0, 0,
                    );
                    true
                }
                Ok(None) => false, // secondary connection or pending multi-conn
                Err(e) => {
                    tracing::error!("Client {} error: {}", peer, e);
                    true
                }
            };
            // Only decrement connection count for primary connections
            // NOTE(review): an Err raised before the handler reached
            // quota_mgr.connect() (e.g. a handshake I/O failure) still lands
            // here and decrements this IP's live count, which can under-count
            // a concurrent connection from the same IP — verify against
            // handle_pro_connection's early-error paths.
            if is_primary {
                qm.disconnect(&peer.ip());
            }
        });
    }
}
|
||||
|
||||
/// Handle a single TCP connection. Returns None for secondary multi-conn joins.
|
||||
async fn handle_pro_connection(
|
||||
mut stream: TcpStream,
|
||||
peer: SocketAddr,
|
||||
db: UserDb,
|
||||
quota_mgr: QuotaManager,
|
||||
quota_check_interval: u64,
|
||||
sessions: SessionMap,
|
||||
) -> anyhow::Result<Option<(String, StopReason, u64, u64)>> {
|
||||
stream.set_nodelay(true)?;
|
||||
|
||||
// HELLO
|
||||
stream.write_all(&HELLO).await?;
|
||||
|
||||
// Read command (or session token for secondary connections)
|
||||
let mut cmd_buf = [0u8; 16];
|
||||
stream.read_exact(&mut cmd_buf).await?;
|
||||
|
||||
// Check if this is a secondary connection joining an existing TCP session
|
||||
// Secondary connections send [HI, LO, ...] matching an existing session token
|
||||
{
|
||||
let potential_token = u16::from_be_bytes([cmd_buf[0], cmd_buf[1]]);
|
||||
let mut map = sessions.lock().await;
|
||||
if let Some(session) = map.get_mut(&potential_token) {
|
||||
if session.peer_ip == peer.ip()
|
||||
&& session.streams.len() < session.expected as usize
|
||||
{
|
||||
tracing::info!(
|
||||
"Secondary connection from {} joining session (token={:04x}, {}/{})",
|
||||
peer, potential_token,
|
||||
session.streams.len() + 1, session.expected,
|
||||
);
|
||||
|
||||
// Auth the secondary connection with same token response
|
||||
let ok = [0x01, cmd_buf[0], cmd_buf[1], 0x00];
|
||||
stream.write_all(&ok).await?;
|
||||
stream.flush().await?;
|
||||
|
||||
session.streams.push(stream);
|
||||
|
||||
// If all connections have joined, start the test
|
||||
if session.streams.len() >= session.expected as usize {
|
||||
let session = map.remove(&potential_token).unwrap();
|
||||
let db2 = db.clone();
|
||||
let qm2 = quota_mgr.clone();
|
||||
tokio::spawn(async move {
|
||||
match run_pro_multiconn_test(
|
||||
session.streams, session.cmd, peer,
|
||||
&session.username, db2, qm2, quota_check_interval,
|
||||
).await {
|
||||
Ok((stop, tx, rx)) => {
|
||||
tracing::info!(
|
||||
"Multi-conn {} (user '{}') finished: {} (tx={}, rx={})",
|
||||
peer, session.username, stop, tx, rx,
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Multi-conn {} error: {}", peer, e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Primary connection — check IP quota/connection limit now
|
||||
if let Err(e) = quota_mgr.check_ip(&peer.ip(), Direction::Both) {
|
||||
tracing::warn!("Rejected {} — {}", peer, e);
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), "-", "-", &format!("{}", e),
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
quota_mgr.connect(&peer.ip());
|
||||
|
||||
let cmd = Command::deserialize(&cmd_buf);
|
||||
|
||||
tracing::info!(
|
||||
"Client {} command: proto={} dir={} conn_count={} tx_size={}",
|
||||
peer,
|
||||
if cmd.is_udp() { "UDP" } else { "TCP" },
|
||||
match cmd.direction { CMD_DIR_RX => "RX", CMD_DIR_TX => "TX", _ => "BOTH" },
|
||||
cmd.tcp_conn_count,
|
||||
cmd.tx_size,
|
||||
);
|
||||
|
||||
// Build auth OK response with session token for multi-connection
|
||||
let is_tcp_multi = !cmd.is_udp() && cmd.tcp_conn_count > 0;
|
||||
let session_token: u16 = if is_tcp_multi {
|
||||
rand::random::<u16>() | 0x0101 // ensure both bytes non-zero
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let ok_response: [u8; 4] = if is_tcp_multi {
|
||||
[0x01, (session_token >> 8) as u8, (session_token & 0xFF) as u8, 0x00]
|
||||
} else {
|
||||
AUTH_OK
|
||||
};
|
||||
|
||||
// Authenticate — MD5 challenge-response against DB
|
||||
stream.write_all(&AUTH_REQUIRED).await?;
|
||||
let challenge = btest_rs::auth::generate_challenge();
|
||||
stream.write_all(&challenge).await?;
|
||||
stream.flush().await?;
|
||||
|
||||
let mut response = [0u8; 48];
|
||||
stream.read_exact(&mut response).await?;
|
||||
|
||||
let received_hash = &response[0..16];
|
||||
let received_user = &response[16..48];
|
||||
|
||||
let user_end = received_user.iter().position(|&b| b == 0).unwrap_or(32);
|
||||
let username = std::str::from_utf8(&received_user[..user_end])
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
|
||||
// Verify against DB
|
||||
let user = db.get_user(&username)?;
|
||||
match user {
|
||||
None => {
|
||||
tracing::warn!("Auth failed: user '{}' not found", username);
|
||||
stream.write_all(&AUTH_FAILED).await?;
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), &username, "md5", "user not found",
|
||||
);
|
||||
anyhow::bail!("User not found");
|
||||
}
|
||||
Some(u) => {
|
||||
if !u.enabled {
|
||||
tracing::warn!("Auth failed: user '{}' is disabled", username);
|
||||
stream.write_all(&AUTH_FAILED).await?;
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), &username, "md5", "user disabled",
|
||||
);
|
||||
anyhow::bail!("User disabled");
|
||||
}
|
||||
|
||||
// Verify MD5 hash against stored raw password
|
||||
if let Ok(Some(raw_pass)) = db.get_password(&username) {
|
||||
let expected_hash = btest_rs::auth::compute_auth_hash(&raw_pass, &challenge);
|
||||
if received_hash != expected_hash {
|
||||
tracing::warn!("Auth failed: password mismatch for user '{}'", username);
|
||||
stream.write_all(&AUTH_FAILED).await?;
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), &username, "md5", "password mismatch",
|
||||
);
|
||||
anyhow::bail!("Auth failed");
|
||||
}
|
||||
}
|
||||
// If no raw password stored, accept (backwards compat with old DB entries)
|
||||
|
||||
stream.write_all(&ok_response).await?;
|
||||
stream.flush().await?;
|
||||
|
||||
tracing::info!("Auth successful for user '{}'", username);
|
||||
btest_rs::syslog_logger::auth_success(
|
||||
&peer.to_string(), &username, "md5",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Check user quota before starting test
|
||||
if let Err(e) = quota_mgr.check_user(&username) {
|
||||
tracing::warn!("Quota check failed for '{}': {}", username, e);
|
||||
btest_rs::syslog_logger::auth_failure(
|
||||
&peer.to_string(), &username, "quota", &format!("{}", e),
|
||||
);
|
||||
return Ok(Some((username, StopReason::UserDailyQuota, 0, 0)));
|
||||
}
|
||||
|
||||
// TCP multi-connection: register session and wait for secondary connections
|
||||
if is_tcp_multi {
|
||||
tracing::info!(
|
||||
"TCP multi-connection: waiting for {} connections (token={:04x})",
|
||||
cmd.tcp_conn_count, session_token,
|
||||
);
|
||||
let mut map = sessions.lock().await;
|
||||
map.insert(session_token, TcpSession {
|
||||
peer_ip: peer.ip(),
|
||||
username: username.clone(),
|
||||
cmd: cmd.clone(),
|
||||
streams: vec![stream],
|
||||
expected: cmd.tcp_conn_count, // tcp_conn_count includes the primary
|
||||
});
|
||||
// The test will be started when all connections join (in the secondary handler above)
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Single-connection test
|
||||
run_pro_single_test(stream, cmd, peer, &username, db, quota_mgr, quota_check_interval).await
|
||||
.map(|(stop, tx, rx)| Some((username, stop, tx, rx)))
|
||||
}
|
||||
|
||||
/// Run a single-connection bandwidth test with quota enforcement.
|
||||
async fn run_pro_single_test(
|
||||
stream: TcpStream,
|
||||
cmd: Command,
|
||||
peer: SocketAddr,
|
||||
username: &str,
|
||||
db: UserDb,
|
||||
quota_mgr: QuotaManager,
|
||||
quota_check_interval: u64,
|
||||
) -> anyhow::Result<(StopReason, u64, u64)> {
|
||||
let proto_str = if cmd.is_udp() { "UDP" } else { "TCP" };
|
||||
let dir_str = match cmd.direction {
|
||||
CMD_DIR_RX => "RX", CMD_DIR_TX => "TX", _ => "BOTH"
|
||||
};
|
||||
let session_id = db.start_session(
|
||||
username, &peer.ip().to_string(), proto_str, dir_str,
|
||||
)?;
|
||||
|
||||
btest_rs::syslog_logger::test_start(
|
||||
&peer.to_string(), proto_str, dir_str, cmd.tcp_conn_count,
|
||||
);
|
||||
|
||||
let state = BandwidthState::new();
|
||||
|
||||
// Set byte budget
|
||||
let budget = quota_mgr.remaining_budget(username, &peer.ip());
|
||||
if budget < u64::MAX {
|
||||
state.set_budget(budget);
|
||||
tracing::info!("Byte budget for '{}' from {}: {} bytes", username, peer.ip(), budget);
|
||||
}
|
||||
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
quota_mgr.clone(),
|
||||
username.to_string(),
|
||||
peer.ip(),
|
||||
state.clone(),
|
||||
quota_check_interval,
|
||||
quota_mgr.max_duration(),
|
||||
);
|
||||
|
||||
let enforcer_state = state.clone();
|
||||
let enforcer_handle = tokio::spawn(async move {
|
||||
enforcer.run().await
|
||||
});
|
||||
|
||||
static UDP_PORT_OFFSET: std::sync::atomic::AtomicU16 = std::sync::atomic::AtomicU16::new(0);
|
||||
|
||||
let mut stream_mut = stream;
|
||||
let test_result = if cmd.is_udp() {
|
||||
let offset = UDP_PORT_OFFSET.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
|
||||
let udp_port = btest_rs::protocol::BTEST_UDP_PORT_START + offset;
|
||||
btest_rs::server::run_udp_test(
|
||||
&mut stream_mut, peer, &cmd, state.clone(), udp_port,
|
||||
).await
|
||||
} else {
|
||||
btest_rs::server::run_tcp_test(stream_mut, cmd.clone(), state.clone()).await
|
||||
};
|
||||
|
||||
enforcer_state.running.store(false, std::sync::atomic::Ordering::SeqCst);
|
||||
let stop_reason = enforcer_handle.await.unwrap_or(StopReason::ClientDisconnected);
|
||||
|
||||
let final_reason = match &test_result {
|
||||
Ok(_) => {
|
||||
if stop_reason == StopReason::ClientDisconnected {
|
||||
StopReason::ClientDisconnected
|
||||
} else {
|
||||
stop_reason
|
||||
}
|
||||
}
|
||||
Err(_) => StopReason::ClientDisconnected,
|
||||
};
|
||||
|
||||
let (total_tx, total_rx, _, _) = state.summary();
|
||||
quota_mgr.record_usage(username, &peer.ip().to_string(), total_tx, total_rx);
|
||||
db.end_session(session_id, total_tx, total_rx)?;
|
||||
|
||||
Ok((final_reason, total_tx, total_rx))
|
||||
}
|
||||
|
||||
/// Run a TCP multi-connection test with all streams collected.
|
||||
/// Delegates to the standard multi-conn handler which correctly manages
|
||||
/// TX+status injection for bidirectional mode.
|
||||
async fn run_pro_multiconn_test(
|
||||
streams: Vec<TcpStream>,
|
||||
cmd: Command,
|
||||
peer: SocketAddr,
|
||||
username: &str,
|
||||
db: UserDb,
|
||||
quota_mgr: QuotaManager,
|
||||
quota_check_interval: u64,
|
||||
) -> anyhow::Result<(StopReason, u64, u64)> {
|
||||
let dir_str = match cmd.direction {
|
||||
CMD_DIR_RX => "RX", CMD_DIR_TX => "TX", _ => "BOTH"
|
||||
};
|
||||
let session_id = db.start_session(
|
||||
username, &peer.ip().to_string(), "TCP", dir_str,
|
||||
)?;
|
||||
|
||||
tracing::info!(
|
||||
"Starting TCP multi-conn test: {} streams, dir={}",
|
||||
streams.len(), dir_str,
|
||||
);
|
||||
|
||||
let state = BandwidthState::new();
|
||||
|
||||
let budget = quota_mgr.remaining_budget(username, &peer.ip());
|
||||
if budget < u64::MAX {
|
||||
state.set_budget(budget);
|
||||
}
|
||||
|
||||
let enforcer = QuotaEnforcer::new(
|
||||
quota_mgr.clone(),
|
||||
username.to_string(),
|
||||
peer.ip(),
|
||||
state.clone(),
|
||||
quota_check_interval,
|
||||
quota_mgr.max_duration(),
|
||||
);
|
||||
|
||||
let enforcer_state = state.clone();
|
||||
let enforcer_handle = tokio::spawn(async move {
|
||||
enforcer.run().await
|
||||
});
|
||||
|
||||
// Use the standard multi-connection handler which correctly handles
|
||||
// all direction modes (TX, RX, BOTH with status injection)
|
||||
let _test_result = btest_rs::server::run_tcp_multiconn_test(
|
||||
streams, cmd, state.clone(),
|
||||
).await;
|
||||
|
||||
enforcer_state.running.store(false, std::sync::atomic::Ordering::SeqCst);
|
||||
let stop_reason = enforcer_handle.await.unwrap_or(StopReason::ClientDisconnected);
|
||||
|
||||
let (total_tx, total_rx, _, _) = state.summary();
|
||||
quota_mgr.record_usage(username, &peer.ip().to_string(), total_tx, total_rx);
|
||||
db.end_session(session_id, total_tx, total_rx)?;
|
||||
|
||||
Ok((stop_reason, total_tx, total_rx))
|
||||
}
|
||||
641
src/server_pro/user_db.rs
Normal file
641
src/server_pro/user_db.rs
Normal file
@@ -0,0 +1,641 @@
|
||||
//! SQLite-based user database for btest-server-pro.
|
||||
//!
|
||||
//! Stores users with credentials, quotas, and usage tracking.
|
||||
|
||||
use rusqlite::{Connection, params};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
/// Handle to the SQLite user/usage database.
///
/// Cheap to clone: both fields are `Arc`s, so clones share the same
/// underlying connection. All access is serialized through the inner
/// `Mutex`, which makes the handle safe to share across tasks but means
/// long-running queries block other callers.
#[derive(Clone)]
pub struct UserDb {
    // Single shared connection; every method locks this for its duration.
    conn: Arc<Mutex<Connection>>,
    // Filesystem path the connection was opened with (see `path()`).
    path: Arc<String>,
}
|
||||
|
||||
/// A row from the `users` table.
#[derive(Debug, Clone)]
pub struct User {
    pub id: i64,
    pub username: String,
    pub password_hash: String, // stored as hex of SHA256(username:password)
    pub daily_quota: i64,      // 0 = use default
    pub weekly_quota: i64,     // 0 = use default
    pub enabled: bool,         // disabled users fail auth and quota checks
}

/// One per-user, per-day rollup row from the `usage` table.
#[derive(Debug)]
pub struct UsageRecord {
    pub username: String,
    pub date: String, // YYYY-MM-DD
    pub tx_bytes: u64,
    pub rx_bytes: u64,
    pub test_count: u32,
}

/// Per-second bandwidth interval data for graphing.
#[derive(Debug, Clone)]
pub struct IntervalData {
    pub interval_num: i32,
    pub tx_mbps: f64,
    pub rx_mbps: f64,
    pub local_cpu: i32,
    pub remote_cpu: i32,
    pub lost: i64, // lost_packets column; meaningful for UDP tests
}

/// Summary of a single test session.
#[derive(Debug, Clone)]
pub struct SessionSummary {
    pub id: i64,
    pub started_at: String,
    // None while the session is still in progress (end_session not called).
    pub ended_at: Option<String>,
    pub protocol: String,
    pub direction: String,
    pub tx_bytes: u64,
    pub rx_bytes: u64,
}

/// Aggregate statistics for an IP address.
#[derive(Debug, Clone)]
pub struct IpStats {
    pub total_tests: u64,
    pub total_inbound: u64,
    pub total_outbound: u64,
    pub avg_tx_mbps: f64,
    pub avg_rx_mbps: f64,
}
|
||||
|
||||
impl UserDb {
|
||||
pub fn open(path: &str) -> anyhow::Result<Self> {
|
||||
let conn = Connection::open(path)?;
|
||||
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA busy_timeout=5000;")?;
|
||||
Ok(Self {
|
||||
conn: Arc::new(Mutex::new(conn)),
|
||||
path: Arc::new(path.to_string()),
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the database file path.
|
||||
pub fn path(&self) -> &str {
|
||||
&self.path
|
||||
}
|
||||
|
||||
/// Create all tables and indexes if they do not exist yet (idempotent).
///
/// Schema overview:
/// - `users`:          credentials + per-user quota overrides (0 = default)
/// - `usage`:          per-user, per-day byte/test rollup, UNIQUE(username, date)
/// - `ip_usage`:       per-IP, per-day rollup, UNIQUE(ip, date)
/// - `sessions`:       one row per test run with start/end timestamps
/// - `test_intervals`: per-second data points for graphing, FK -> sessions
pub fn ensure_tables(&self) -> anyhow::Result<()> {
    let conn = self.conn.lock().unwrap();
    conn.execute_batch("
        CREATE TABLE IF NOT EXISTS users (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT UNIQUE NOT NULL,
            password_hash TEXT NOT NULL,
            daily_quota INTEGER DEFAULT 0,
            weekly_quota INTEGER DEFAULT 0,
            enabled INTEGER DEFAULT 1,
            created_at TEXT DEFAULT (datetime('now'))
        );

        CREATE TABLE IF NOT EXISTS usage (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT NOT NULL,
            date TEXT NOT NULL,
            tx_bytes INTEGER DEFAULT 0,
            rx_bytes INTEGER DEFAULT 0,
            test_count INTEGER DEFAULT 0,
            UNIQUE(username, date)
        );

        CREATE TABLE IF NOT EXISTS ip_usage (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            ip TEXT NOT NULL,
            date TEXT NOT NULL,
            inbound_bytes INTEGER DEFAULT 0,
            outbound_bytes INTEGER DEFAULT 0,
            test_count INTEGER DEFAULT 0,
            UNIQUE(ip, date)
        );

        CREATE TABLE IF NOT EXISTS sessions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT NOT NULL,
            peer_ip TEXT NOT NULL,
            started_at TEXT DEFAULT (datetime('now')),
            ended_at TEXT,
            tx_bytes INTEGER DEFAULT 0,
            rx_bytes INTEGER DEFAULT 0,
            protocol TEXT,
            direction TEXT
        );

        CREATE TABLE IF NOT EXISTS test_intervals (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            session_id INTEGER NOT NULL,
            interval_num INTEGER NOT NULL,
            tx_bytes INTEGER DEFAULT 0,
            rx_bytes INTEGER DEFAULT 0,
            tx_mbps REAL DEFAULT 0,
            rx_mbps REAL DEFAULT 0,
            local_cpu INTEGER DEFAULT 0,
            remote_cpu INTEGER DEFAULT 0,
            lost_packets INTEGER DEFAULT 0,
            FOREIGN KEY(session_id) REFERENCES sessions(id)
        );

        CREATE INDEX IF NOT EXISTS idx_usage_user_date ON usage(username, date);
        CREATE INDEX IF NOT EXISTS idx_ip_usage_date ON ip_usage(ip, date);
        CREATE INDEX IF NOT EXISTS idx_sessions_peer ON sessions(peer_ip, started_at);
        CREATE INDEX IF NOT EXISTS idx_intervals_session ON test_intervals(session_id);
    ")?;
    Ok(())
}
|
||||
|
||||
/// Total number of rows in the `users` table (enabled or not).
pub fn user_count(&self) -> anyhow::Result<u64> {
    let guard = self.conn.lock().unwrap();
    let total = guard.query_row("SELECT COUNT(*) FROM users", [], |row| row.get::<_, i64>(0))?;
    Ok(total as u64)
}
|
||||
|
||||
/// Insert a user, or replace the row with the same username.
///
/// NOTE(review): the raw password is stored in plaintext in the
/// `password_raw` column (read back by `get_password` for MD5
/// challenge-response auth), so the DB file must be access-protected.
///
/// NOTE(review): `INSERT OR REPLACE` deletes and recreates the row, so
/// re-adding an existing user resets `daily_quota`/`weekly_quota`/
/// `enabled` to their column defaults — confirm this is intended.
pub fn add_user(&self, username: &str, password: &str) -> anyhow::Result<()> {
    let hash = hash_password(username, password);
    let conn = self.conn.lock().unwrap();
    // Ensure password_raw column exists (migration for older databases)
    // The ALTER fails harmlessly once the column exists, hence the ignored result.
    let _ = conn.execute("ALTER TABLE users ADD COLUMN password_raw TEXT DEFAULT ''", []);
    conn.execute(
        "INSERT OR REPLACE INTO users (username, password_hash, password_raw) VALUES (?1, ?2, ?3)",
        params![username, hash, password],
    )?;
    Ok(())
}
|
||||
|
||||
/// Get the raw password for MD5 challenge-response auth.
///
/// Returns `Ok(None)` when the user does not exist or is disabled
/// (the query filters on `enabled = 1`).
pub fn get_password(&self, username: &str) -> anyhow::Result<Option<String>> {
    let conn = self.conn.lock().unwrap();
    let result = conn.query_row(
        "SELECT password_raw FROM users WHERE username = ?1 AND enabled = 1",
        params![username],
        |row| row.get::<_, String>(0),
    ).optional()?; // .optional() maps QueryReturnedNoRows -> Ok(None)
    Ok(result)
}
|
||||
|
||||
pub fn get_user(&self, username: &str) -> anyhow::Result<Option<User>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, username, password_hash, daily_quota, weekly_quota, enabled FROM users WHERE username = ?1"
|
||||
)?;
|
||||
let user = stmt.query_row(params![username], |row| {
|
||||
Ok(User {
|
||||
id: row.get(0)?,
|
||||
username: row.get(1)?,
|
||||
password_hash: row.get(2)?,
|
||||
daily_quota: row.get(3)?,
|
||||
weekly_quota: row.get(4)?,
|
||||
enabled: row.get::<_, i32>(5)? != 0,
|
||||
})
|
||||
}).optional()?;
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
pub fn verify_password(&self, username: &str, password: &str) -> anyhow::Result<bool> {
|
||||
let expected = hash_password(username, password);
|
||||
match self.get_user(username)? {
|
||||
Some(user) => Ok(user.enabled && user.password_hash == expected),
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Upsert today's per-user usage counters (one row per (username, date));
/// increments `test_count` by one per call.
pub fn record_usage(&self, username: &str, tx_bytes: u64, rx_bytes: u64) -> anyhow::Result<()> {
    let conn = self.conn.lock().unwrap();
    let today = chrono_date_today();
    conn.execute(
        "INSERT INTO usage (username, date, tx_bytes, rx_bytes, test_count)
         VALUES (?1, ?2, ?3, ?4, 1)
         ON CONFLICT(username, date) DO UPDATE SET
             tx_bytes = tx_bytes + ?3,
             rx_bytes = rx_bytes + ?4,
             test_count = test_count + 1",
        params![username, today, tx_bytes as i64, rx_bytes as i64],
    )?;
    Ok(())
}

/// (tx, rx) bytes used by `username` today (UTC date).
pub fn get_daily_usage(&self, username: &str) -> anyhow::Result<(u64, u64)> {
    let conn = self.conn.lock().unwrap();
    let today = chrono_date_today();
    let result = conn.query_row(
        "SELECT COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0) FROM usage WHERE username = ?1 AND date = ?2",
        params![username, today],
        |row| {
            let a: i64 = row.get(0)?;
            let b: i64 = row.get(1)?;
            Ok((a as u64, b as u64))
        },
    )?;
    Ok(result)
}

/// (tx, rx) bytes over the trailing 7-day window (today inclusive) —
/// a rolling window, not a calendar week. Uses SQLite's `date('now')`,
/// which is UTC.
pub fn get_weekly_usage(&self, username: &str) -> anyhow::Result<(u64, u64)> {
    let conn = self.conn.lock().unwrap();
    let result = conn.query_row(
        "SELECT COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0) FROM usage
         WHERE username = ?1 AND date >= date('now', '-7 days')",
        params![username],
        |row| {
            let a: i64 = row.get(0)?;
            let b: i64 = row.get(1)?;
            Ok((a as u64, b as u64))
        },
    )?;
    Ok(result)
}

/// (tx, rx) bytes over the trailing 30-day window (today inclusive) —
/// a rolling window, not a calendar month.
pub fn get_monthly_usage(&self, username: &str) -> anyhow::Result<(u64, u64)> {
    let conn = self.conn.lock().unwrap();
    let result = conn.query_row(
        "SELECT COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0) FROM usage
         WHERE username = ?1 AND date >= date('now', '-30 days')",
        params![username],
        |row| {
            let a: i64 = row.get(0)?;
            let b: i64 = row.get(1)?;
            Ok((a as u64, b as u64))
        },
    )?;
    Ok(result)
}
|
||||
|
||||
// --- Per-IP usage tracking ---

/// Upsert today's per-IP counters (one row per (ip, date)); increments
/// `test_count` by one per call.
pub fn record_ip_usage(&self, ip: &str, tx_bytes: u64, rx_bytes: u64) -> anyhow::Result<()> {
    let conn = self.conn.lock().unwrap();
    let today = chrono_date_today();
    // From the server's perspective: inbound = data coming FROM the client (rx),
    // outbound = data going TO the client (tx).
    let inbound = rx_bytes;
    let outbound = tx_bytes;
    conn.execute(
        "INSERT INTO ip_usage (ip, date, inbound_bytes, outbound_bytes, test_count)
         VALUES (?1, ?2, ?3, ?4, 1)
         ON CONFLICT(ip, date) DO UPDATE SET
             inbound_bytes = inbound_bytes + ?3,
             outbound_bytes = outbound_bytes + ?4,
             test_count = test_count + 1",
        params![ip, today, inbound as i64, outbound as i64],
    )?;
    Ok(())
}

/// (inbound, outbound) bytes for `ip` today (UTC date).
pub fn get_ip_daily_usage(&self, ip: &str) -> anyhow::Result<(u64, u64)> {
    let conn = self.conn.lock().unwrap();
    let today = chrono_date_today();
    let result = conn.query_row(
        "SELECT COALESCE(SUM(inbound_bytes),0), COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date = ?2",
        params![ip, today],
        |row| {
            let inbound: i64 = row.get(0)?;
            let outbound: i64 = row.get(1)?;
            Ok((inbound as u64, outbound as u64))
        },
    )?;
    Ok(result)
}

/// (inbound, outbound) bytes for `ip` over the trailing 7-day window
/// (rolling window, not a calendar week).
pub fn get_ip_weekly_usage(&self, ip: &str) -> anyhow::Result<(u64, u64)> {
    let conn = self.conn.lock().unwrap();
    let result = conn.query_row(
        "SELECT COALESCE(SUM(inbound_bytes),0), COALESCE(SUM(outbound_bytes),0) FROM ip_usage
         WHERE ip = ?1 AND date >= date('now', '-7 days')",
        params![ip],
        |row| {
            let inbound: i64 = row.get(0)?;
            let outbound: i64 = row.get(1)?;
            Ok((inbound as u64, outbound as u64))
        },
    )?;
    Ok(result)
}

/// (inbound, outbound) bytes for `ip` over the trailing 30-day window
/// (rolling window, not a calendar month).
pub fn get_ip_monthly_usage(&self, ip: &str) -> anyhow::Result<(u64, u64)> {
    let conn = self.conn.lock().unwrap();
    let result = conn.query_row(
        "SELECT COALESCE(SUM(inbound_bytes),0), COALESCE(SUM(outbound_bytes),0) FROM ip_usage
         WHERE ip = ?1 AND date >= date('now', '-30 days')",
        params![ip],
        |row| {
            let inbound: i64 = row.get(0)?;
            let outbound: i64 = row.get(1)?;
            Ok((inbound as u64, outbound as u64))
        },
    )?;
    Ok(result)
}
|
||||
|
||||
// --- Per-IP directional usage (single-column queries) ---

/// Record inbound-only IP usage (data coming FROM the client).
/// Deliberately inserts/keeps test_count at 0 — a directional update is
/// not a new test (counting happens in `record_ip_usage`).
pub fn record_ip_inbound_usage(&self, ip: &str, bytes: u64) -> anyhow::Result<()> {
    let conn = self.conn.lock().unwrap();
    let today = chrono_date_today();
    conn.execute(
        "INSERT INTO ip_usage (ip, date, inbound_bytes, test_count)
         VALUES (?1, ?2, ?3, 0)
         ON CONFLICT(ip, date) DO UPDATE SET
             inbound_bytes = inbound_bytes + ?3",
        params![ip, today, bytes as i64],
    )?;
    Ok(())
}

/// Record outbound-only IP usage (data going TO the client).
/// Like the inbound variant, does not bump test_count.
pub fn record_ip_outbound_usage(&self, ip: &str, bytes: u64) -> anyhow::Result<()> {
    let conn = self.conn.lock().unwrap();
    let today = chrono_date_today();
    conn.execute(
        "INSERT INTO ip_usage (ip, date, outbound_bytes, test_count)
         VALUES (?1, ?2, ?3, 0)
         ON CONFLICT(ip, date) DO UPDATE SET
             outbound_bytes = outbound_bytes + ?3",
        params![ip, today, bytes as i64],
    )?;
    Ok(())
}

/// Get daily inbound bytes for an IP (UTC date).
pub fn get_ip_daily_inbound(&self, ip: &str) -> anyhow::Result<u64> {
    let conn = self.conn.lock().unwrap();
    let today = chrono_date_today();
    let result: i64 = conn.query_row(
        "SELECT COALESCE(SUM(inbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date = ?2",
        params![ip, today],
        |row| row.get(0),
    )?;
    Ok(result as u64)
}

/// Get inbound bytes for an IP over the trailing 7-day window.
pub fn get_ip_weekly_inbound(&self, ip: &str) -> anyhow::Result<u64> {
    let conn = self.conn.lock().unwrap();
    let result: i64 = conn.query_row(
        "SELECT COALESCE(SUM(inbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-7 days')",
        params![ip],
        |row| row.get(0),
    )?;
    Ok(result as u64)
}

/// Get inbound bytes for an IP over the trailing 30-day window.
pub fn get_ip_monthly_inbound(&self, ip: &str) -> anyhow::Result<u64> {
    let conn = self.conn.lock().unwrap();
    let result: i64 = conn.query_row(
        "SELECT COALESCE(SUM(inbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-30 days')",
        params![ip],
        |row| row.get(0),
    )?;
    Ok(result as u64)
}

/// Get daily outbound bytes for an IP (UTC date).
pub fn get_ip_daily_outbound(&self, ip: &str) -> anyhow::Result<u64> {
    let conn = self.conn.lock().unwrap();
    let today = chrono_date_today();
    let result: i64 = conn.query_row(
        "SELECT COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date = ?2",
        params![ip, today],
        |row| row.get(0),
    )?;
    Ok(result as u64)
}

/// Get outbound bytes for an IP over the trailing 7-day window.
pub fn get_ip_weekly_outbound(&self, ip: &str) -> anyhow::Result<u64> {
    let conn = self.conn.lock().unwrap();
    let result: i64 = conn.query_row(
        "SELECT COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-7 days')",
        params![ip],
        |row| row.get(0),
    )?;
    Ok(result as u64)
}

/// Get outbound bytes for an IP over the trailing 30-day window.
pub fn get_ip_monthly_outbound(&self, ip: &str) -> anyhow::Result<u64> {
    let conn = self.conn.lock().unwrap();
    let result: i64 = conn.query_row(
        "SELECT COALESCE(SUM(outbound_bytes),0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-30 days')",
        params![ip],
        |row| row.get(0),
    )?;
    Ok(result as u64)
}
|
||||
|
||||
// --- Session tracking ---

/// Insert a new session row (started_at defaults to `datetime('now')`)
/// and return its rowid, used as the session id everywhere else.
pub fn start_session(&self, username: &str, peer_ip: &str, protocol: &str, direction: &str) -> anyhow::Result<i64> {
    let conn = self.conn.lock().unwrap();
    conn.execute(
        "INSERT INTO sessions (username, peer_ip, protocol, direction) VALUES (?1, ?2, ?3, ?4)",
        params![username, peer_ip, protocol, direction],
    )?;
    Ok(conn.last_insert_rowid())
}

/// Close a session: set ended_at to now and record the final byte totals.
/// Silently a no-op if `session_id` does not match any row.
pub fn end_session(&self, session_id: i64, tx_bytes: u64, rx_bytes: u64) -> anyhow::Result<()> {
    let conn = self.conn.lock().unwrap();
    conn.execute(
        "UPDATE sessions SET ended_at = datetime('now'), tx_bytes = ?1, rx_bytes = ?2 WHERE id = ?3",
        params![tx_bytes as i64, rx_bytes as i64, session_id],
    )?;
    Ok(())
}
|
||||
|
||||
// --- Per-second interval tracking ---

/// Record a single per-second interval data point for a session.
///
/// One row per (session, interval_num); no uniqueness is enforced by the
/// schema, so callers are responsible for not inserting duplicates.
#[allow(clippy::too_many_arguments)]
pub fn record_test_interval(
    &self,
    session_id: i64,
    interval_num: i32,
    tx_bytes: u64,
    rx_bytes: u64,
    tx_mbps: f64,
    rx_mbps: f64,
    local_cpu: i32,
    remote_cpu: i32,
    lost: i64,
) -> anyhow::Result<()> {
    let conn = self.conn.lock().unwrap();
    conn.execute(
        "INSERT INTO test_intervals (session_id, interval_num, tx_bytes, rx_bytes, tx_mbps, rx_mbps, local_cpu, remote_cpu, lost_packets)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
        params![
            session_id,
            interval_num,
            tx_bytes as i64,
            rx_bytes as i64,
            tx_mbps,
            rx_mbps,
            local_cpu,
            remote_cpu,
            lost,
        ],
    )?;
    Ok(())
}
|
||||
|
||||
/// Retrieve all interval data points for a given session, ordered by interval number.
|
||||
pub fn get_session_intervals(&self, session_id: i64) -> anyhow::Result<Vec<IntervalData>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT interval_num, tx_mbps, rx_mbps, local_cpu, remote_cpu, lost_packets
|
||||
FROM test_intervals WHERE session_id = ?1 ORDER BY interval_num"
|
||||
)?;
|
||||
let rows = stmt.query_map(params![session_id], |row| {
|
||||
Ok(IntervalData {
|
||||
interval_num: row.get(0)?,
|
||||
tx_mbps: row.get(1)?,
|
||||
rx_mbps: row.get(2)?,
|
||||
local_cpu: row.get(3)?,
|
||||
remote_cpu: row.get(4)?,
|
||||
lost: row.get(5)?,
|
||||
})
|
||||
})?.filter_map(|r| r.ok()).collect();
|
||||
Ok(rows)
|
||||
}
|
||||
|
||||
/// Return the last N sessions for a given IP address, most recent first.
|
||||
pub fn get_ip_sessions(&self, ip: &str, limit: u32) -> anyhow::Result<Vec<SessionSummary>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, started_at, ended_at, protocol, direction, tx_bytes, rx_bytes
|
||||
FROM sessions WHERE peer_ip = ?1 ORDER BY started_at DESC LIMIT ?2"
|
||||
)?;
|
||||
let rows = stmt.query_map(params![ip, limit], |row| {
|
||||
Ok(SessionSummary {
|
||||
id: row.get(0)?,
|
||||
started_at: row.get(1)?,
|
||||
ended_at: row.get(2)?,
|
||||
protocol: row.get::<_, Option<String>>(3)?.unwrap_or_default(),
|
||||
direction: row.get::<_, Option<String>>(4)?.unwrap_or_default(),
|
||||
tx_bytes: row.get::<_, i64>(5).map(|v| v as u64)?,
|
||||
rx_bytes: row.get::<_, i64>(6).map(|v| v as u64)?,
|
||||
})
|
||||
})?.filter_map(|r| r.ok()).collect();
|
||||
Ok(rows)
|
||||
}
|
||||
|
||||
/// Return aggregate statistics for an IP address across all sessions.
|
||||
pub fn get_ip_stats(&self, ip: &str) -> anyhow::Result<IpStats> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let result = conn.query_row(
|
||||
"SELECT
|
||||
COUNT(*) as total_tests,
|
||||
COALESCE(SUM(inbound_bytes), 0) as total_inbound,
|
||||
COALESCE(SUM(outbound_bytes), 0) as total_outbound
|
||||
FROM ip_usage WHERE ip = ?1",
|
||||
params![ip],
|
||||
|row| {
|
||||
let total_tests: i64 = row.get(0)?;
|
||||
let total_inbound: i64 = row.get(1)?;
|
||||
let total_outbound: i64 = row.get(2)?;
|
||||
Ok((total_tests as u64, total_inbound as u64, total_outbound as u64))
|
||||
},
|
||||
)?;
|
||||
|
||||
// Compute average Mbps from test_intervals joined through sessions
|
||||
let (avg_tx, avg_rx) = conn.query_row(
|
||||
"SELECT
|
||||
COALESCE(AVG(ti.tx_mbps), 0.0),
|
||||
COALESCE(AVG(ti.rx_mbps), 0.0)
|
||||
FROM test_intervals ti
|
||||
INNER JOIN sessions s ON ti.session_id = s.id
|
||||
WHERE s.peer_ip = ?1",
|
||||
params![ip],
|
||||
|row| {
|
||||
let avg_tx: f64 = row.get(0)?;
|
||||
let avg_rx: f64 = row.get(1)?;
|
||||
Ok((avg_tx, avg_rx))
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(IpStats {
|
||||
total_tests: result.0,
|
||||
total_inbound: result.1,
|
||||
total_outbound: result.2,
|
||||
avg_tx_mbps: avg_tx,
|
||||
avg_rx_mbps: avg_rx,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn delete_user(&self, username: &str) -> anyhow::Result<bool> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let rows = conn.execute("DELETE FROM users WHERE username = ?1", params![username])?;
|
||||
Ok(rows > 0)
|
||||
}
|
||||
|
||||
pub fn set_user_enabled(&self, username: &str, enabled: bool) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"UPDATE users SET enabled = ?1 WHERE username = ?2",
|
||||
params![enabled as i32, username],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_user_quota(&self, username: &str, daily: i64, weekly: i64, monthly: i64) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"UPDATE users SET daily_quota = ?1, weekly_quota = ?2 WHERE username = ?3",
|
||||
params![daily, weekly, username],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn list_users(&self) -> anyhow::Result<Vec<User>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, username, password_hash, daily_quota, weekly_quota, enabled FROM users ORDER BY username"
|
||||
)?;
|
||||
let users = stmt.query_map([], |row| {
|
||||
Ok(User {
|
||||
id: row.get(0)?,
|
||||
username: row.get(1)?,
|
||||
password_hash: row.get(2)?,
|
||||
daily_quota: row.get(3)?,
|
||||
weekly_quota: row.get(4)?,
|
||||
enabled: row.get::<_, i32>(5)? != 0,
|
||||
})
|
||||
})?.filter_map(|r| r.ok()).collect();
|
||||
Ok(users)
|
||||
}
|
||||
}
|
||||
|
||||
fn hash_password(username: &str, password: &str) -> String {
|
||||
use sha2::{Sha256, Digest};
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(format!("{}:{}", username, password).as_bytes());
|
||||
let result = hasher.finalize();
|
||||
result.iter().map(|b| format!("{:02x}", b)).collect()
|
||||
}
|
||||
|
||||
/// Return today's date (UTC) as `YYYY-MM-DD`, without the chrono crate.
///
/// The calendar arithmetic is split into `format_unix_date` so it can be
/// unit-tested independently of the wall clock.
fn chrono_date_today() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    let secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();
    format_unix_date(secs)
}

/// Convert seconds since the Unix epoch into a `YYYY-MM-DD` string (UTC,
/// proleptic Gregorian). Pure function: same input, same output.
fn format_unix_date(secs: u64) -> String {
    // Whole days since 1970-01-01.
    let mut remaining = secs / 86400;
    // Walk forward year by year, consuming 365/366 days at a time.
    let mut year = 1970u64;
    loop {
        let year_len = if is_leap_year(year) { 366 } else { 365 };
        if remaining < year_len {
            break;
        }
        remaining -= year_len;
        year += 1;
    }
    // Walk the months of the final year; `remaining` ends as the
    // zero-based day-of-month.
    let month_lens = [
        31u64,
        if is_leap_year(year) { 29 } else { 28 },
        31, 30, 31, 30, 31, 31, 30, 31, 30, 31,
    ];
    let mut month = 0usize;
    for (i, &len) in month_lens.iter().enumerate() {
        if remaining < len {
            month = i;
            break;
        }
        remaining -= len;
    }
    format!("{:04}-{:02}-{:02}", year, month + 1, remaining + 1)
}

/// Gregorian leap-year rule: divisible by 4, except centuries not
/// divisible by 400.
fn is_leap_year(y: u64) -> bool {
    y % 4 == 0 && (y % 100 != 0 || y % 400 == 0)
}
|
||||
|
||||
// Trait import: brings `.optional()` (rusqlite's OptionalExtension) into
// scope for the query_row calls above. This is not a re-export.
use rusqlite::OptionalExtension;
|
||||
811
src/server_pro/web/mod.rs
Normal file
811
src/server_pro/web/mod.rs
Normal file
@@ -0,0 +1,811 @@
|
||||
//! Web dashboard module for btest-server-pro.
|
||||
//!
|
||||
//! Provides an axum-based HTTP dashboard with:
|
||||
//! - Landing page with IP lookup
|
||||
//! - Per-IP session history and statistics
|
||||
//! - Chart.js throughput graphs
|
||||
//!
|
||||
//! # Feature gate
|
||||
//!
|
||||
//! This entire module is compiled only when the `pro` feature is active
|
||||
//! (it lives inside the `btest-server-pro` binary crate which already
|
||||
//! requires `--features pro`).
|
||||
//!
|
||||
//! # Template files
|
||||
//!
|
||||
//! The HTML source lives in `src/server_pro/web/templates/` as standalone
|
||||
//! `.html` files for easy editing. The Rust code embeds them via the askama
|
||||
//! `source` attribute so no `askama.toml` configuration is needed. If you
|
||||
//! prefer external template files, create `askama.toml` at the crate root:
|
||||
//!
|
||||
//! ```toml
|
||||
//! [[dirs]]
|
||||
//! path = "src/server_pro/web/templates"
|
||||
//! ```
|
||||
//!
|
||||
//! Then change `source = "..."` to `path = "index.html"` (etc.) in the
|
||||
//! template structs below.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use askama::Template;
|
||||
use axum::extract::{Path, State};
|
||||
use axum::http::StatusCode;
|
||||
use axum::response::{Html, IntoResponse, Response};
|
||||
use axum::routing::get;
|
||||
use axum::Router;
|
||||
use rusqlite::{params, Connection};
|
||||
use serde::Serialize;
|
||||
|
||||
use super::user_db::UserDb;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Shared state
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Shared application state passed to all handlers via axum's `State`.
pub struct WebState {
    /// Reference to the main user/session database.
    pub db: UserDb,
    /// Separate read-only connection for dashboard queries that are not
    /// exposed by [`UserDb`] (e.g. listing sessions, aggregate stats).
    /// Wrapped in a [`std::sync::Mutex`] because [`rusqlite::Connection`]
    /// is not `Send + Sync` on its own.
    pub query_conn: std::sync::Mutex<Connection>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Router constructor
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Default database filename used when `BTEST_DB_PATH` is not set.
///
/// NOTE(review): nothing in this module currently reads this constant or the
/// `BTEST_DB_PATH` environment variable — [`create_router`] resolves the path
/// from `db.path()` instead. Confirm whether the env-var fallback is still
/// planned or whether this constant can be removed.
const DEFAULT_DB_PATH: &str = "btest-users.db";
|
||||
|
||||
/// Build the axum [`Router`] for the web dashboard.
///
/// A second, read-only SQLite connection is opened against the same database
/// file the main [`UserDb`] uses (resolved via `db.path()`), so dashboard
/// queries do not contend with the server's writer connection.
///
/// NOTE(review): earlier docs described a `BTEST_DB_PATH` environment-variable
/// override falling back to [`DEFAULT_DB_PATH`]; the code below consults
/// neither — confirm which behavior is intended.
///
/// # Panics
///
/// Panics if the read-only database connection or the DDL for the
/// `session_intervals` table cannot be established. This is intentional:
/// the web module is optional and failure during startup should surface
/// loudly rather than silently serving broken pages.
pub fn create_router(db: UserDb) -> Router {
    let db_path = db.path().to_string();

    // READ_ONLY + NO_MUTEX: external locking is provided by WebState's Mutex.
    let query_conn = Connection::open_with_flags(
        &db_path,
        rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY
            | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX,
    )
    .expect("web: failed to open read-only database connection");
    query_conn
        .execute_batch("PRAGMA busy_timeout=5000;")
        .expect("web: failed to set PRAGMA on query connection");

    // Ensure the `session_intervals` table exists. The server loop must
    // INSERT rows for the chart to have data; the table is created here so
    // the schema is ready.
    ensure_web_tables(&db_path).expect("web: failed to create session_intervals table");

    let state = Arc::new(WebState {
        db,
        query_conn: std::sync::Mutex::new(query_conn),
    });

    // axum 0.8 uses `{param}` syntax for path parameters.
    Router::new()
        .route("/", get(index_page))
        .route("/dashboard/{ip}", get(dashboard_page))
        .route("/api/ip/{ip}/sessions", get(api_sessions))
        .route("/api/ip/{ip}/stats", get(api_stats))
        .route("/api/ip/{ip}/export", get(api_export))
        .route("/api/ip/{ip}/quota", get(api_quota))
        .route("/api/session/{id}/intervals", get(api_intervals))
        .with_state(state)
}
|
||||
|
||||
/// Create additional tables the web dashboard depends on.
|
||||
///
|
||||
/// Opens a short-lived writable connection solely for DDL so it does not
|
||||
/// interfere with the main [`UserDb`] connection.
|
||||
fn ensure_web_tables(db_path: &str) -> anyhow::Result<()> {
|
||||
let conn = Connection::open(db_path)?;
|
||||
conn.execute_batch("PRAGMA busy_timeout=5000;")?;
|
||||
conn.execute_batch(
|
||||
"CREATE TABLE IF NOT EXISTS session_intervals (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
session_id INTEGER NOT NULL,
|
||||
second INTEGER NOT NULL,
|
||||
tx_bytes INTEGER NOT NULL DEFAULT 0,
|
||||
rx_bytes INTEGER NOT NULL DEFAULT 0,
|
||||
UNIQUE(session_id, second)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_intervals_session
|
||||
ON session_intervals(session_id, second);",
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Askama templates (embedded via `source`)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Landing / index page template.
///
/// The full HTML (inline CSS + JS) is embedded at compile time via askama's
/// `source` attribute; the template has no fields. The page's script
/// auto-detects the visitor's public IP via api.ipify.org and links to
/// `/dashboard/{ip}`.
#[derive(Template)]
#[template(
    source = r##"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>btest-rs — Free Public Bandwidth Test Server</title>
<style>
*{margin:0;padding:0;box-sizing:border-box}
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Helvetica,Arial,sans-serif;background:#0f1117;color:#e1e4e8;min-height:100vh;display:flex;flex-direction:column;align-items:center;padding:2rem 1rem}
.container{max-width:720px;width:100%;padding:1rem 0}
h1{font-size:2.2rem;margin-bottom:.25rem;color:#58a6ff;text-align:center}
.subtitle{color:#8b949e;margin-bottom:2.5rem;line-height:1.6;text-align:center;font-size:1.05rem}
.section{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1.5rem;margin-bottom:1.5rem;text-align:left;line-height:1.7;color:#c9d1d9}
.section h2{color:#e1e4e8;font-size:1.15rem;margin-bottom:.75rem}
.section h3{color:#e1e4e8;font-size:1rem;margin-bottom:.5rem;margin-top:1rem}
.section h3:first-child{margin-top:0}
.section p{margin-bottom:.5rem}
.section ul{margin:.5rem 0 .5rem 1.5rem;color:#8b949e}
.section li{margin-bottom:.35rem}
code{background:#0d1117;padding:.2rem .5rem;border-radius:4px;font-size:.85em;color:#58a6ff;word-break:break-all}
pre{background:#0d1117;border:1px solid #30363d;border-radius:6px;padding:1rem;overflow-x:auto;margin:.75rem 0;line-height:1.5}
pre code{padding:0;background:none;font-size:.85em}
.label-tag{display:inline-block;padding:.15rem .5rem;border-radius:4px;font-size:.75rem;font-weight:600;text-transform:uppercase;letter-spacing:.03em;margin-right:.5rem;vertical-align:middle}
.tag-tcp{background:rgba(63,185,80,0.15);color:#3fb950}
.tag-udp{background:rgba(210,153,34,0.15);color:#d29922}
.note{background:#1c1e26;border-left:3px solid #d29922;padding:.75rem 1rem;border-radius:0 6px 6px 0;margin:.75rem 0;font-size:.92rem;color:#8b949e}
.note strong{color:#d29922}
.search-section{text-align:center}
.search-section h2{text-align:center}
.search-box{display:flex;gap:.5rem;margin-bottom:1rem}
.search-box input{flex:1;padding:.75rem 1rem;border:1px solid #30363d;border-radius:6px;background:#161b22;color:#e1e4e8;font-size:1rem;outline:none}
.search-box input:focus{border-color:#58a6ff}
.search-box input::placeholder{color:#484f58}
.search-box button{padding:.75rem 1.5rem;background:#238636;color:#fff;border:none;border-radius:6px;font-size:1rem;cursor:pointer;white-space:nowrap}
.search-box button:hover{background:#2ea043}
.auto-link{font-size:.9rem;color:#8b949e}
.auto-link a{color:#58a6ff;text-decoration:none}
.auto-link a:hover{text-decoration:underline}
.footer{margin-top:2rem;color:#484f58;font-size:.8rem;text-align:center}
.footer a{color:#58a6ff;text-decoration:none}
.footer a:hover{text-decoration:underline}
</style>
</head>
<body>
<div class="container">
<h1>btest-rs</h1>
<p class="subtitle">Free public MikroTik-compatible bandwidth test server.<br>Test your link speed from any RouterOS device — no registration required.</p>

<div class="section">
<h2>Quick Start</h2>
<p>Open a terminal on your MikroTik router and run one of the following commands:</p>
<h3><span class="label-tag tag-tcp">TCP</span> Recommended</h3>
<pre><code>/tool bandwidth-test address=104.225.217.60 user=btest password=btest protocol=tcp direction=both</code></pre>
<h3><span class="label-tag tag-udp">UDP</span></h3>
<pre><code>/tool bandwidth-test address=104.225.217.60 user=btest password=btest protocol=udp direction=both</code></pre>
</div>

<div class="section">
<h2>Important Notes</h2>
<ul>
<li><strong style="color:#e1e4e8">Credentials:</strong> <code>user=btest</code> <code>password=btest</code></li>
<li><strong style="color:#e1e4e8">TCP is recommended</strong> for remote testing — it works reliably through any NAT or firewall</li>
<li><strong style="color:#e1e4e8">Per-IP daily quotas</strong> apply to keep the service fair for everyone</li>
<li><strong style="color:#e1e4e8">Maximum test duration:</strong> 120 seconds</li>
<li><strong style="color:#e1e4e8">Connection limit:</strong> 3 concurrent tests per IP</li>
</ul>
<div class="note">
<strong>UDP bidirectional may not work through NAT/firewall.</strong>
UDP <code>direction=both</code> requires the server to send packets to a pre-calculated client port, which NAT routers typically block. If you need UDP testing:<br>
• Forward UDP ports 2001–2100 on your router, or<br>
• Use <code>direction=send</code> or <code>direction=receive</code> (one-way works fine), or<br>
• Test from a device with a public IP
</div>
</div>

<div class="section search-section">
<h2>Check Your Results</h2>
<p style="margin-bottom:1rem;color:#8b949e">After running a test, enter your public IP to view throughput charts, session history, and statistics.</p>
<form class="search-box" id="ip-form" onsubmit="return goToDashboard()">
<input type="text" id="ip-input" placeholder="Enter your IP address (e.g. 203.0.113.5)" autocomplete="off">
<button type="submit">View Results</button>
</form>
<div class="auto-link" id="auto-detect">Detecting your IP...</div>
</div>

<div class="footer">Powered by <a href="https://github.com/manawenuz/btest-rs">btest-rs</a> — open source MikroTik bandwidth test server</div>
</div>
<script>
function goToDashboard(){var ip=document.getElementById('ip-input').value.trim();if(ip){window.location.href='/dashboard/'+encodeURIComponent(ip);}return false;}
fetch('https://api.ipify.org?format=json')
.then(function(r){return r.json();})
.then(function(d){if(d.ip){document.getElementById('ip-input').value=d.ip;document.getElementById('auto-detect').innerHTML='Detected IP: <a href="/dashboard/'+encodeURIComponent(d.ip)+'">'+d.ip+'</a> — click to view your dashboard';}})
.catch(function(){document.getElementById('auto-detect').textContent='';});
</script>
</body>
</html>"##,
    ext = "html"
)]
struct IndexTemplate;
|
||||
|
||||
/// Per-IP dashboard page template.
///
/// Renders a skeleton page for `ip`; all data (stats, quota, session list,
/// per-second intervals) is fetched client-side from the `/api/ip/{ip}/…`
/// and `/api/session/{id}/intervals` endpoints, and the throughput chart is
/// drawn with Chart.js loaded from the jsdelivr CDN.
#[derive(Template)]
#[template(
    source = r##"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Dashboard — {{ ip }} — btest-rs</title>
<style>
*{margin:0;padding:0;box-sizing:border-box}
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Helvetica,Arial,sans-serif;background:#0f1117;color:#e1e4e8;min-height:100vh;padding:1.5rem}
a{color:#58a6ff;text-decoration:none}a:hover{text-decoration:underline}
.header{display:flex;align-items:center;gap:1rem;margin-bottom:1.5rem;flex-wrap:wrap}
.header h1{font-size:1.5rem;color:#58a6ff}
.header .ip-label{font-size:1.1rem;color:#8b949e;font-family:monospace}
.header .home-link{margin-left:auto}
.btn{display:inline-block;padding:.5rem 1rem;border-radius:6px;font-size:.85rem;font-weight:500;cursor:pointer;border:1px solid #30363d;text-decoration:none}
.btn-json{background:#161b22;color:#3fb950}.btn-json:hover{background:#1c2128;text-decoration:none}
.stats{display:grid;grid-template-columns:repeat(auto-fit,minmax(160px,1fr));gap:1rem;margin-bottom:1.5rem}
.stat-card{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1rem}
.stat-card .label{color:#8b949e;font-size:.8rem;text-transform:uppercase;letter-spacing:.05em}
.stat-card .value{font-size:1.4rem;font-weight:600;margin-top:.25rem}
.table-wrap{overflow-x:auto;margin-bottom:1.5rem}
table{width:100%;border-collapse:collapse;background:#161b22;border-radius:8px;overflow:hidden}
th,td{padding:.6rem 1rem;text-align:left;border-bottom:1px solid #21262d;white-space:nowrap}
th{background:#0d1117;color:#8b949e;font-size:.8rem;text-transform:uppercase;letter-spacing:.04em}
tr{cursor:pointer}tr:hover td{background:#1c2128}tr.selected td{background:#1f3a5f}
.proto-tcp{color:#3fb950}.proto-udp{color:#d29922}
.dir-tx{color:#f78166}.dir-rx{color:#58a6ff}.dir-both{color:#bc8cff}
.chart-section{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1.5rem;margin-bottom:1.5rem}
.chart-section h2{font-size:1rem;color:#8b949e;margin-bottom:1rem}
.chart-container{position:relative;width:100%;max-height:360px}
.chart-placeholder{text-align:center;color:#484f58;padding:3rem 0}
.footer{text-align:center;color:#484f58;font-size:.8rem;margin-top:2rem}
.no-data{text-align:center;padding:3rem;color:#484f58}
.quota-section{background:#161b22;border:1px solid #30363d;border-radius:8px;padding:1.25rem;margin-bottom:1.5rem}
.quota-section h2{font-size:1rem;color:#8b949e;margin-bottom:1rem}
.quota-row{display:flex;align-items:center;gap:1rem;margin-bottom:.75rem}
.quota-row:last-child{margin-bottom:0}
.quota-label{min-width:70px;font-size:.85rem;color:#8b949e;text-transform:uppercase;letter-spacing:.04em}
.quota-bar-wrap{flex:1;background:#21262d;border-radius:4px;height:22px;position:relative;overflow:hidden}
.quota-bar{height:100%;border-radius:4px;transition:width .5s ease}
.quota-bar.low{background:#238636}.quota-bar.mid{background:#d29922}.quota-bar.high{background:#da3633}
.quota-text{min-width:180px;font-size:.85rem;color:#e1e4e8;text-align:right;font-family:monospace}
</style>
</head>
<body>
<div class="header">
<h1>btest-rs</h1>
<span class="ip-label">{{ ip }}</span>
<a class="btn btn-json" href="/api/ip/{{ ip }}/export" download>Export JSON</a>
<span class="home-link"><a href="/">Home</a></span>
</div>
<div class="stats" id="stats-grid">
<div class="stat-card"><div class="label">Total Tests</div><div class="value" id="stat-total-tests">—</div></div>
<div class="stat-card"><div class="label">Total TX</div><div class="value" id="stat-total-tx">—</div></div>
<div class="stat-card"><div class="label">Total RX</div><div class="value" id="stat-total-rx">—</div></div>
<div class="stat-card"><div class="label">Avg TX Mbps</div><div class="value" id="stat-avg-tx">—</div></div>
<div class="stat-card"><div class="label">Avg RX Mbps</div><div class="value" id="stat-avg-rx">—</div></div>
</div>
<div class="quota-section" id="quota-section">
<h2>Quota Usage</h2>
<div class="quota-row"><span class="quota-label">Daily</span><div class="quota-bar-wrap"><div class="quota-bar low" id="bar-daily" style="width:0%"></div></div><span class="quota-text" id="text-daily">—</span></div>
<div class="quota-row"><span class="quota-label">Weekly</span><div class="quota-bar-wrap"><div class="quota-bar low" id="bar-weekly" style="width:0%"></div></div><span class="quota-text" id="text-weekly">—</span></div>
<div class="quota-row"><span class="quota-label">Monthly</span><div class="quota-bar-wrap"><div class="quota-bar low" id="bar-monthly" style="width:0%"></div></div><span class="quota-text" id="text-monthly">—</span></div>
</div>
<div class="chart-section">
<h2 id="chart-title">Select a test below to view its throughput chart</h2>
<div class="chart-container">
<canvas id="throughput-chart"></canvas>
<div class="chart-placeholder" id="chart-placeholder">Click a row in the table to load the throughput graph for that session.</div>
</div>
</div>
<div class="table-wrap">
<table>
<thead><tr><th>#</th><th>Date</th><th>Protocol</th><th>Direction</th><th>TX Bytes</th><th>RX Bytes</th><th>Duration</th><th>Avg TX Mbps</th><th>Avg RX Mbps</th></tr></thead>
<tbody id="sessions-body"><tr><td colspan="9" class="no-data">Loading sessions...</td></tr></tbody>
</table>
</div>
<div class="footer">Powered by btest-rs</div>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script>
var currentIp="{{ ip }}";
var throughputChart=null;
function formatBytes(b){if(b===0)return'0 B';var u=['B','KB','MB','GB','TB'];var i=Math.floor(Math.log(b)/Math.log(1024));if(i>=u.length)i=u.length-1;return(b/Math.pow(1024,i)).toFixed(1)+' '+u[i];}
function formatMbps(bps){return(bps*8/1e6).toFixed(2);}
fetch('/api/ip/'+encodeURIComponent(currentIp)+'/quota').then(function(r){return r.json();}).then(function(q){
function upd(id,used,limit){
var pct=limit>0?Math.min(used/limit*100,100):0;
var bar=document.getElementById('bar-'+id);
var txt=document.getElementById('text-'+id);
bar.style.width=pct.toFixed(1)+'%';
bar.className='quota-bar '+(pct<50?'low':pct<80?'mid':'high');
txt.textContent=formatBytes(used)+' / '+formatBytes(limit)+' ('+pct.toFixed(1)+'%)';
}
upd('daily',q.daily_used,q.daily_limit);
upd('weekly',q.weekly_used,q.weekly_limit);
upd('monthly',q.monthly_used,q.monthly_limit);
}).catch(function(){});
function durationStr(s,e){if(!s||!e)return'--';var ms=new Date(e)-new Date(s);if(ms<0)return'--';var sec=Math.round(ms/1000);if(sec<60)return sec+'s';return Math.floor(sec/60)+'m '+(sec%60)+'s';}
function durationSec(s,e){if(!s||!e)return 0;return Math.max((new Date(e)-new Date(s))/1000,0.001);}
fetch('/api/ip/'+encodeURIComponent(currentIp)+'/stats').then(function(r){return r.json();}).then(function(d){
document.getElementById('stat-total-tests').textContent=d.total_sessions||0;
document.getElementById('stat-total-tx').textContent=formatBytes(d.total_tx_bytes||0);
document.getElementById('stat-total-rx').textContent=formatBytes(d.total_rx_bytes||0);
document.getElementById('stat-avg-tx').textContent=d.avg_tx_mbps?d.avg_tx_mbps.toFixed(2):'0.00';
document.getElementById('stat-avg-rx').textContent=d.avg_rx_mbps?d.avg_rx_mbps.toFixed(2):'0.00';
}).catch(function(){});
fetch('/api/ip/'+encodeURIComponent(currentIp)+'/sessions').then(function(r){return r.json();}).then(function(sessions){
var tbody=document.getElementById('sessions-body');
if(!sessions||sessions.length===0){tbody.innerHTML='<tr><td colspan="9" class="no-data">No test sessions found for this IP.</td></tr>';return;}
tbody.innerHTML='';
sessions.forEach(function(s,i){
var tr=document.createElement('tr');tr.dataset.sessionId=s.id;tr.onclick=function(){selectSession(s.id,tr);};
var dur=durationSec(s.started_at,s.ended_at);var avgTx=dur>0?formatMbps(s.tx_bytes/dur):'0.00';var avgRx=dur>0?formatMbps(s.rx_bytes/dur):'0.00';
var proto=(s.protocol||'TCP').toUpperCase();var dir=(s.direction||'BOTH').toUpperCase();
var pc=proto==='UDP'?'proto-udp':'proto-tcp';var dc=dir==='TX'?'dir-tx':dir==='RX'?'dir-rx':'dir-both';
tr.innerHTML='<td>'+(i+1)+'</td><td>'+(s.started_at||'--')+'</td><td class="'+pc+'">'+proto+'</td><td class="'+dc+'">'+dir+'</td><td>'+formatBytes(s.tx_bytes||0)+'</td><td>'+formatBytes(s.rx_bytes||0)+'</td><td>'+durationStr(s.started_at,s.ended_at)+'</td><td>'+avgTx+'</td><td>'+avgRx+'</td>';
tbody.appendChild(tr);
});
if(sessions.length>0){var fr=tbody.querySelector('tr');if(fr)selectSession(sessions[0].id,fr);}
}).catch(function(){document.getElementById('sessions-body').innerHTML='<tr><td colspan="9" class="no-data">Failed to load sessions.</td></tr>';});
function selectSession(sid,row){
document.querySelectorAll('#sessions-body tr').forEach(function(r){r.classList.remove('selected');});
row.classList.add('selected');
document.getElementById('chart-title').textContent='Throughput for session #'+sid;
document.getElementById('chart-placeholder').style.display='none';
fetch('/api/session/'+sid+'/intervals').then(function(r){return r.json();}).then(function(iv){renderChart(iv);}).catch(function(){
document.getElementById('chart-placeholder').style.display='block';
document.getElementById('chart-placeholder').textContent='Failed to load interval data.';
});
}
function renderChart(iv){
var canvas=document.getElementById('throughput-chart');
if(throughputChart)throughputChart.destroy();
if(!iv||iv.length===0){document.getElementById('chart-placeholder').style.display='block';document.getElementById('chart-placeholder').textContent='No interval data available for this session.';return;}
var labels=iv.map(function(d){return d.second+'s';});
var tx=iv.map(function(d){return(d.tx_bytes*8/1e6).toFixed(2);});
var rx=iv.map(function(d){return(d.rx_bytes*8/1e6).toFixed(2);});
throughputChart=new Chart(canvas,{type:'line',data:{labels:labels,datasets:[
{label:'TX Mbps',data:tx,borderColor:'#f78166',backgroundColor:'rgba(247,129,102,0.1)',borderWidth:2,fill:true,tension:0.3,pointRadius:1},
{label:'RX Mbps',data:rx,borderColor:'#58a6ff',backgroundColor:'rgba(88,166,255,0.1)',borderWidth:2,fill:true,tension:0.3,pointRadius:1}
]},options:{responsive:true,maintainAspectRatio:false,interaction:{intersect:false,mode:'index'},
scales:{x:{title:{display:true,text:'Time',color:'#8b949e'},ticks:{color:'#8b949e'},grid:{color:'#21262d'}},
y:{title:{display:true,text:'Mbps',color:'#8b949e'},ticks:{color:'#8b949e'},grid:{color:'#21262d'},beginAtZero:true}},
plugins:{legend:{labels:{color:'#e1e4e8'}},tooltip:{backgroundColor:'#161b22',borderColor:'#30363d',borderWidth:1,titleColor:'#e1e4e8',bodyColor:'#8b949e'}}}});
}
</script>
</body>
</html>"##,
    ext = "html"
)]
struct DashboardTemplate {
    ip: String,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// JSON response types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single test session as returned by the sessions API.
///
/// Fields mirror the columns selected from the `sessions` table in
/// [`api_sessions`] and serialize 1:1 to JSON.
#[derive(Serialize)]
struct SessionJson {
    /// `sessions.id` row id.
    id: i64,
    /// Username that authenticated the test.
    username: String,
    /// Client IP the session was recorded under.
    peer_ip: String,
    /// Start timestamp as stored in the DB (string column; None if absent).
    started_at: Option<String>,
    /// End timestamp; None for sessions that never completed.
    ended_at: Option<String>,
    /// Bytes sent by the server during the session.
    tx_bytes: i64,
    /// Bytes received by the server during the session.
    rx_bytes: i64,
    /// Protocol label (e.g. tcp/udp) if recorded.
    protocol: Option<String>,
    /// Direction label (send/receive/both) if recorded.
    direction: Option<String>,
}
|
||||
|
||||
/// Aggregate statistics for an IP address.
///
/// Computed in [`api_stats`]: averages divide total bytes by the summed
/// duration of completed sessions and are 0.0 when no duration is known.
#[derive(Serialize)]
struct StatsJson {
    /// Number of sessions recorded for the IP.
    total_sessions: i64,
    /// Sum of tx_bytes over all sessions.
    total_tx_bytes: i64,
    /// Sum of rx_bytes over all sessions.
    total_rx_bytes: i64,
    /// Average transmit rate in megabits/second.
    avg_tx_mbps: f64,
    /// Average receive rate in megabits/second.
    avg_rx_mbps: f64,
}
|
||||
|
||||
/// One second of throughput data within a session.
///
/// Backed by a row of the `session_intervals` table (see
/// [`ensure_web_tables`]); consumed by the dashboard's Chart.js graph.
#[derive(Serialize)]
struct IntervalJson {
    /// Second offset within the session.
    second: i64,
    /// Bytes transmitted during this second.
    tx_bytes: i64,
    /// Bytes received during this second.
    rx_bytes: i64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Error helper
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Uniform error wrapper so handlers can use `?` freely.
///
/// All errors are rendered as `500 Internal Server Error` with a plain-text
/// body. The full error chain is logged via [`tracing`].
struct AppError(anyhow::Error);
|
||||
|
||||
impl IntoResponse for AppError {
|
||||
fn into_response(self) -> Response {
|
||||
tracing::error!("web handler error: {:#}", self.0);
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, self.0.to_string()).into_response()
|
||||
}
|
||||
}
|
||||
|
||||
// Blanket conversion: anything convertible to `anyhow::Error` (rusqlite,
// askama, io errors, ...) can be propagated with `?` inside handlers.
impl<E: Into<anyhow::Error>> From<E> for AppError {
    fn from(err: E) -> Self {
        Self(err.into())
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Handlers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// `GET /` -- render the landing page.
|
||||
async fn index_page() -> Result<Html<String>, AppError> {
|
||||
let rendered = IndexTemplate
|
||||
.render()
|
||||
.map_err(|e| anyhow::anyhow!("template render: {}", e))?;
|
||||
Ok(Html(rendered))
|
||||
}
|
||||
|
||||
/// `GET /dashboard/{ip}` -- render the per-IP dashboard.
|
||||
async fn dashboard_page(Path(ip): Path<String>) -> Result<Html<String>, AppError> {
|
||||
let rendered = DashboardTemplate { ip }
|
||||
.render()
|
||||
.map_err(|e| anyhow::anyhow!("template render: {}", e))?;
|
||||
Ok(Html(rendered))
|
||||
}
|
||||
|
||||
/// `GET /api/ip/{ip}/sessions` -- return the most recent 100 sessions for
|
||||
/// the given peer IP as a JSON array.
|
||||
async fn api_sessions(
|
||||
State(state): State<Arc<WebState>>,
|
||||
Path(ip): Path<String>,
|
||||
) -> Result<axum::Json<Vec<SessionJson>>, AppError> {
|
||||
let sessions = {
|
||||
let conn = state
|
||||
.query_conn
|
||||
.lock()
|
||||
.map_err(|e| anyhow::anyhow!("lock: {}", e))?;
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, username, peer_ip, started_at, ended_at,
|
||||
tx_bytes, rx_bytes, protocol, direction
|
||||
FROM sessions
|
||||
WHERE peer_ip = ?1
|
||||
ORDER BY started_at DESC
|
||||
LIMIT 100",
|
||||
)?;
|
||||
let rows = stmt.query_map(params![ip], |row| {
|
||||
Ok(SessionJson {
|
||||
id: row.get(0)?,
|
||||
username: row.get(1)?,
|
||||
peer_ip: row.get(2)?,
|
||||
started_at: row.get(3)?,
|
||||
ended_at: row.get(4)?,
|
||||
tx_bytes: row.get(5)?,
|
||||
rx_bytes: row.get(6)?,
|
||||
protocol: row.get(7)?,
|
||||
direction: row.get(8)?,
|
||||
})
|
||||
})?;
|
||||
rows.filter_map(Result::ok).collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
Ok(axum::Json(sessions))
|
||||
}
|
||||
|
||||
/// `GET /api/ip/{ip}/stats` -- return aggregate statistics (total bytes,
/// session count, average throughput) for the given IP.
///
/// Averages divide total bytes by the summed wall-clock duration of
/// completed sessions (rows with both `started_at` and `ended_at`);
/// sessions missing either timestamp contribute bytes but zero seconds.
async fn api_stats(
    State(state): State<Arc<WebState>>,
    Path(ip): Path<String>,
) -> Result<axum::Json<StatsJson>, AppError> {
    let stats = {
        // Hold the connection mutex only for the single aggregate query.
        let conn = state
            .query_conn
            .lock()
            .map_err(|e| anyhow::anyhow!("lock: {}", e))?;
        conn.query_row(
            "SELECT
                COUNT(*) AS total_sessions,
                COALESCE(SUM(tx_bytes), 0) AS total_tx,
                COALESCE(SUM(rx_bytes), 0) AS total_rx,
                COALESCE(SUM(
                    CASE WHEN ended_at IS NOT NULL AND started_at IS NOT NULL
                        THEN (julianday(ended_at) - julianday(started_at)) * 86400.0
                        ELSE 0 END
                ), 0) AS total_seconds
             FROM sessions
             WHERE peer_ip = ?1",
            params![ip],
            |row| {
                let total_sessions: i64 = row.get(0)?;
                let total_tx: i64 = row.get(1)?;
                let total_rx: i64 = row.get(2)?;
                let total_seconds: f64 = row.get(3)?;

                // bytes * 8 / seconds / 1e6 -> megabits per second.
                let avg_tx_mbps = if total_seconds > 0.0 {
                    (total_tx as f64) * 8.0 / total_seconds / 1_000_000.0
                } else {
                    0.0
                };
                let avg_rx_mbps = if total_seconds > 0.0 {
                    (total_rx as f64) * 8.0 / total_seconds / 1_000_000.0
                } else {
                    0.0
                };

                Ok(StatsJson {
                    total_sessions,
                    total_tx_bytes: total_tx,
                    total_rx_bytes: total_rx,
                    avg_tx_mbps,
                    avg_rx_mbps,
                })
            },
        )?
    };

    Ok(axum::Json(stats))
}
|
||||
|
||||
/// Quota usage for an IP — daily/weekly/monthly with limits.
///
/// All values are byte counts; the dashboard renders them as progress bars
/// (used / limit).
#[derive(Serialize)]
struct QuotaUsageJson {
    /// Bytes used today (inbound + outbound).
    daily_used: i64,
    /// Daily byte budget.
    daily_limit: i64,
    /// Bytes used over the trailing weekly window.
    weekly_used: i64,
    /// Weekly byte budget.
    weekly_limit: i64,
    /// Bytes used over the trailing monthly window.
    monthly_used: i64,
    /// Monthly byte budget.
    monthly_limit: i64,
}
|
||||
|
||||
/// `GET /api/ip/{ip}/quota` -- return current quota usage for the IP.
|
||||
async fn api_quota(
|
||||
State(state): State<Arc<WebState>>,
|
||||
Path(ip): Path<String>,
|
||||
) -> Result<axum::Json<QuotaUsageJson>, AppError> {
|
||||
let conn = state.query_conn.lock().map_err(|e| anyhow::anyhow!("lock: {}", e))?;
|
||||
|
||||
let daily: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage WHERE ip = ?1 AND date = date('now')",
|
||||
params![ip], |row| row.get(0),
|
||||
).unwrap_or(0);
|
||||
|
||||
let weekly: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-7 days')",
|
||||
params![ip], |row| row.get(0),
|
||||
).unwrap_or(0);
|
||||
|
||||
let monthly: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage WHERE ip = ?1 AND date >= date('now', '-30 days')",
|
||||
params![ip], |row| row.get(0),
|
||||
).unwrap_or(0);
|
||||
|
||||
// Limits: 2GB daily, 8GB weekly, 24GB monthly
|
||||
Ok(axum::Json(QuotaUsageJson {
|
||||
daily_used: daily,
|
||||
daily_limit: 2_147_483_648,
|
||||
weekly_used: weekly,
|
||||
weekly_limit: 8_589_934_592,
|
||||
monthly_used: monthly,
|
||||
monthly_limit: 25_769_803_776,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Full export of all data for an IP — stats + sessions with human-readable fields.
|
||||
#[derive(Serialize)]
|
||||
struct ExportJson {
|
||||
ip: String,
|
||||
exported_at: String,
|
||||
stats: StatsJson,
|
||||
quota: QuotaJson,
|
||||
sessions: Vec<ExportSessionJson>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
struct QuotaJson {
    /// Bytes used today (inbound + outbound).
    daily_used_bytes: i64,
    /// Same value formatted via `human_bytes`.
    daily_used_human: String,
    /// NOTE(review): misleading name — despite `_bytes` this is a String
    /// placeholder ("see server config"), not a numeric limit. Renaming
    /// would change the exported JSON schema, so it is only flagged here.
    daily_limit_bytes: String,
}
|
||||
|
||||
/// One session in the JSON export, with derived human-readable fields.
#[derive(Serialize)]
struct ExportSessionJson {
    /// `sessions.id` row id.
    id: i64,
    /// Start timestamp string, if recorded.
    started_at: Option<String>,
    /// End timestamp string, if recorded.
    ended_at: Option<String>,
    /// Protocol label, if recorded.
    protocol: Option<String>,
    /// Direction label, if recorded.
    direction: Option<String>,
    /// Raw transmitted byte count.
    tx_bytes: i64,
    /// Raw received byte count.
    rx_bytes: i64,
    /// `tx_bytes` formatted via `human_bytes`.
    tx_human: String,
    /// `rx_bytes` formatted via `human_bytes`.
    rx_human: String,
    /// Wall-clock duration computed by SQLite; 0 when timestamps are missing.
    duration_secs: f64,
    /// tx_bytes * 8 / duration / 1e6; 0.0 when duration is 0.
    avg_tx_mbps: f64,
    /// rx_bytes * 8 / duration / 1e6; 0.0 when duration is 0.
    avg_rx_mbps: f64,
}
|
||||
|
||||
/// Format a byte count with binary (1024-based) units.
///
/// Tiers: `B` (integer), `KB`/`MB` with one decimal, `GB`/`TB` with two
/// decimals. The TB tier matches the dashboard's JavaScript `formatBytes`
/// helper, which also goes up to TB — previously values >= 1 TiB rendered
/// as large GB figures here.
///
/// Values below 1 KiB (including 0 and negatives) are printed as plain
/// integer bytes.
fn human_bytes(b: i64) -> String {
    const KIB: f64 = 1024.0;
    const MIB: f64 = 1_048_576.0;
    const GIB: f64 = 1_073_741_824.0;
    const TIB: f64 = 1_099_511_627_776.0;

    let v = b as f64;
    if v >= TIB {
        format!("{:.2} TB", v / TIB)
    } else if v >= GIB {
        format!("{:.2} GB", v / GIB)
    } else if v >= MIB {
        format!("{:.1} MB", v / MIB)
    } else if v >= KIB {
        format!("{:.1} KB", v / KIB)
    } else {
        format!("{} B", b)
    }
}
|
||||
|
||||
/// `GET /api/ip/{ip}/export` -- return a comprehensive JSON export of all
|
||||
/// sessions, stats, and quota usage for an IP. Suitable for download/archival.
|
||||
async fn api_export(
|
||||
State(state): State<Arc<WebState>>,
|
||||
Path(ip): Path<String>,
|
||||
) -> Result<impl IntoResponse, AppError> {
|
||||
let conn = state
|
||||
.query_conn
|
||||
.lock()
|
||||
.map_err(|e| anyhow::anyhow!("lock: {}", e))?;
|
||||
|
||||
// Stats
|
||||
let stats = conn.query_row(
|
||||
"SELECT COUNT(*), COALESCE(SUM(tx_bytes),0), COALESCE(SUM(rx_bytes),0),
|
||||
COALESCE(SUM(CASE WHEN ended_at IS NOT NULL AND started_at IS NOT NULL
|
||||
THEN (julianday(ended_at)-julianday(started_at))*86400.0 ELSE 0 END),0)
|
||||
FROM sessions WHERE peer_ip = ?1",
|
||||
params![ip],
|
||||
|row| {
|
||||
let n: i64 = row.get(0)?;
|
||||
let tx: i64 = row.get(1)?;
|
||||
let rx: i64 = row.get(2)?;
|
||||
let secs: f64 = row.get(3)?;
|
||||
Ok(StatsJson {
|
||||
total_sessions: n,
|
||||
total_tx_bytes: tx,
|
||||
total_rx_bytes: rx,
|
||||
avg_tx_mbps: if secs > 0.0 { tx as f64 * 8.0 / secs / 1e6 } else { 0.0 },
|
||||
avg_rx_mbps: if secs > 0.0 { rx as f64 * 8.0 / secs / 1e6 } else { 0.0 },
|
||||
})
|
||||
},
|
||||
)?;
|
||||
|
||||
// Quota
|
||||
let daily_used: i64 = conn.query_row(
|
||||
"SELECT COALESCE(SUM(inbound_bytes + outbound_bytes), 0) FROM ip_usage
|
||||
WHERE ip = ?1 AND date = date('now')",
|
||||
params![ip],
|
||||
|row| row.get(0),
|
||||
).unwrap_or(0);
|
||||
|
||||
let quota = QuotaJson {
|
||||
daily_used_bytes: daily_used,
|
||||
daily_used_human: human_bytes(daily_used),
|
||||
daily_limit_bytes: "see server config".to_string(),
|
||||
};
|
||||
|
||||
// Sessions with computed fields (duration computed by SQLite)
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, started_at, ended_at, protocol, direction, tx_bytes, rx_bytes,
|
||||
CASE WHEN ended_at IS NOT NULL AND started_at IS NOT NULL
|
||||
THEN (julianday(ended_at) - julianday(started_at)) * 86400.0
|
||||
ELSE 0 END AS dur_secs
|
||||
FROM sessions WHERE peer_ip = ?1 ORDER BY started_at DESC LIMIT 100",
|
||||
)?;
|
||||
let sessions: Vec<ExportSessionJson> = stmt.query_map(params![ip], |row| {
|
||||
let tx: i64 = row.get(5)?;
|
||||
let rx: i64 = row.get(6)?;
|
||||
let dur: f64 = row.get(7)?;
|
||||
Ok(ExportSessionJson {
|
||||
id: row.get(0)?,
|
||||
started_at: row.get(1)?,
|
||||
ended_at: row.get(2)?,
|
||||
protocol: row.get(3)?,
|
||||
direction: row.get(4)?,
|
||||
tx_bytes: tx,
|
||||
rx_bytes: rx,
|
||||
tx_human: human_bytes(tx),
|
||||
rx_human: human_bytes(rx),
|
||||
duration_secs: dur,
|
||||
avg_tx_mbps: if dur > 0.0 { tx as f64 * 8.0 / dur / 1e6 } else { 0.0 },
|
||||
avg_rx_mbps: if dur > 0.0 { rx as f64 * 8.0 / dur / 1e6 } else { 0.0 },
|
||||
})
|
||||
})?.filter_map(Result::ok).collect();
|
||||
|
||||
let export = ExportJson {
|
||||
ip: ip.clone(),
|
||||
exported_at: {
|
||||
// Simple UTC timestamp without chrono
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
let secs = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
format!("{}", secs) // Unix timestamp — universally parseable
|
||||
},
|
||||
stats,
|
||||
quota,
|
||||
sessions,
|
||||
};
|
||||
|
||||
let json_string = serde_json::to_string_pretty(&export)
|
||||
.map_err(|e| anyhow::anyhow!("json serialize: {}", e))?;
|
||||
|
||||
Ok((
|
||||
StatusCode::OK,
|
||||
[
|
||||
(axum::http::header::CONTENT_TYPE, "application/json".to_string()),
|
||||
(axum::http::header::CONTENT_DISPOSITION,
|
||||
format!("attachment; filename=\"btest-{}.json\"", ip)),
|
||||
],
|
||||
json_string,
|
||||
))
|
||||
}
|
||||
|
||||
/// `GET /api/session/{id}/intervals` -- return per-second throughput data
|
||||
/// for a session.
|
||||
///
|
||||
/// If the `session_intervals` table does not exist or contains no rows for
|
||||
/// the requested session, an empty JSON array is returned.
|
||||
async fn api_intervals(
|
||||
State(state): State<Arc<WebState>>,
|
||||
Path(id): Path<i64>,
|
||||
) -> Result<axum::Json<Vec<IntervalJson>>, AppError> {
|
||||
let intervals = {
|
||||
let conn = state
|
||||
.query_conn
|
||||
.lock()
|
||||
.map_err(|e| anyhow::anyhow!("lock: {}", e))?;
|
||||
|
||||
// Guard against the table not existing (e.g. first run before
|
||||
// `ensure_web_tables` was ever called on this database file).
|
||||
let table_exists: bool = conn
|
||||
.query_row(
|
||||
"SELECT COUNT(*) FROM sqlite_master \
|
||||
WHERE type = 'table' AND name = 'session_intervals'",
|
||||
[],
|
||||
|row| row.get::<_, i64>(0),
|
||||
)
|
||||
.map(|c| c > 0)
|
||||
.unwrap_or(false);
|
||||
|
||||
if !table_exists {
|
||||
Vec::new()
|
||||
} else {
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT second, tx_bytes, rx_bytes
|
||||
FROM session_intervals
|
||||
WHERE session_id = ?1
|
||||
ORDER BY second ASC",
|
||||
)?;
|
||||
let rows = stmt.query_map(params![id], |row| {
|
||||
Ok(IntervalJson {
|
||||
second: row.get(0)?,
|
||||
tx_bytes: row.get(1)?,
|
||||
rx_bytes: row.get(2)?,
|
||||
})
|
||||
})?;
|
||||
rows.filter_map(Result::ok).collect::<Vec<_>>()
|
||||
}
|
||||
};
|
||||
|
||||
Ok(axum::Json(intervals))
|
||||
}
|
||||
387
src/server_pro/web/templates/dashboard.html
Normal file
387
src/server_pro/web/templates/dashboard.html
Normal file
@@ -0,0 +1,387 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<title>Dashboard — {{ ip }} — btest-rs</title>
|
||||
<style>
|
||||
* { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
background: #0f1117;
|
||||
color: #e1e4e8;
|
||||
min-height: 100vh;
|
||||
padding: 1.5rem;
|
||||
}
|
||||
a { color: #58a6ff; text-decoration: none; }
|
||||
a:hover { text-decoration: underline; }
|
||||
|
||||
.header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1rem;
|
||||
margin-bottom: 1.5rem;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
.header h1 { font-size: 1.5rem; color: #58a6ff; }
|
||||
.header .ip-label {
|
||||
font-size: 1.1rem;
|
||||
color: #8b949e;
|
||||
font-family: monospace;
|
||||
}
|
||||
.header .home-link { margin-left: auto; }
|
||||
|
||||
/* Stats cards */
|
||||
.stats {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
|
||||
gap: 1rem;
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
.stat-card {
|
||||
background: #161b22;
|
||||
border: 1px solid #30363d;
|
||||
border-radius: 8px;
|
||||
padding: 1rem;
|
||||
}
|
||||
.stat-card .label {
|
||||
color: #8b949e;
|
||||
font-size: 0.8rem;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.05em;
|
||||
}
|
||||
.stat-card .value {
|
||||
font-size: 1.4rem;
|
||||
font-weight: 600;
|
||||
margin-top: 0.25rem;
|
||||
}
|
||||
|
||||
/* Table */
|
||||
.table-wrap {
|
||||
overflow-x: auto;
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
background: #161b22;
|
||||
border-radius: 8px;
|
||||
overflow: hidden;
|
||||
}
|
||||
th, td {
|
||||
padding: 0.6rem 1rem;
|
||||
text-align: left;
|
||||
border-bottom: 1px solid #21262d;
|
||||
white-space: nowrap;
|
||||
}
|
||||
th {
|
||||
background: #0d1117;
|
||||
color: #8b949e;
|
||||
font-size: 0.8rem;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.04em;
|
||||
}
|
||||
tr { cursor: pointer; }
|
||||
tr:hover td { background: #1c2128; }
|
||||
tr.selected td { background: #1f3a5f; }
|
||||
|
||||
.proto-tcp { color: #3fb950; }
|
||||
.proto-udp { color: #d29922; }
|
||||
.dir-tx { color: #f78166; }
|
||||
.dir-rx { color: #58a6ff; }
|
||||
.dir-both { color: #bc8cff; }
|
||||
|
||||
/* Chart area */
|
||||
.chart-section {
|
||||
background: #161b22;
|
||||
border: 1px solid #30363d;
|
||||
border-radius: 8px;
|
||||
padding: 1.5rem;
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
.chart-section h2 {
|
||||
font-size: 1rem;
|
||||
color: #8b949e;
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
.chart-container {
|
||||
position: relative;
|
||||
width: 100%;
|
||||
max-height: 360px;
|
||||
}
|
||||
.chart-placeholder {
|
||||
text-align: center;
|
||||
color: #484f58;
|
||||
padding: 3rem 0;
|
||||
}
|
||||
|
||||
.footer {
|
||||
text-align: center;
|
||||
color: #484f58;
|
||||
font-size: 0.8rem;
|
||||
margin-top: 2rem;
|
||||
}
|
||||
.no-data {
|
||||
text-align: center;
|
||||
padding: 3rem;
|
||||
color: #484f58;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<div class="header">
|
||||
<h1>btest-rs</h1>
|
||||
<span class="ip-label">{{ ip }}</span>
|
||||
<span class="home-link"><a href="/">Home</a></span>
|
||||
</div>
|
||||
|
||||
<!-- Stats summary (filled via API) -->
|
||||
<div class="stats" id="stats-grid">
|
||||
<div class="stat-card">
|
||||
<div class="label">Total Tests</div>
|
||||
<div class="value" id="stat-total-tests">—</div>
|
||||
</div>
|
||||
<div class="stat-card">
|
||||
<div class="label">Total TX</div>
|
||||
<div class="value" id="stat-total-tx">—</div>
|
||||
</div>
|
||||
<div class="stat-card">
|
||||
<div class="label">Total RX</div>
|
||||
<div class="value" id="stat-total-rx">—</div>
|
||||
</div>
|
||||
<div class="stat-card">
|
||||
<div class="label">Avg TX Mbps</div>
|
||||
<div class="value" id="stat-avg-tx">—</div>
|
||||
</div>
|
||||
<div class="stat-card">
|
||||
<div class="label">Avg RX Mbps</div>
|
||||
<div class="value" id="stat-avg-rx">—</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Chart for selected session -->
|
||||
<div class="chart-section">
|
||||
<h2 id="chart-title">Select a test below to view its throughput chart</h2>
|
||||
<div class="chart-container">
|
||||
<canvas id="throughput-chart"></canvas>
|
||||
<div class="chart-placeholder" id="chart-placeholder">Click a row in the table to load the throughput graph for that session.</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Sessions table -->
|
||||
<div class="table-wrap">
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>#</th>
|
||||
<th>Date</th>
|
||||
<th>Protocol</th>
|
||||
<th>Direction</th>
|
||||
<th>TX Bytes</th>
|
||||
<th>RX Bytes</th>
|
||||
<th>Duration</th>
|
||||
<th>Avg TX Mbps</th>
|
||||
<th>Avg RX Mbps</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="sessions-body">
|
||||
<tr><td colspan="9" class="no-data">Loading sessions...</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="footer">Powered by btest-rs</div>
|
||||
|
||||
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
|
||||
<script>
|
||||
var currentIp = "{{ ip }}";
|
||||
var throughputChart = null;
|
||||
|
||||
function formatBytes(b) {
|
||||
if (b === 0) return '0 B';
|
||||
var units = ['B', 'KB', 'MB', 'GB', 'TB'];
|
||||
var i = Math.floor(Math.log(b) / Math.log(1024));
|
||||
if (i >= units.length) i = units.length - 1;
|
||||
return (b / Math.pow(1024, i)).toFixed(1) + ' ' + units[i];
|
||||
}
|
||||
|
||||
function formatMbps(bytesPerSec) {
|
||||
return (bytesPerSec * 8 / 1e6).toFixed(2);
|
||||
}
|
||||
|
||||
function durationStr(startedAt, endedAt) {
|
||||
if (!startedAt || !endedAt) return '--';
|
||||
var ms = new Date(endedAt) - new Date(startedAt);
|
||||
if (ms < 0) return '--';
|
||||
var s = Math.round(ms / 1000);
|
||||
if (s < 60) return s + 's';
|
||||
return Math.floor(s / 60) + 'm ' + (s % 60) + 's';
|
||||
}
|
||||
|
||||
function durationSec(startedAt, endedAt) {
|
||||
if (!startedAt || !endedAt) return 0;
|
||||
var ms = new Date(endedAt) - new Date(startedAt);
|
||||
return Math.max(ms / 1000, 0.001);
|
||||
}
|
||||
|
||||
// Load summary stats for the current IP and fill the stat cards.
// Errors are swallowed on purpose: on failure the cards simply keep their
// "—" placeholder text.
fetch('/api/ip/' + encodeURIComponent(currentIp) + '/stats')
  .then(function(r) { return r.json(); })
  .then(function(data) {
    // Fall back to 0 for any missing/undefined field in the response.
    document.getElementById('stat-total-tests').textContent = data.total_sessions || 0;
    document.getElementById('stat-total-tx').textContent = formatBytes(data.total_tx_bytes || 0);
    document.getElementById('stat-total-rx').textContent = formatBytes(data.total_rx_bytes || 0);
    document.getElementById('stat-avg-tx').textContent = data.avg_tx_mbps ? data.avg_tx_mbps.toFixed(2) : '0.00';
    document.getElementById('stat-avg-rx').textContent = data.avg_rx_mbps ? data.avg_rx_mbps.toFixed(2) : '0.00';
  })
  .catch(function() {});
|
||||
|
||||
// Load sessions list for the current IP and populate the table.
// Each row is clickable and loads that session's throughput chart; the most
// recent session is auto-selected once the table is built.
fetch('/api/ip/' + encodeURIComponent(currentIp) + '/sessions')
  .then(function(r) { return r.json(); })
  .then(function(sessions) {
    var tbody = document.getElementById('sessions-body');
    if (!sessions || sessions.length === 0) {
      tbody.innerHTML = '<tr><td colspan="9" class="no-data">No test sessions found for this IP.</td></tr>';
      return;
    }
    tbody.innerHTML = '';
    sessions.forEach(function(s, i) {
      var tr = document.createElement('tr');
      tr.dataset.sessionId = s.id;
      tr.onclick = function() { selectSession(s.id, tr); };

      // Averages are derived client-side from byte totals and wall-clock
      // duration (durationSec clamps to >= 0.001 so division is safe).
      var dur = durationSec(s.started_at, s.ended_at);
      var avgTx = dur > 0 ? formatMbps(s.tx_bytes / dur) : '0.00';
      var avgRx = dur > 0 ? formatMbps(s.rx_bytes / dur) : '0.00';
      var proto = (s.protocol || 'TCP').toUpperCase();
      var dir = (s.direction || 'BOTH').toUpperCase();
      var protoClass = proto === 'UDP' ? 'proto-udp' : 'proto-tcp';
      var dirClass = dir === 'TX' ? 'dir-tx' : dir === 'RX' ? 'dir-rx' : 'dir-both';

      // NOTE(review): session fields are interpolated into innerHTML
      // unescaped. They come from this server's own DB, but confirm none of
      // them (e.g. protocol/direction) can carry attacker-supplied markup.
      tr.innerHTML =
        '<td>' + (i + 1) + '</td>' +
        '<td>' + (s.started_at || '--') + '</td>' +
        '<td class="' + protoClass + '">' + proto + '</td>' +
        '<td class="' + dirClass + '">' + dir + '</td>' +
        '<td>' + formatBytes(s.tx_bytes || 0) + '</td>' +
        '<td>' + formatBytes(s.rx_bytes || 0) + '</td>' +
        '<td>' + durationStr(s.started_at, s.ended_at) + '</td>' +
        '<td>' + avgTx + '</td>' +
        '<td>' + avgRx + '</td>';
      tbody.appendChild(tr);
    });

    // Auto-select the first (most recent) session
    if (sessions.length > 0) {
      var firstRow = tbody.querySelector('tr');
      if (firstRow) selectSession(sessions[0].id, firstRow);
    }
  })
  .catch(function() {
    document.getElementById('sessions-body').innerHTML =
      '<tr><td colspan="9" class="no-data">Failed to load sessions.</td></tr>';
  });
|
||||
|
||||
// Mark the clicked table row as selected and fetch that session's
// per-second interval data, handing it to renderChart.
function selectSession(sessionId, rowEl) {
  // Highlight selected row
  var rows = document.querySelectorAll('#sessions-body tr');
  rows.forEach(function(r) { r.classList.remove('selected'); });
  rowEl.classList.add('selected');

  // Update the chart header and hide the placeholder while data loads.
  document.getElementById('chart-title').textContent = 'Throughput for session #' + sessionId;
  document.getElementById('chart-placeholder').style.display = 'none';

  fetch('/api/session/' + sessionId + '/intervals')
    .then(function(r) { return r.json(); })
    .then(function(intervals) {
      renderChart(intervals);
    })
    .catch(function() {
      // On fetch/parse failure, restore the placeholder with an error message.
      document.getElementById('chart-placeholder').style.display = 'block';
      document.getElementById('chart-placeholder').textContent = 'Failed to load interval data.';
    });
}
|
||||
|
||||
// Render (or re-render) the per-second throughput line chart from an array
// of {second, tx_bytes, rx_bytes} interval objects. Destroys any previous
// Chart.js instance first, since Chart.js refuses to reuse a canvas.
function renderChart(intervals) {
  var canvas = document.getElementById('throughput-chart');
  if (throughputChart) {
    throughputChart.destroy();
  }

  // No data: show the placeholder text instead of an empty chart.
  if (!intervals || intervals.length === 0) {
    document.getElementById('chart-placeholder').style.display = 'block';
    document.getElementById('chart-placeholder').textContent = 'No interval data available for this session.';
    return;
  }

  // X axis labels are elapsed seconds; Y values are bytes converted to Mbps.
  var labels = intervals.map(function(d) { return d.second + 's'; });
  var txData = intervals.map(function(d) { return (d.tx_bytes * 8 / 1e6).toFixed(2); });
  var rxData = intervals.map(function(d) { return (d.rx_bytes * 8 / 1e6).toFixed(2); });

  throughputChart = new Chart(canvas, {
    type: 'line',
    data: {
      labels: labels,
      datasets: [
        {
          label: 'TX Mbps',
          data: txData,
          borderColor: '#f78166',
          backgroundColor: 'rgba(247, 129, 102, 0.1)',
          borderWidth: 2,
          fill: true,
          tension: 0.3,
          pointRadius: 1
        },
        {
          label: 'RX Mbps',
          data: rxData,
          borderColor: '#58a6ff',
          backgroundColor: 'rgba(88, 166, 255, 0.1)',
          borderWidth: 2,
          fill: true,
          tension: 0.3,
          pointRadius: 1
        }
      ]
    },
    options: {
      responsive: true,
      maintainAspectRatio: false,
      // Hover anywhere on a column shows both datasets' values.
      interaction: {
        intersect: false,
        mode: 'index'
      },
      scales: {
        x: {
          title: { display: true, text: 'Time', color: '#8b949e' },
          ticks: { color: '#8b949e' },
          grid: { color: '#21262d' }
        },
        y: {
          title: { display: true, text: 'Mbps', color: '#8b949e' },
          ticks: { color: '#8b949e' },
          grid: { color: '#21262d' },
          beginAtZero: true
        }
      },
      plugins: {
        legend: {
          labels: { color: '#e1e4e8' }
        },
        // Tooltip colors match the page's dark theme.
        tooltip: {
          backgroundColor: '#161b22',
          borderColor: '#30363d',
          borderWidth: 1,
          titleColor: '#e1e4e8',
          bodyColor: '#8b949e'
        }
      }
    }
  });
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
160
src/server_pro/web/templates/index.html
Normal file
160
src/server_pro/web/templates/index.html
Normal file
@@ -0,0 +1,160 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<title>btest-rs Public Bandwidth Test Server</title>
|
||||
<style>
|
||||
* { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
||||
background: #0f1117;
|
||||
color: #e1e4e8;
|
||||
min-height: 100vh;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
.container {
|
||||
max-width: 560px;
|
||||
width: 90%;
|
||||
text-align: center;
|
||||
padding: 2rem;
|
||||
}
|
||||
h1 {
|
||||
font-size: 2rem;
|
||||
margin-bottom: 0.5rem;
|
||||
color: #58a6ff;
|
||||
}
|
||||
.subtitle {
|
||||
color: #8b949e;
|
||||
margin-bottom: 2rem;
|
||||
line-height: 1.5;
|
||||
}
|
||||
.search-box {
|
||||
display: flex;
|
||||
gap: 0.5rem;
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
.search-box input {
|
||||
flex: 1;
|
||||
padding: 0.75rem 1rem;
|
||||
border: 1px solid #30363d;
|
||||
border-radius: 6px;
|
||||
background: #161b22;
|
||||
color: #e1e4e8;
|
||||
font-size: 1rem;
|
||||
outline: none;
|
||||
}
|
||||
.search-box input:focus {
|
||||
border-color: #58a6ff;
|
||||
}
|
||||
.search-box input::placeholder {
|
||||
color: #484f58;
|
||||
}
|
||||
.search-box button {
|
||||
padding: 0.75rem 1.5rem;
|
||||
background: #238636;
|
||||
color: #fff;
|
||||
border: none;
|
||||
border-radius: 6px;
|
||||
font-size: 1rem;
|
||||
cursor: pointer;
|
||||
white-space: nowrap;
|
||||
}
|
||||
.search-box button:hover {
|
||||
background: #2ea043;
|
||||
}
|
||||
.info {
|
||||
background: #161b22;
|
||||
border: 1px solid #30363d;
|
||||
border-radius: 8px;
|
||||
padding: 1.5rem;
|
||||
text-align: left;
|
||||
line-height: 1.6;
|
||||
color: #8b949e;
|
||||
}
|
||||
.info h3 {
|
||||
color: #e1e4e8;
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
.info code {
|
||||
background: #0d1117;
|
||||
padding: 0.15rem 0.4rem;
|
||||
border-radius: 4px;
|
||||
font-size: 0.9em;
|
||||
color: #58a6ff;
|
||||
}
|
||||
.auto-link {
|
||||
margin-top: 1rem;
|
||||
font-size: 0.9rem;
|
||||
}
|
||||
.auto-link a {
|
||||
color: #58a6ff;
|
||||
text-decoration: none;
|
||||
}
|
||||
.auto-link a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
.footer {
|
||||
margin-top: 2rem;
|
||||
color: #484f58;
|
||||
font-size: 0.8rem;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<h1>btest-rs</h1>
|
||||
<p class="subtitle">Public MikroTik Bandwidth Test Server — view your test results and history.</p>
|
||||
|
||||
<form class="search-box" id="ip-form" onsubmit="return goToDashboard()">
|
||||
<input type="text" id="ip-input" placeholder="Enter your IP address (e.g. 203.0.113.5)" autocomplete="off">
|
||||
<button type="submit">View Results</button>
|
||||
</form>
|
||||
|
||||
<div class="auto-link" id="auto-detect">
|
||||
Detecting your IP...
|
||||
</div>
|
||||
|
||||
<div class="info">
|
||||
<h3>How it works</h3>
|
||||
<p>
|
||||
Run a bandwidth test from your MikroTik router targeting this server.
|
||||
After the test completes, enter your public IP above to see
|
||||
throughput charts, session history, and aggregate statistics.
|
||||
</p>
|
||||
<p style="margin-top: 0.5rem;">
|
||||
Example: <code>/tool bandwidth-test address=this-server protocol=tcp direction=both</code>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div class="footer">Powered by btest-rs</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Form submit handler: navigate to the dashboard for the entered IP.
// Always returns false so the browser never performs a real form submission.
function goToDashboard() {
  var raw = document.getElementById('ip-input').value;
  var ip = raw.trim();
  if (ip) {
    window.location.href = '/dashboard/' + encodeURIComponent(ip);
  }
  return false;
}
|
||||
|
||||
// Auto-detect visitor IP and offer a direct link
// Uses the third-party ipify service; prefills the input and shows a direct
// dashboard link. On any failure the "Detecting your IP..." hint is cleared.
fetch('https://api.ipify.org?format=json')
  .then(function(r) { return r.json(); })
  .then(function(data) {
    if (data.ip) {
      document.getElementById('ip-input').value = data.ip;
      document.getElementById('auto-detect').innerHTML =
        'Detected IP: <a href="/dashboard/' + encodeURIComponent(data.ip) + '">' + data.ip + '</a> — click to view your dashboard';
    }
  })
  .catch(function() {
    document.getElementById('auto-detect').textContent = '';
  });
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
Reference in New Issue
Block a user