refactor: extract shared engine helpers, federation clone-before-send, constants
Some checks failed
Mirror to GitHub / mirror (push) Failing after 30s
Build Release Binaries / build-amd64 (push) Failing after 3m48s

Engine deduplication (PRD-engine-dedup.md):
- build_call_config(): shared CallConfig construction (was 23 lines × 2)
- codec_to_profile(): shared CodecId → QualityProfile mapping (was 19 lines × 2)
- run_signal_task(): shared signal handler (was 48 lines × 2)
- Net -39 lines from engine.rs, 6 duplicated blocks → single-line calls

Quick wins from REFACTOR-codebase-audit.md:
- 6 magic number constants extracted (CAPTURE_POLL_MS, RECV_TIMEOUT_MS, etc.)
- DRED_POLL_INTERVAL moved from 2 local defs to 1 module-level const
- federation.rs: forward_to_peers, broadcast_signal, send_signal_to_peer
  now clone peer list and release lock before sending (was holding Mutex
  across async I/O — last lock-during-send pattern eliminated)
- main.rs: close_transport() helper replaces 12 silent .ok() calls with
  debug-level logging

314 tests passing, 0 regressions.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Siavash Sameni
2026-04-13 15:22:44 +04:00
parent fdb78e08bd
commit ba12aae439
4 changed files with 317 additions and 204 deletions

View File

@@ -213,16 +213,19 @@ impl FederationManager {
/// `origin_relay_fp` against its own fp and drops self-sourced
/// forwards.
pub async fn broadcast_signal(&self, msg: &wzp_proto::SignalMessage) -> usize {
let links = self.peer_links.lock().await;
let peers: Vec<(String, String, Arc<QuinnTransport>)> = {
let links = self.peer_links.lock().await;
links.iter().map(|(fp, l)| (fp.clone(), l.label.clone(), l.transport.clone())).collect()
}; // lock released
let mut count = 0;
for (fp, link) in links.iter() {
match link.transport.send_signal(msg).await {
for (fp, label, transport) in &peers {
match transport.send_signal(msg).await {
Ok(()) => {
count += 1;
tracing::debug!(peer = %link.label, %fp, "federation: broadcast signal ok");
tracing::debug!(peer = %label, %fp, "federation: broadcast signal ok");
}
Err(e) => {
tracing::warn!(peer = %link.label, %fp, error = %e, "federation: broadcast signal failed");
tracing::warn!(peer = %label, %fp, error = %e, "federation: broadcast signal failed");
}
}
}
@@ -243,10 +246,12 @@ impl FederationManager {
msg: &wzp_proto::SignalMessage,
) -> Result<(), String> {
let normalized = normalize_fp(peer_relay_fp);
let links = self.peer_links.lock().await;
match links.get(&normalized) {
Some(link) => link
.transport
let transport = {
let links = self.peer_links.lock().await;
links.get(&normalized).map(|l| l.transport.clone())
}; // lock released
match transport {
Some(t) => t
.send_signal(msg)
.await
.map_err(|e| format!("send to peer {normalized}: {e}")),
@@ -403,20 +408,22 @@ impl FederationManager {
/// or rate limiting; the body currently forwards on `room_hash` alone
/// because that's what the wire format carries.
pub async fn forward_to_peers(&self, _room_name: &str, room_hash: &[u8; 8], media_data: &Bytes) {
let links = self.peer_links.lock().await;
if links.is_empty() {
return;
}
for (_fp, link) in links.iter() {
let peers: Vec<(String, Arc<QuinnTransport>)> = {
let links = self.peer_links.lock().await;
if links.is_empty() { return; }
links.values().map(|l| (l.label.clone(), l.transport.clone())).collect()
}; // lock released
for (label, transport) in &peers {
let mut tagged = Vec::with_capacity(8 + media_data.len());
tagged.extend_from_slice(room_hash);
tagged.extend_from_slice(media_data);
match link.transport.send_raw_datagram(&tagged) {
match transport.send_raw_datagram(&tagged) {
Ok(()) => {
self.metrics.federation_packets_forwarded
.with_label_values(&[&link.label, "out"]).inc();
.with_label_values(&[label, "out"]).inc();
}
Err(e) => warn!(peer = %link.label, "federation send error: {e}"),
Err(e) => warn!(peer = %label, "federation send error: {e}"),
}
}
}
@@ -483,9 +490,12 @@ async fn run_room_event_dispatcher(
let participants = fm.room_mgr.local_participant_list(&room);
info!(room = %room, count = participants.len(), "global room now active, announcing to peers");
let msg = SignalMessage::GlobalRoomActive { room, participants };
let links = fm.peer_links.lock().await;
for link in links.values() {
let _ = link.transport.send_signal(&msg).await;
let transports: Vec<Arc<QuinnTransport>> = {
let links = fm.peer_links.lock().await;
links.values().map(|l| l.transport.clone()).collect()
};
for t in &transports {
let _ = t.send_signal(&msg).await;
}
}
}
@@ -493,9 +503,12 @@ async fn run_room_event_dispatcher(
if fm.is_global_room(&room) {
info!(room = %room, "global room now inactive, announcing to peers");
let msg = SignalMessage::GlobalRoomInactive { room };
let links = fm.peer_links.lock().await;
for link in links.values() {
let _ = link.transport.send_signal(&msg).await;
let transports: Vec<Arc<QuinnTransport>> = {
let links = fm.peer_links.lock().await;
links.values().map(|l| l.transport.clone()).collect()
};
for t in &transports {
let _ = t.send_signal(&msg).await;
}
}
}

View File

@@ -23,6 +23,13 @@ use wzp_relay::presence::PresenceRegistry;
use wzp_relay::room::{self, RoomManager};
use wzp_relay::session_mgr::SessionManager;
/// Close a transport gracefully, logging any error at debug level.
///
/// Shared cleanup helper: replaces the scattered `transport.close().await.ok()`
/// call sites so a failed close is still visible (at debug level) instead of
/// being silently discarded. Close errors here are non-fatal by design — the
/// connection is being torn down anyway.
///
/// * `t` — the transport to close; taken as `&dyn MediaTransport` so any
///   transport implementation can be passed without monomorphizing per type.
/// * `context` — short call-site tag (e.g. "cleanup") recorded in the log line
///   to identify which teardown path hit the error.
async fn close_transport(t: &dyn wzp_proto::MediaTransport, context: &str) {
if let Err(e) = t.close().await {
// Debug, not warn: close failures during teardown are expected noise
// (peer already gone, stream reset) and should not alarm operators.
tracing::debug!(context, error = %e, "transport close (non-fatal)");
}
}
/// Parsed CLI result — config + identity path.
struct CliResult {
config: RelayConfig,
@@ -908,7 +915,7 @@ async fn main() -> anyhow::Result<()> {
}
}
}
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
@@ -1475,7 +1482,7 @@ async fn main() -> anyhow::Result<()> {
reg.unregister_local(&client_fp);
}
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
@@ -1499,14 +1506,14 @@ async fn main() -> anyhow::Result<()> {
Err(e) => {
metrics.auth_attempts.with_label_values(&["fail"]).inc();
error!(%addr, "auth failed: {e}");
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
}
}
Ok(Some(_)) => {
error!(%addr, "expected AuthToken as first signal, got something else");
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
Ok(None) => {
@@ -1515,7 +1522,7 @@ async fn main() -> anyhow::Result<()> {
}
Err(e) => {
error!(%addr, "signal recv error during auth: {e}");
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
}
@@ -1537,7 +1544,7 @@ async fn main() -> anyhow::Result<()> {
}
Err(e) => {
error!(%addr, "handshake failed: {e}");
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
};
@@ -1561,7 +1568,7 @@ async fn main() -> anyhow::Result<()> {
};
if !authorized {
warn!(%addr, room = %room_name, fp = %participant_fp, "rejected: not authorized for this call room");
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
info!(%addr, room = %room_name, fp = %participant_fp, "authorized for call room");
@@ -1602,7 +1609,7 @@ async fn main() -> anyhow::Result<()> {
tokio::select! { _ = up => {} _ = dn => {} }
stats_handle.abort();
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
} else {
// Room mode — enforce max sessions, then join room
let session_id = {
@@ -1611,7 +1618,7 @@ async fn main() -> anyhow::Result<()> {
Ok(id) => id,
Err(e) => {
error!(%addr, room = %room_name, "session rejected: {e}");
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
}
@@ -1626,7 +1633,7 @@ async fn main() -> anyhow::Result<()> {
metrics.active_sessions.dec();
let mut smgr = session_mgr.lock().await;
smgr.remove_session(session_id);
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
}
@@ -1676,7 +1683,7 @@ async fn main() -> anyhow::Result<()> {
metrics.active_sessions.dec();
let mut smgr = session_mgr.lock().await;
smgr.remove_session(session_id);
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
return;
}
}
@@ -1731,7 +1738,7 @@ async fn main() -> anyhow::Result<()> {
smgr.remove_session(session_id);
}
transport.close().await.ok();
close_transport(&*transport, "cleanup").await;
}
});
}