Implement rate limiting with 41 responses and comprehensive logging

- Add concurrent connection handling with tokio::spawn for proper rate limiting
- Send '41 Server unavailable' responses instead of dropping connections
- Move request logger initialization earlier to enable rate limiting logs
- Add logging for rate limited requests: 'Concurrent request limit exceeded'
- Fix clippy warnings: needless borrows and match simplification
- Update test script analysis to expect 41 responses for rate limiting
Author: Jeena
Date:   2026-01-16 06:00:18 +00:00
Parent: da39f37559
Commit: 33ae576b25
5 changed files with 52 additions and 20 deletions
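
The 41 response and the "Concurrent request limit exceeded" log live in server.rs, which is not among the hunks shown below (only main.rs is). Here is a minimal sketch of how such a guard can look, assuming a tokio::sync::Semaphore; the real handle_connection receives max_concurrent as a plain count plus the directory, host, and test delay, so the signature and names here are illustrative only:

    use std::sync::Arc;
    use tokio::io::{AsyncWrite, AsyncWriteExt};
    use tokio::sync::Semaphore;

    async fn handle_connection<S>(mut stream: S, limiter: Arc<Semaphore>) -> std::io::Result<()>
    where
        S: AsyncWrite + Unpin,
    {
        match limiter.try_acquire() {
            // A permit is free: serve the request while holding it.
            Ok(_permit) => {
                // ... normal request handling ...
                Ok(())
            }
            // All permits are taken: log and answer with a 41 status line
            // instead of silently dropping the connection.
            Err(_) => {
                tracing::warn!("Concurrent request limit exceeded");
                stream.write_all(b"41 Server unavailable\r\n").await
            }
        }
    }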

@@ -53,6 +53,11 @@ struct Args {
     /// Hostname for the server
     #[arg(short = 'H', long)]
     host: Option<String>,
+
+    /// TESTING ONLY: Add delay before processing (seconds) [debug builds only]
+    #[cfg(debug_assertions)]
+    #[arg(long, value_name = "SECONDS")]
+    test_processing_delay: Option<u64>,
 }
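
Since the #[arg] attributes indicate clap's derive API, a self-contained reproduction of just this argument may help: clap kebab-cases the field name, so the flag arrives as --test-processing-delay, and release builds compile the field out and reject it. The struct and output below are invented for illustration:

    use clap::Parser;

    #[derive(Parser)]
    struct DelayArgs {
        /// TESTING ONLY: Add delay before processing (seconds)
        #[cfg(debug_assertions)]
        #[arg(long, value_name = "SECONDS")]
        test_processing_delay: Option<u64>,
    }

    fn main() {
        // e.g. `mybin --test-processing-delay 5` works in debug builds only.
        let args = DelayArgs::parse();
        #[cfg(debug_assertions)]
        println!("delay = {:?}", args.test_processing_delay);
        #[cfg(not(debug_assertions))]
        let _ = args; // the field does not exist in release builds
    }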
@@ -91,6 +96,16 @@ async fn main() {
         std::process::exit(1);
     }
 
+    // TESTING ONLY: Read delay argument (debug builds only)
+    #[cfg(debug_assertions)]
+    let test_processing_delay = args.test_processing_delay
+        .filter(|&d| d > 0 && d <= 300)
+        .unwrap_or(0);
+
+    // Production: always 0 delay
+    #[cfg(not(debug_assertions))]
+    let test_processing_delay = 0;
+
     // Validate directory
     let dir_path = Path::new(&root);
     if !dir_path.exists() || !dir_path.is_dir() {
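
The clamp above maps a missing flag, a 0, or anything over 300 seconds to 0. How the delay is consumed inside server::handle_connection is not part of the shown hunks; presumably something like the following, assuming tokio::time::sleep (helper name invented):

    use std::time::Duration;

    // Hypothetical helper: honour the pre-validated delay before processing.
    async fn apply_test_delay(test_delay: u64) {
        // test_delay is already 0 or 1..=300, so no further range checks.
        if test_delay > 0 {
            tokio::time::sleep(Duration::from_secs(test_delay)).await;
        }
    }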
@@ -120,10 +135,13 @@ async fn main() {
         let dir = root.clone();
         let expected_host = "localhost".to_string(); // Override for testing
         let max_concurrent = max_concurrent_requests;
-        if let Ok(stream) = acceptor.accept(stream).await {
-            if let Err(e) = server::handle_connection(stream, &dir, &expected_host, max_concurrent).await {
-                tracing::error!("Error handling connection: {}", e);
-            }
-        }
+        let test_delay = test_processing_delay;
+        tokio::spawn(async move {
+            if let Ok(stream) = acceptor.accept(stream).await {
+                if let Err(e) = server::handle_connection(stream, &dir, &expected_host, max_concurrent, test_delay).await {
+                    tracing::error!("Error handling connection: {}", e);
+                }
+            }
+        });
     }
 }
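
This last hunk is the "concurrent connection handling" bullet: previously the accept loop handled each connection inline, so at most one request was in flight and the limit could never trigger. Wrapping the handler in tokio::spawn returns the loop to accepting immediately; because async move captures by value, the per-connection data (dir, expected_host, test_delay) is cloned or copied just before the spawn. A stripped-down version of the pattern, with invented names:

    use tokio::net::{TcpListener, TcpStream};

    async fn accept_loop(listener: TcpListener) -> std::io::Result<()> {
        loop {
            let (stream, _addr) = listener.accept().await?;
            // Each task needs owned data: `async move` takes ownership,
            // which is why the diff clones `dir` and friends before spawning.
            tokio::spawn(async move {
                if let Err(e) = serve(stream).await {
                    tracing::error!("Error handling connection: {}", e);
                }
            });
        }
    }

    async fn serve(_stream: TcpStream) -> std::io::Result<()> {
        // ... TLS handshake and request handling would go here ...
        Ok(())
    }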