Implement rate limiting with 41 responses and comprehensive logging
- Add concurrent connection handling with tokio::spawn for proper rate limiting
- Send '41 Server unavailable' responses instead of dropping connections
- Move request logger initialization earlier to enable rate limiting logs
- Add logging for rate limited requests: 'Concurrent request limit exceeded'
- Fix clippy warnings: needless borrows and match simplification
- Update test script analysis to expect 41 responses for rate limiting
This commit is contained in:
parent
da39f37559
commit
33ae576b25
5 changed files with 52 additions and 20 deletions
|
|
@ -16,8 +16,8 @@ pub async fn serve_file(
|
|||
file_path: &Path,
|
||||
) -> io::Result<()> {
|
||||
if file_path.exists() && file_path.is_file() {
|
||||
let mime_type = get_mime_type(&file_path);
|
||||
let content = fs::read(&file_path)?;
|
||||
let mime_type = get_mime_type(file_path);
|
||||
let content = fs::read(file_path)?;
|
||||
let mut response = format!("20 {}\r\n", mime_type).into_bytes();
|
||||
response.extend(content);
|
||||
stream.write_all(&response).await?;
|
||||
|
|
@ -33,6 +33,7 @@ pub async fn handle_connection(
|
|||
dir: &str,
|
||||
expected_host: &str,
|
||||
max_concurrent_requests: usize,
|
||||
test_processing_delay: u64,
|
||||
) -> io::Result<()> {
|
||||
const MAX_REQUEST_SIZE: usize = 4096;
|
||||
const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
|
|
@ -58,9 +59,13 @@ pub async fn handle_connection(
|
|||
// Read successful, continue processing
|
||||
let request = String::from_utf8_lossy(&request_buf).trim().to_string();
|
||||
|
||||
// Initialize logger early for all request types
|
||||
let logger = RequestLogger::new(&stream, request.clone());
|
||||
|
||||
// Check concurrent request limit after TLS handshake and request read
|
||||
let current = ACTIVE_REQUESTS.fetch_add(1, Ordering::Relaxed);
|
||||
if current >= max_concurrent_requests {
|
||||
logger.log_error(41, "Concurrent request limit exceeded");
|
||||
ACTIVE_REQUESTS.fetch_sub(1, Ordering::Relaxed);
|
||||
// Rate limited - send proper 41 response
|
||||
send_response(&mut stream, "41 Server unavailable\r\n").await?;
|
||||
|
|
@ -70,14 +75,12 @@ pub async fn handle_connection(
|
|||
// Process the request
|
||||
// Validate request
|
||||
if request.is_empty() {
|
||||
let logger = RequestLogger::new(&stream, request);
|
||||
logger.log_error(59, "Empty request");
|
||||
ACTIVE_REQUESTS.fetch_sub(1, Ordering::Relaxed);
|
||||
return send_response(&mut stream, "59 Bad Request\r\n").await;
|
||||
}
|
||||
|
||||
if request.len() > 1024 {
|
||||
let logger = RequestLogger::new(&stream, request);
|
||||
logger.log_error(59, "Request too large");
|
||||
ACTIVE_REQUESTS.fetch_sub(1, Ordering::Relaxed);
|
||||
return send_response(&mut stream, "59 Bad Request\r\n").await;
|
||||
|
|
@ -87,15 +90,17 @@ pub async fn handle_connection(
|
|||
let path = match parse_gemini_url(&request, expected_host) {
|
||||
Ok(p) => p,
|
||||
Err(_) => {
|
||||
let logger = RequestLogger::new(&stream, request);
|
||||
logger.log_error(59, "Invalid URL format");
|
||||
ACTIVE_REQUESTS.fetch_sub(1, Ordering::Relaxed);
|
||||
return send_response(&mut stream, "59 Bad Request\r\n").await;
|
||||
}
|
||||
};
|
||||
|
||||
// Initialize logger now that we have the full request URL
|
||||
let logger = RequestLogger::new(&stream, request);
|
||||
// TESTING ONLY: Add delay for rate limiting tests (debug builds only)
|
||||
#[cfg(debug_assertions)]
|
||||
if test_processing_delay > 0 {
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(test_processing_delay)).await;
|
||||
}
|
||||
|
||||
// Resolve file path with security
|
||||
let file_path = match resolve_file_path(&path, dir) {
|
||||
|
|
@ -109,6 +114,8 @@ pub async fn handle_connection(
|
|||
|
||||
// No delay for normal operation
|
||||
|
||||
// Processing complete
|
||||
|
||||
// Serve the file
|
||||
match serve_file(&mut stream, &file_path).await {
|
||||
Ok(_) => logger.log_success(20),
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue