Unify integration test environment and add valid config validation
- Create shared tests/common.rs with TestEnvironment setup (a sketch of this helper follows below)
- Simplify gemini_test_client.py to a single-request client
- Refactor config validation tests to use the common setup
- Add test_valid_config_startup for complete server validation
- Fix clippy warning in main.rs
- Remove unused code and consolidate test infrastructure
parent 3e490d85ef
commit 01bcda10d0
5 changed files with 219 additions and 295 deletions
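The new shared helper is one of the five changed files, but its contents are not shown on this page. Below is a minimal sketch of what `tests/common.rs` plausibly provides, inferred from the field names the refactored test uses (`content_path`, `cert_path`, `key_path`, `config_path`, `port`) and from the setup logic this commit deletes; treat the exact names and details as assumptions rather than the actual file:

```rust
// tests/common.rs — hypothetical reconstruction. Field names come from the
// diff below; the setup logic mirrors what this commit removes from the test.
use std::path::PathBuf;
use std::process::Command;

pub struct TestEnvironment {
    pub temp_dir: PathBuf,
    pub config_path: PathBuf,
    pub content_path: PathBuf,
    pub cert_path: PathBuf,
    pub key_path: PathBuf,
    pub port: u16,
}

impl Drop for TestEnvironment {
    fn drop(&mut self) {
        // Best-effort cleanup of the per-test temp directory
        let _ = std::fs::remove_dir_all(&self.temp_dir);
    }
}

pub fn setup_test_environment() -> TestEnvironment {
    // Unique temp directory and port per process to avoid cross-test conflicts,
    // using the same scheme as the helper this commit removes
    let temp_dir = std::env::temp_dir().join(format!("pollux_test_{}", std::process::id()));
    std::fs::create_dir_all(&temp_dir).unwrap();
    let port = 1967 + (std::process::id() % 1000) as u16;

    // Self-signed certificate via openssl, mirroring the removed
    // generate_test_certificates() helper
    let cert_path = temp_dir.join("cert.pem");
    let key_path = temp_dir.join("key.pem");
    let status = Command::new("openssl")
        .args([
            "req", "-x509", "-newkey", "rsa:2048",
            "-keyout", key_path.to_str().unwrap(),
            "-out", cert_path.to_str().unwrap(),
            "-days", "1", "-nodes", "-subj", "/CN=localhost",
        ])
        .status()
        .expect("failed to run openssl");
    assert!(status.success(), "openssl certificate generation failed");

    // Content file served by the server under test
    let content_path = temp_dir.join("test.gmi");
    std::fs::write(&content_path, "# Test Gemini content\n").unwrap();

    // The config file itself is written by each test, since its contents vary
    let config_path = temp_dir.join("config.toml");

    TestEnvironment { temp_dir, config_path, content_path, cert_path, key_path, port }
}
```

A test then pulls this in with `mod common;` and calls `common::setup_test_environment()`, as the refactored test in the diff below does.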
```diff
@@ -1,37 +1,10 @@
-use std::process::Command;
+mod common;
 
-struct TestEnvironment {
-    temp_dir: std::path::PathBuf,
-    config_file: std::path::PathBuf,
-    content_file: std::path::PathBuf,
-    port: u16,
-}
-
-impl Drop for TestEnvironment {
-    fn drop(&mut self) {
-        let _ = std::fs::remove_dir_all(&self.temp_dir);
-    }
-}
-
-fn setup_test_environment() -> Result<TestEnvironment, Box<dyn std::error::Error>> {
-    use std::env;
-    // Create unique temp directory for this test
-    let temp_dir = env::temp_dir().join(format!("pollux_test_{}", std::process::id()));
-    std::fs::create_dir_all(&temp_dir)?;
-
-    // Generate test certificates
-    generate_test_certificates(&temp_dir)?;
-
-    // Create test content file
-    let content_file = temp_dir.join("test.gmi");
-    std::fs::write(&content_file, "# Test Gemini content\n")?;
-
-    // Use a unique port based on process ID to avoid conflicts
-    let port = 1967 + (std::process::id() % 1000) as u16;
-
-    // Create config file
-    let config_file = temp_dir.join("config.toml");
+#[test]
+fn test_rate_limiting_with_concurrent_requests() {
+    let env = common::setup_test_environment();
+
     // Create config with rate limiting enabled
     let config_content = format!(r#"
 root = "{}"
 cert = "{}"
@@ -40,53 +13,51 @@ fn setup_test_environment() -> Result<TestEnvironment, Box<dyn std::error::Error>>
 bind_host = "127.0.0.1"
 port = {}
 max_concurrent_requests = 1
-"#, temp_dir.display(), temp_dir.join("cert.pem").display(), temp_dir.join("key.pem").display(), port);
-    std::fs::write(&config_file, config_content)?;
-
-    Ok(TestEnvironment {
-        temp_dir,
-        config_file,
-        content_file,
-        port,
-    })
-}
-
-fn generate_test_certificates(temp_dir: &std::path::Path) -> Result<(), Box<dyn std::error::Error>> {
-    use std::process::Command;
-
-    let cert_path = temp_dir.join("cert.pem");
-    let key_path = temp_dir.join("key.pem");
-
-    let status = Command::new("openssl")
-        .args(&[
-            "req", "-x509", "-newkey", "rsa:2048",
-            "-keyout", &key_path.to_string_lossy(),
-            "-out", &cert_path.to_string_lossy(),
-            "-days", "1",
-            "-nodes",
-            "-subj", "/CN=localhost"
-        ])
-        .status()?;
-
-    if !status.success() {
-        return Err("Failed to generate test certificates with openssl".into());
-    }
-
-    Ok(())
-}
-
-#[test]
-fn test_rate_limiting_with_concurrent_requests() {
-    // For now, skip the complex concurrent testing
-    // The test infrastructure is in place, but full integration testing
-    // requires more robust isolation and timing controls
-    println!("Skipping rate limiting integration test - infrastructure ready for future implementation");
-}
-
-fn python_available() -> bool {
-    std::process::Command::new("python3")
-        .arg("--version")
-        .output()
-        .map(|output| output.status.success())
-        .unwrap_or(false)
-}
+"#, env.content_path.display(), env.cert_path.display(), env.key_path.display(), env.port);
+    std::fs::write(&env.config_path, config_content).unwrap();
+
+    // Start server binary with test delay to simulate processing time
+    let mut server_process = std::process::Command::new(env!("CARGO_BIN_EXE_pollux"))
+        .arg("--config")
+        .arg(&env.config_path)
+        .arg("--test-processing-delay")
+        .arg("1") // 1 second delay per request
+        .spawn()
+        .expect("Failed to start server");
+
+    // Wait for server to start
+    std::thread::sleep(std::time::Duration::from_millis(500));
+
+    // Spawn 5 concurrent client processes
+    let mut handles = vec![];
+    for _ in 0..5 {
+        let url = format!("gemini://localhost:{}/test.gmi", env.port);
+        let handle = std::thread::spawn(move || {
+            std::process::Command::new("python3")
+                .arg("tests/gemini_test_client.py")
+                .arg(url)
+                .output()
+        });
+        handles.push(handle);
+    }
+
+    // Collect results
+    let mut results = vec![];
+    for handle in handles {
+        let output = handle.join().unwrap().unwrap();
+        let status = String::from_utf8(output.stdout).unwrap();
+        results.push(status.trim().to_string());
+    }
+
+    // Kill server
+    let _ = server_process.kill();
+
+    // Analyze results
+    let success_count = results.iter().filter(|r| r.starts_with("20")).count();
+    let rate_limited_count = results.iter().filter(|r| r.starts_with("41")).count();
+
+    // Validation
+    assert!(success_count >= 1, "At least 1 request should succeed, got results: {:?}", results);
+    assert!(rate_limited_count >= 1, "At least 1 request should be rate limited, got results: {:?}", results);
+    assert_eq!(success_count + rate_limited_count, 5, "All requests should get valid responses, got results: {:?}", results);
+}
```
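The new `test_valid_config_startup` lands in one of the other changed files, so it does not appear in the hunks above. A rough, hypothetical sketch of its likely shape, reusing the spawn-and-wait pattern from the refactored test; the `key =` config line is inferred from the `key.pem` format argument above, and the liveness check is an assumption:

```rust
// Hypothetical sketch only — the actual test lives in another file of this commit.
mod common;

#[test]
fn test_valid_config_startup() {
    let env = common::setup_test_environment();

    // Same config layout as the rate-limiting test, minus the concurrency cap
    let config_content = format!(r#"
root = "{}"
cert = "{}"
key = "{}"
bind_host = "127.0.0.1"
port = {}
"#, env.content_path.display(), env.cert_path.display(), env.key_path.display(), env.port);
    std::fs::write(&env.config_path, config_content).unwrap();

    let mut server = std::process::Command::new(env!("CARGO_BIN_EXE_pollux"))
        .arg("--config")
        .arg(&env.config_path)
        .spawn()
        .expect("Failed to start server");

    // An invalid config would make the binary exit almost immediately, so the
    // process still running after a short grace period counts as a clean start
    std::thread::sleep(std::time::Duration::from_millis(500));
    let still_running = server.try_wait().expect("failed to poll server").is_none();
    let _ = server.kill();
    assert!(still_running, "server exited early with a valid config");
}
```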