diff --git a/src/config.rs b/src/config.rs index 5fe24ed..5e5d65d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -96,6 +96,11 @@ pub struct Config { // Cluster configuration (Issue #45) pub cluster: ClusterConfig, + + // Connection pool overrides from YAML (Issue #114). + // When Some, these override env-var defaults when building the HTTP client. + pub pool_max_idle_per_host: Option<usize>, + pub pool_idle_timeout_secs: Option<u64>, } /// Helper to get a required environment variable. @@ -230,6 +235,11 @@ impl Config { let auto_disable_percentiles_on_warning = env_bool("AUTO_DISABLE_PERCENTILES_ON_WARNING", true); + let (pool_max_idle_per_host, pool_idle_timeout_secs) = match &yaml_config.config.pool { + Some(p) => (p.max_idle_per_host, p.idle_timeout_secs), + None => (None, None), + }; + let config = Config { target_url, request_type, @@ -251,6 +261,8 @@ impl Config { memory_critical_threshold_percent, auto_disable_percentiles_on_warning, cluster: ClusterConfig::from_env(), + pool_max_idle_per_host, + pool_idle_timeout_secs, }; config.validate()?; @@ -318,6 +330,11 @@ impl Config { let auto_disable_percentiles_on_warning = env_bool("AUTO_DISABLE_PERCENTILES_ON_WARNING", true); + let (pool_max_idle_per_host, pool_idle_timeout_secs) = match &yaml_config.config.pool { + Some(p) => (p.max_idle_per_host, p.idle_timeout_secs), + None => (None, None), + }; + let config = Config { target_url, request_type, @@ -339,6 +356,8 @@ impl Config { memory_critical_threshold_percent, auto_disable_percentiles_on_warning, cluster: ClusterConfig::from_env(), + pool_max_idle_per_host, + pool_idle_timeout_secs, }; config.validate()?; @@ -504,6 +523,8 @@ impl Config { memory_critical_threshold_percent, auto_disable_percentiles_on_warning, cluster: ClusterConfig::from_env(), + pool_max_idle_per_host: None, + pool_idle_timeout_secs: None, }; config.validate()?; @@ -707,18 +728,27 @@ impl Config { memory_critical_threshold_percent: 90.0, auto_disable_percentiles_on_warning: true, cluster: 
ClusterConfig::for_testing(), + pool_max_idle_per_host: None, + pool_idle_timeout_secs: None, } } /// Creates a ClientConfig from this Config. pub fn to_client_config(&self) -> ClientConfig { + let mut pool = crate::connection_pool::PoolConfig::from_env(); + if let Some(v) = self.pool_max_idle_per_host { + pool.max_idle_per_host = v; + } + if let Some(v) = self.pool_idle_timeout_secs { + pool.idle_timeout = Duration::from_secs(v); + } ClientConfig { skip_tls_verify: self.skip_tls_verify, resolve_target_addr: self.resolve_target_addr.clone(), client_cert_path: self.client_cert_path.clone(), client_key_path: self.client_key_path.clone(), custom_headers: self.custom_headers.clone(), - pool_config: Some(crate::connection_pool::PoolConfig::from_env()), + pool_config: Some(pool), cookie_store: false, } } diff --git a/src/connection_pool.rs b/src/connection_pool.rs index dc105d5..99c1452 100644 --- a/src/connection_pool.rs +++ b/src/connection_pool.rs @@ -8,6 +8,11 @@ use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use tracing::debug; +use crate::metrics::{ + CONNECTION_POOL_LIKELY_NEW, CONNECTION_POOL_LIKELY_REUSED, CONNECTION_POOL_REQUESTS_TOTAL, + CONNECTION_POOL_REUSE_RATE, +}; + /// Connection pool configuration. 
#[derive(Debug, Clone)] pub struct PoolConfig { @@ -212,6 +217,7 @@ impl PoolStatsTracker { let mut stats = self.stats.lock().unwrap(); stats.total_requests += 1; + CONNECTION_POOL_REQUESTS_TOTAL.inc(); // Track timing if stats.first_request.is_none() { @@ -224,6 +230,7 @@ impl PoolStatsTracker { // Slow requests likely established new connections (TLS handshake adds ~50-100ms) if latency_ms >= self.new_connection_threshold_ms { stats.likely_new_connections += 1; + CONNECTION_POOL_LIKELY_NEW.inc(); debug!( latency_ms = latency_ms, threshold = self.new_connection_threshold_ms, @@ -231,12 +238,17 @@ ); } else { stats.likely_reused_connections += 1; + CONNECTION_POOL_LIKELY_REUSED.inc(); debug!( latency_ms = latency_ms, threshold = self.new_connection_threshold_ms, "Request latency suggests reused connection" ); } + + // Update reuse rate gauge + let reuse_rate = stats.reuse_rate(); + CONNECTION_POOL_REUSE_RATE.set(reuse_rate); } /// Get current connection statistics. diff --git a/src/executor.rs b/src/executor.rs index 9893161..ae480ff 100644 --- a/src/executor.rs +++ b/src/executor.rs @@ -5,6 +5,7 @@ //! and metrics tracking. use crate::assertions; +use crate::connection_pool::GLOBAL_POOL_STATS; use crate::extractor; use crate::metrics::{ CONCURRENT_SCENARIOS, SCENARIO_ASSERTIONS_TOTAL, SCENARIO_DURATION_SECONDS, @@ -347,6 +348,7 @@ impl ScenarioExecutor { let response_result = request_builder.send().await; let response_time_ms = step_start.elapsed().as_millis() as u64; + GLOBAL_POOL_STATS.record_request(response_time_ms); match response_result { Ok(response) => { diff --git a/src/yaml_config.rs b/src/yaml_config.rs index 7e3df32..1e72c1a 100644 --- a/src/yaml_config.rs +++ b/src/yaml_config.rs @@ -99,6 +99,25 @@ pub struct YamlGlobalConfig { /// Equivalent to the RESOLVE_TARGET_ADDR env var; env var takes precedence. #[serde(rename = "resolveTargetAddr")] pub resolve_target_addr: Option<String>, + + /// Connection pool settings. 
When omitted the pool uses env-var defaults + /// (`POOL_MAX_IDLE_PER_HOST`, `POOL_IDLE_TIMEOUT_SECS`). + #[serde(default)] + pub pool: Option<YamlPoolConfig>, +} + +/// Connection pool tuning exposed via YAML. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct YamlPoolConfig { + /// Maximum idle connections kept per host (default: 32). + /// Set to 0 to force a new connection for every request. + #[serde(rename = "maxIdlePerHost")] + pub max_idle_per_host: Option<usize>, + + /// Seconds an idle connection stays in the pool before cleanup (default: 30). + /// Set to 0 to immediately close connections after each request. + #[serde(rename = "idleTimeoutSecs")] + pub idle_timeout_secs: Option<u64>, } fn default_timeout() -> YamlDuration { @@ -723,6 +742,7 @@ impl Default for YamlConfig { skip_tls_verify: false, custom_headers: None, resolve_target_addr: None, + pool: None, }, load: YamlLoadModel::Concurrent, scenarios: vec![],