From 6deac53b3b110518ada14da59978796e5972996e Mon Sep 17 00:00:00 2001 From: Karthik Nadig Date: Mon, 2 Feb 2026 08:34:00 -0800 Subject: [PATCH 1/3] Add performance testing workflow and related configurations --- .github/workflows/perf-tests.yml | 161 ++++++ .vscode/settings.json | 5 +- crates/pet/Cargo.toml | 1 + crates/pet/tests/e2e_performance.rs | 727 ++++++++++++++++++++++++++++ 4 files changed, 893 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/perf-tests.yml create mode 100644 crates/pet/tests/e2e_performance.rs diff --git a/.github/workflows/perf-tests.yml b/.github/workflows/perf-tests.yml new file mode 100644 index 00000000..4fcb78b5 --- /dev/null +++ b/.github/workflows/perf-tests.yml @@ -0,0 +1,161 @@ +name: Performance Tests + +on: + pull_request: + branches: + - main + - release* + - release/* + - release-* + push: + branches: + - main + workflow_dispatch: + inputs: + compare_baseline: + description: "Compare against baseline metrics" + required: false + default: "true" + +jobs: + performance: + name: E2E Performance Tests + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: windows-latest + target: x86_64-pc-windows-msvc + - os: ubuntu-latest + target: x86_64-unknown-linux-musl + - os: macos-latest + target: x86_64-apple-darwin + - os: macos-14 + target: aarch64-apple-darwin + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set Python to PATH + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Add Conda to PATH (Windows) + if: startsWith(matrix.os, 'windows') + run: | + $path = $env:PATH + ";" + $env:CONDA + "\condabin" + echo "PATH=$path" >> $env:GITHUB_ENV + + - name: Add Conda to PATH (Ubuntu) + if: startsWith(matrix.os, 'ubuntu') + run: echo "PATH=$PATH:$CONDA/condabin" >> $GITHUB_ENV + shell: bash + + - name: Install Conda + add to PATH (macOS) + if: startsWith(matrix.os, 'macos') + run: | + if [[ "${{ matrix.target }}" == "aarch64-apple-darwin" ]]; then + curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh + else + curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + fi + bash ~/miniconda.sh -b -p ~/miniconda + echo "PATH=$PATH:$HOME/miniconda/bin" >> $GITHUB_ENV + echo "CONDA=$HOME/miniconda" >> $GITHUB_ENV + shell: bash + + - name: Create test Conda environment + run: conda create -n perf-test-env python=3.12 -y + + - name: Create test venv + run: python -m venv .venv + shell: bash + + - name: Rust Tool Chain setup + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + targets: ${{ matrix.target }} + + - name: Cargo Fetch + run: cargo fetch + shell: bash + + - name: Build Release + run: cargo build --release --target ${{ matrix.target }} + shell: bash + + - name: Run Performance Tests + run: cargo test --release --features ci-perf --target ${{ matrix.target }} -- --nocapture --test-threads=1 2>&1 | tee perf-output.txt + env: + RUST_BACKTRACE: 1 + RUST_LOG: warn + shell: bash + + - name: Extract Performance Metrics + id: metrics + run: | + # Extract JSON metrics from test output + if grep -q "JSON metrics:" perf-output.txt; then + # Extract lines after "JSON metrics:" until the closing brace + sed -n '/JSON metrics:/,/^}/p' perf-output.txt | tail -n +2 > metrics.json + + # Parse key metrics + SERVER_STARTUP=$(jq -r '.server_startup_ms // "N/A"' metrics.json) + FULL_REFRESH=$(jq -r '.full_refresh_ms // "N/A"' metrics.json) + ENV_COUNT=$(jq -r '.environments_count // "N/A"' 
metrics.json) + + echo "server_startup_ms=$SERVER_STARTUP" >> $GITHUB_OUTPUT + echo "full_refresh_ms=$FULL_REFRESH" >> $GITHUB_OUTPUT + echo "environments_count=$ENV_COUNT" >> $GITHUB_OUTPUT + + echo "### Performance Metrics (${{ matrix.os }})" >> $GITHUB_STEP_SUMMARY + echo "| Metric | Value |" >> $GITHUB_STEP_SUMMARY + echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY + echo "| Server Startup | ${SERVER_STARTUP}ms |" >> $GITHUB_STEP_SUMMARY + echo "| Full Refresh | ${FULL_REFRESH}ms |" >> $GITHUB_STEP_SUMMARY + echo "| Environments Found | ${ENV_COUNT} |" >> $GITHUB_STEP_SUMMARY + else + echo "No JSON metrics found in output" + fi + shell: bash + + - name: Upload Performance Results + uses: actions/upload-artifact@v4 + with: + name: perf-results-${{ matrix.os }}-${{ matrix.target }} + path: | + perf-output.txt + metrics.json + if-no-files-found: ignore + + summary: + name: Performance Summary + needs: performance + runs-on: ubuntu-latest + if: always() + steps: + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: perf-results + + - name: Generate Summary Report + run: | + echo "# Performance Test Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "| Platform | Server Startup | Full Refresh | Environments |" >> $GITHUB_STEP_SUMMARY + echo "|----------|----------------|--------------|--------------|" >> $GITHUB_STEP_SUMMARY + + for dir in perf-results/*/; do + if [ -f "${dir}metrics.json" ]; then + platform=$(basename "$dir" | sed 's/perf-results-//') + startup=$(jq -r '.server_startup_ms // "N/A"' "${dir}metrics.json") + refresh=$(jq -r '.full_refresh_ms // "N/A"' "${dir}metrics.json") + envs=$(jq -r '.environments_count // "N/A"' "${dir}metrics.json") + echo "| $platform | ${startup}ms | ${refresh}ms | $envs |" >> $GITHUB_STEP_SUMMARY + fi + done + shell: bash diff --git a/.vscode/settings.json b/.vscode/settings.json index 5e5a216b..9262f343 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -9,5 +9,8 @@ }, "git.branchProtection": ["main", "release/*"], "git.branchProtectionPrompt": "alwaysCommitToNewBranch", - "git.branchRandomName.enable": true + "git.branchRandomName.enable": true, + "chat.tools.terminal.autoApprove": { + "cargo test": true + } } diff --git a/crates/pet/Cargo.toml b/crates/pet/Cargo.toml index 9b554432..9a493b19 100644 --- a/crates/pet/Cargo.toml +++ b/crates/pet/Cargo.toml @@ -56,3 +56,4 @@ ci-homebrew-container = [] ci-poetry-global = [] ci-poetry-project = [] ci-poetry-custom = [] +ci-perf = [] diff --git a/crates/pet/tests/e2e_performance.rs b/crates/pet/tests/e2e_performance.rs new file mode 100644 index 00000000..c902b39c --- /dev/null +++ b/crates/pet/tests/e2e_performance.rs @@ -0,0 +1,727 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! End-to-end performance tests for the pet JSONRPC server. +//! +//! These tests spawn the pet server as a subprocess and communicate via JSONRPC +//! to measure discovery performance from a client perspective. 
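+//!
+//! Wire format: each JSONRPC message is framed LSP-style, with a
+//! `Content-Length: <bytes>` header, a blank line, and then the JSON payload
+//! (see `send_request` below).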
+ +use serde::Deserialize; +use serde_json::{json, Value}; +use std::collections::HashMap; +use std::env; +use std::io::{BufRead, BufReader, Read, Write}; +use std::path::PathBuf; +use std::process::{Child, Command, Stdio}; +use std::sync::atomic::{AtomicU32, Ordering}; +use std::sync::{Arc, Mutex}; +use std::time::{Duration, Instant}; + +mod common; + +/// JSONRPC request ID counter +static REQUEST_ID: AtomicU32 = AtomicU32::new(1); + +/// Performance metrics collected during tests +#[derive(Debug, Clone, Default)] +pub struct PerformanceMetrics { + /// Time to spawn server and get first response (configure) + pub server_startup_ms: u128, + /// Time for full machine refresh + pub full_refresh_ms: u128, + /// Time for workspace-scoped refresh + pub workspace_refresh_ms: Option, + /// Time for kind-specific refresh + pub kind_refresh_ms: HashMap, + /// Number of environments discovered + pub environments_count: usize, + /// Number of managers discovered + pub managers_count: usize, + /// Time to first environment notification + pub time_to_first_env_ms: Option, + /// Resolve times (cold and warm) + pub resolve_times_ms: Vec, +} + +/// Refresh result from server +#[derive(Debug, Clone, Deserialize)] +pub struct RefreshResult { + pub duration: u128, +} + +/// Environment notification from server +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Environment { + pub executable: Option, + pub kind: Option, + #[allow(dead_code)] + pub version: Option, +} + +/// Manager notification from server +#[derive(Debug, Clone, Deserialize)] +pub struct Manager { + #[allow(dead_code)] + pub tool: Option, + #[allow(dead_code)] + pub executable: Option, +} + +/// Shared state for handling notifications +struct SharedState { + environments: Mutex>, + managers: Mutex>, + first_env_time: Mutex>, +} + +impl SharedState { + fn new() -> Self { + Self { + environments: Mutex::new(Vec::new()), + managers: Mutex::new(Vec::new()), + first_env_time: Mutex::new(None), + } + } + + fn handle_notification(&self, method: &str, params: Value) { + match method { + "environment" => { + // Record time to first environment + { + let mut first_env = self.first_env_time.lock().unwrap(); + if first_env.is_none() { + *first_env = Some(Instant::now()); + } + } + + if let Ok(env) = serde_json::from_value::(params) { + self.environments.lock().unwrap().push(env); + } + } + "manager" => { + if let Ok(mgr) = serde_json::from_value::(params) { + self.managers.lock().unwrap().push(mgr); + } + } + "log" | "telemetry" => { + // Ignore log and telemetry notifications + } + _ => { + // Unknown notification + } + } + } + + fn clear(&self) { + self.environments.lock().unwrap().clear(); + self.managers.lock().unwrap().clear(); + *self.first_env_time.lock().unwrap() = None; + } +} + +/// JSONRPC client for communicating with the pet server +pub struct PetClient { + process: Child, + state: Arc, + start_time: Instant, +} + +impl PetClient { + /// Spawn the pet server and create a client + pub fn spawn() -> Result { + let pet_exe = get_pet_executable(); + + if !pet_exe.exists() { + return Err(format!( + "pet executable not found at {:?}. 
Run `cargo build --release` first.", + pet_exe + )); + } + + let start_time = Instant::now(); + + let process = Command::new(&pet_exe) + .arg("server") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .map_err(|e| format!("Failed to spawn pet server: {}", e))?; + + Ok(Self { + process, + state: Arc::new(SharedState::new()), + start_time, + }) + } + + /// Send a JSONRPC request and wait for response + fn send_request(&mut self, method: &str, params: Value) -> Result { + let id = REQUEST_ID.fetch_add(1, Ordering::SeqCst); + let request = json!({ + "jsonrpc": "2.0", + "id": id, + "method": method, + "params": params + }); + + let request_str = serde_json::to_string(&request) + .map_err(|e| format!("Failed to serialize request: {}", e))?; + + let content_length = request_str.len(); + let message = format!("Content-Length: {}\r\n\r\n{}", content_length, request_str); + + // Write request + { + let stdin = self.process.stdin.as_mut().ok_or("Failed to get stdin")?; + stdin + .write_all(message.as_bytes()) + .map_err(|e| format!("Failed to write request: {}", e))?; + stdin + .flush() + .map_err(|e| format!("Failed to flush stdin: {}", e))?; + } + + // Clone state reference for use in the loop + let state = self.state.clone(); + + // Read response - handle notifications until we get our response + let stdout = self.process.stdout.as_mut().ok_or("Failed to get stdout")?; + let mut reader = BufReader::new(stdout); + + loop { + // Read headers until empty line + let mut content_length: Option = None; + loop { + let mut header_line = String::new(); + reader + .read_line(&mut header_line) + .map_err(|e| format!("Failed to read header: {}", e))?; + + let trimmed = header_line.trim(); + if trimmed.is_empty() { + // End of headers + break; + } + + if let Some(len_str) = trimmed.strip_prefix("Content-Length: ") { + content_length = Some( + len_str + .parse() + .map_err(|e| format!("Failed to parse content length: {}", e))?, + ); + } + // Ignore Content-Type and other headers + } + + let content_length = content_length.ok_or("Missing Content-Length header")?; + + // Read body + let mut body = vec![0u8; content_length]; + reader + .read_exact(&mut body) + .map_err(|e| format!("Failed to read body: {}", e))?; + + let body_str = String::from_utf8_lossy(&body); + let value: Value = serde_json::from_str(&body_str) + .map_err(|e| format!("Failed to parse response: {}", e))?; + + // Check if this is a notification or our response + if let Some(notif_method) = value.get("method").and_then(|m| m.as_str()) { + // Handle notifications using the cloned state reference + state.handle_notification( + notif_method, + value.get("params").cloned().unwrap_or(Value::Null), + ); + continue; + } + + // Check if this is our response + if let Some(response_id) = value.get("id").and_then(|i| i.as_u64()) { + if response_id as u32 == id { + if let Some(error) = value.get("error") { + return Err(format!("JSONRPC error: {:?}", error)); + } + return Ok(value.get("result").cloned().unwrap_or(Value::Null)); + } + } + } + } + + /// Configure the server + pub fn configure(&mut self, config: Value) -> Result { + let start = Instant::now(); + self.send_request("configure", config)?; + Ok(start.elapsed()) + } + + /// Refresh environments + pub fn refresh(&mut self, params: Option) -> Result<(RefreshResult, Duration), String> { + // Clear previous results + self.state.clear(); + + let start = Instant::now(); + let result = self.send_request("refresh", params.unwrap_or(json!({})))?; + let elapsed = 
start.elapsed(); + + let refresh_result: RefreshResult = serde_json::from_value(result) + .map_err(|e| format!("Failed to parse refresh result: {}", e))?; + + Ok((refresh_result, elapsed)) + } + + /// Resolve a Python executable + pub fn resolve(&mut self, executable: &str) -> Result<(Value, Duration), String> { + let start = Instant::now(); + let result = self.send_request("resolve", json!({ "executable": executable }))?; + Ok((result, start.elapsed())) + } + + /// Get collected environments + pub fn get_environments(&self) -> Vec { + self.state.environments.lock().unwrap().clone() + } + + /// Get collected managers + pub fn get_managers(&self) -> Vec { + self.state.managers.lock().unwrap().clone() + } + + /// Get time from start to first environment + pub fn time_to_first_env(&self) -> Option { + self.state + .first_env_time + .lock() + .unwrap() + .map(|t| t.duration_since(self.start_time)) + } + + /// Get startup time + #[allow(dead_code)] + pub fn startup_time(&self) -> Duration { + self.start_time.elapsed() + } +} + +impl Drop for PetClient { + fn drop(&mut self) { + let _ = self.process.kill(); + let _ = self.process.wait(); + } +} + +/// Get the path to the pet executable +fn get_pet_executable() -> PathBuf { + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let target_dir = manifest_dir + .parent() + .unwrap() + .parent() + .unwrap() + .join("target"); + + // Prefer release build for performance tests + let release_exe = if cfg!(windows) { + target_dir.join("release").join("pet.exe") + } else { + target_dir.join("release").join("pet") + }; + + if release_exe.exists() { + return release_exe; + } + + // Fall back to debug build + if cfg!(windows) { + target_dir.join("debug").join("pet.exe") + } else { + target_dir.join("debug").join("pet") + } +} + +/// Get a temporary cache directory for tests +fn get_test_cache_dir() -> PathBuf { + let tmp = env::temp_dir(); + tmp.join("pet-e2e-perf-tests") + .join(format!("cache-{}", std::process::id())) +} + +/// Get workspace directory (current project root) +fn get_workspace_dir() -> PathBuf { + env::var("GITHUB_WORKSPACE") + .map(PathBuf::from) + .unwrap_or_else(|_| { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .parent() + .unwrap() + .parent() + .unwrap() + .to_path_buf() + }) +} + +// ============================================================================ +// Performance Tests +// ============================================================================ + +#[cfg_attr(feature = "ci-perf", test)] +#[allow(dead_code)] +fn test_server_startup_performance() { + let start = Instant::now(); + let mut client = PetClient::spawn().expect("Failed to spawn server"); + let spawn_time = start.elapsed(); + + let cache_dir = get_test_cache_dir(); + let workspace_dir = get_workspace_dir(); + + let config = json!({ + "workspaceDirectories": [workspace_dir], + "cacheDirectory": cache_dir + }); + + let configure_time = client.configure(config).expect("Failed to configure"); + + println!("=== Server Startup Performance ==="); + println!("Server spawn time: {:?}", spawn_time); + println!("Configure request time: {:?}", configure_time); + println!("Total startup time: {:?}", spawn_time + configure_time); + + // Assert reasonable startup time (should be under 1 second on most machines) + assert!( + spawn_time.as_millis() < 5000, + "Server spawn took too long: {:?}", + spawn_time + ); + assert!( + configure_time.as_millis() < 1000, + "Configure took too long: {:?}", + configure_time + ); +} + +#[cfg_attr(feature = "ci-perf", test)] 
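+// With `--features ci-perf` the cfg_attr above compiles this as a #[test];
+// without the feature it is an ordinary unused fn, hence #[allow(dead_code)].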
+#[allow(dead_code)] +fn test_full_refresh_performance() { + let mut client = PetClient::spawn().expect("Failed to spawn server"); + + let cache_dir = get_test_cache_dir(); + let workspace_dir = get_workspace_dir(); + + let config = json!({ + "workspaceDirectories": [workspace_dir], + "cacheDirectory": cache_dir + }); + + client.configure(config).expect("Failed to configure"); + + // Full machine refresh + let (result, client_elapsed) = client.refresh(None).expect("Failed to refresh"); + let environments = client.get_environments(); + let managers = client.get_managers(); + + println!("=== Full Refresh Performance ==="); + println!("Server-reported duration: {}ms", result.duration); + println!("Client-measured duration: {:?}", client_elapsed); + println!("Environments discovered: {}", environments.len()); + println!("Managers discovered: {}", managers.len()); + + if let Some(time_to_first) = client.time_to_first_env() { + println!("Time to first environment: {:?}", time_to_first); + } + + // Log environment kinds found + let mut kind_counts: HashMap = HashMap::new(); + for env in &environments { + if let Some(kind) = &env.kind { + *kind_counts.entry(kind.clone()).or_insert(0) += 1; + } + } + println!("Environment kinds: {:?}", kind_counts); + + // Assert we found at least some environments (CI should always have Python installed) + assert!( + !environments.is_empty(), + "No environments discovered - this is unexpected" + ); +} + +#[cfg_attr(feature = "ci-perf", test)] +#[allow(dead_code)] +fn test_workspace_scoped_refresh_performance() { + let mut client = PetClient::spawn().expect("Failed to spawn server"); + + let cache_dir = get_test_cache_dir(); + let workspace_dir = get_workspace_dir(); + + let config = json!({ + "workspaceDirectories": [workspace_dir.clone()], + "cacheDirectory": cache_dir + }); + + client.configure(config).expect("Failed to configure"); + + // Workspace-scoped refresh + let (result, client_elapsed) = client + .refresh(Some(json!({ "searchPaths": [workspace_dir] }))) + .expect("Failed to refresh"); + + let environments = client.get_environments(); + + println!("=== Workspace-Scoped Refresh Performance ==="); + println!("Server-reported duration: {}ms", result.duration); + println!("Client-measured duration: {:?}", client_elapsed); + println!("Environments discovered: {}", environments.len()); + + // Workspace-scoped should be faster than full refresh + // (though we don't assert this as it depends on the environment) +} + +#[cfg_attr(feature = "ci-perf", test)] +#[allow(dead_code)] +fn test_kind_specific_refresh_performance() { + let mut client = PetClient::spawn().expect("Failed to spawn server"); + + let cache_dir = get_test_cache_dir(); + let workspace_dir = get_workspace_dir(); + + let config = json!({ + "workspaceDirectories": [workspace_dir], + "cacheDirectory": cache_dir + }); + + client.configure(config).expect("Failed to configure"); + + // Test different environment kinds + let kinds = ["Conda", "Venv", "VirtualEnv", "Pyenv"]; + + println!("=== Kind-Specific Refresh Performance ==="); + + for kind in kinds { + let (result, client_elapsed) = client + .refresh(Some(json!({ "searchKind": kind }))) + .expect(&format!("Failed to refresh for kind {}", kind)); + + let environments = client.get_environments(); + + println!( + "{}: {}ms (server), {:?} (client), {} envs", + kind, + result.duration, + client_elapsed, + environments.len() + ); + } +} + +#[cfg_attr(feature = "ci-perf", test)] +#[allow(dead_code)] +fn test_resolve_performance() { + let mut client = 
PetClient::spawn().expect("Failed to spawn server"); + + let cache_dir = get_test_cache_dir(); + let workspace_dir = get_workspace_dir(); + + let config = json!({ + "workspaceDirectories": [workspace_dir], + "cacheDirectory": cache_dir + }); + + client.configure(config).expect("Failed to configure"); + + // First, discover environments + client.refresh(None).expect("Failed to refresh"); + let environments = client.get_environments(); + + if environments.is_empty() { + println!("No environments found to test resolve performance"); + return; + } + + println!("=== Resolve Performance ==="); + + // Find an environment with an executable to resolve + let env_with_exe = environments.iter().find(|e| e.executable.is_some()); + + if let Some(env) = env_with_exe { + let exe = env.executable.as_ref().unwrap(); + + // Cold resolve (first time) + let (_, cold_time) = client.resolve(exe).expect("Failed to resolve (cold)"); + println!("Cold resolve time: {:?}", cold_time); + + // Warm resolve (cached) + let (_, warm_time) = client.resolve(exe).expect("Failed to resolve (warm)"); + println!("Warm resolve time: {:?}", warm_time); + + // Warm should be faster than cold (if caching is working) + if warm_time < cold_time { + println!( + "Cache speedup: {:.2}x", + cold_time.as_micros() as f64 / warm_time.as_micros() as f64 + ); + } + } else { + println!("No environment with executable found"); + } +} + +#[cfg_attr(feature = "ci-perf", test)] +#[allow(dead_code)] +fn test_concurrent_resolve_performance() { + let mut client = PetClient::spawn().expect("Failed to spawn server"); + + let cache_dir = get_test_cache_dir(); + let workspace_dir = get_workspace_dir(); + + let config = json!({ + "workspaceDirectories": [workspace_dir], + "cacheDirectory": cache_dir + }); + + client.configure(config).expect("Failed to configure"); + + // First, discover environments + client.refresh(None).expect("Failed to refresh"); + let environments = client.get_environments(); + + // Get up to 5 environments with executables + let exes: Vec = environments + .iter() + .filter_map(|e| e.executable.clone()) + .take(5) + .collect(); + + if exes.is_empty() { + println!("No environments with executables found"); + return; + } + + println!("=== Sequential Resolve Performance ==="); + println!("Resolving {} executables sequentially", exes.len()); + + let start = Instant::now(); + for exe in &exes { + client.resolve(exe).expect("Failed to resolve"); + } + let sequential_time = start.elapsed(); + println!("Sequential time: {:?}", sequential_time); + println!( + "Average per resolve: {:?}", + sequential_time / exes.len() as u32 + ); +} + +#[cfg_attr(feature = "ci-perf", test)] +#[allow(dead_code)] +fn test_refresh_warm_vs_cold_cache() { + // Clean cache directory + let cache_dir = get_test_cache_dir(); + let _ = std::fs::remove_dir_all(&cache_dir); + std::fs::create_dir_all(&cache_dir).expect("Failed to create cache dir"); + + let workspace_dir = get_workspace_dir(); + + println!("=== Cold vs Warm Cache Performance ==="); + + // Cold cache test + { + let mut client = PetClient::spawn().expect("Failed to spawn server"); + let config = json!({ + "workspaceDirectories": [workspace_dir.clone()], + "cacheDirectory": cache_dir.clone() + }); + client.configure(config).expect("Failed to configure"); + + let (result, elapsed) = client.refresh(None).expect("Failed to refresh"); + println!( + "Cold cache: {}ms (server), {:?} (client)", + result.duration, elapsed + ); + } + + // Warm cache test (reuse same cache directory) + { + let mut client = 
PetClient::spawn().expect("Failed to spawn server"); + let config = json!({ + "workspaceDirectories": [workspace_dir], + "cacheDirectory": cache_dir + }); + client.configure(config).expect("Failed to configure"); + + let (result, elapsed) = client.refresh(None).expect("Failed to refresh"); + println!( + "Warm cache: {}ms (server), {:?} (client)", + result.duration, elapsed + ); + } +} + +#[cfg_attr(feature = "ci-perf", test)] +#[allow(dead_code)] +fn test_performance_summary() { + let mut metrics = PerformanceMetrics::default(); + + let cache_dir = get_test_cache_dir(); + let _ = std::fs::remove_dir_all(&cache_dir); + std::fs::create_dir_all(&cache_dir).expect("Failed to create cache dir"); + + let workspace_dir = get_workspace_dir(); + + // Measure server startup + let spawn_start = Instant::now(); + let mut client = PetClient::spawn().expect("Failed to spawn server"); + + let config = json!({ + "workspaceDirectories": [workspace_dir.clone()], + "cacheDirectory": cache_dir + }); + + client.configure(config).expect("Failed to configure"); + metrics.server_startup_ms = spawn_start.elapsed().as_millis(); + + // Measure full refresh + let (result, _) = client.refresh(None).expect("Failed to refresh"); + metrics.full_refresh_ms = result.duration; + metrics.environments_count = client.get_environments().len(); + metrics.managers_count = client.get_managers().len(); + + if let Some(ttfe) = client.time_to_first_env() { + metrics.time_to_first_env_ms = Some(ttfe.as_millis()); + } + + // Measure workspace refresh + let (result, _) = client + .refresh(Some(json!({ "searchPaths": [workspace_dir] }))) + .expect("Failed to refresh"); + metrics.workspace_refresh_ms = Some(result.duration); + + // Print summary + println!("\n========================================"); + println!(" PERFORMANCE TEST SUMMARY "); + println!("========================================"); + println!("Server startup: {}ms", metrics.server_startup_ms); + println!("Full refresh: {}ms", metrics.full_refresh_ms); + if let Some(ws) = metrics.workspace_refresh_ms { + println!("Workspace refresh: {}ms", ws); + } + if let Some(ttfe) = metrics.time_to_first_env_ms { + println!("Time to first env: {}ms", ttfe); + } + println!("Environments found: {}", metrics.environments_count); + println!("Managers found: {}", metrics.managers_count); + println!("========================================\n"); + + // Output as JSON for CI parsing + let json_output = serde_json::to_string_pretty(&json!({ + "server_startup_ms": metrics.server_startup_ms, + "full_refresh_ms": metrics.full_refresh_ms, + "workspace_refresh_ms": metrics.workspace_refresh_ms, + "time_to_first_env_ms": metrics.time_to_first_env_ms, + "environments_count": metrics.environments_count, + "managers_count": metrics.managers_count + })) + .unwrap(); + + println!("JSON metrics:\n{}", json_output); +} From f5dedacdaed38e0671ad2618fd49b2598f3452e4 Mon Sep 17 00:00:00 2001 From: Karthik Nadig Date: Mon, 2 Feb 2026 09:06:50 -0800 Subject: [PATCH 2/3] Add performance baseline workflow and enhance performance test reporting - Introduced a new GitHub Actions workflow for performance baseline testing. - Updated performance test workflow to compare against baseline metrics. - Integrated tracing for performance profiling in various modules. - Added new dependencies for tracing and enhanced logging capabilities. 
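In practice the instrumentation added here is two pieces: a one-time
tracing-subscriber setup and `info_span!` guards around each discovery phase.
A minimal sketch of the pattern (it mirrors the `initialize_tracing` helper and
the span usage introduced in this patch, and is illustrative only, not
additional shipped code):

    use tracing::info_span;
    use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};

    fn main() {
        // Honor RUST_LOG when set; otherwise report warnings only (the server default).
        let filter =
            EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("warn"));
        tracing_subscriber::registry()
            .with(filter)
            .with(fmt::layer().with_target(true).with_timer(fmt::time::uptime()))
            .init();

        // Entering a span scopes the work that follows; subscribers see the
        // enter/exit events, so each phase's duration can be read off the trace.
        let _span = info_span!("locators_phase").entered();
        // ... discovery work ...
    }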
--- .github/workflows/perf-baseline.yml | 102 +++++++++ .github/workflows/perf-tests.yml | 340 ++++++++++++++++++++++------ Cargo.lock | 130 +++++++++++ crates/pet/Cargo.toml | 2 + crates/pet/src/find.rs | 10 + crates/pet/src/jsonrpc.rs | 11 +- crates/pet/src/lib.rs | 63 +++++- crates/pet/src/locators.rs | 15 +- 8 files changed, 597 insertions(+), 76 deletions(-) create mode 100644 .github/workflows/perf-baseline.yml diff --git a/.github/workflows/perf-baseline.yml b/.github/workflows/perf-baseline.yml new file mode 100644 index 00000000..f6aa1c43 --- /dev/null +++ b/.github/workflows/perf-baseline.yml @@ -0,0 +1,102 @@ +name: Performance Baseline + +on: + push: + branches: + - main + +permissions: + contents: read + +jobs: + performance: + name: Performance Baseline + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-latest + target: x86_64-unknown-linux-musl + - os: windows-latest + target: x86_64-pc-windows-msvc + - os: macos-latest + target: x86_64-apple-darwin + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set Python to PATH + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Add Conda to PATH (Windows) + if: startsWith(matrix.os, 'windows') + run: | + $path = $env:PATH + ";" + $env:CONDA + "\condabin" + echo "PATH=$path" >> $env:GITHUB_ENV + + - name: Add Conda to PATH (Linux) + if: startsWith(matrix.os, 'ubuntu') + run: echo "PATH=$PATH:$CONDA/condabin" >> $GITHUB_ENV + shell: bash + + - name: Install Conda + add to PATH (macOS) + if: startsWith(matrix.os, 'macos') + run: | + curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + bash ~/miniconda.sh -b -p ~/miniconda + echo "PATH=$PATH:$HOME/miniconda/bin" >> $GITHUB_ENV + echo "CONDA=$HOME/miniconda" >> $GITHUB_ENV + shell: bash + + - name: Create test Conda environment + run: conda create -n perf-test-env python=3.12 -y + + - name: Create test venv + run: python -m venv .venv + shell: bash + + - name: Rust Tool Chain setup + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + targets: ${{ matrix.target }} + + - name: Cargo Fetch + run: cargo fetch + shell: bash + + - name: Build Release + run: cargo build --release --target ${{ matrix.target }} + shell: bash + + - name: Run Performance Tests + run: cargo test --release --features ci-perf --target ${{ matrix.target }} --test e2e_performance test_performance_summary -- --nocapture 2>&1 | tee perf-output.txt + env: + RUST_BACKTRACE: 1 + RUST_LOG: warn + shell: bash + + - name: Extract Performance Metrics + id: metrics + run: | + # Extract JSON metrics from test output + if grep -q "JSON metrics:" perf-output.txt; then + # Extract lines after "JSON metrics:" until the closing brace + sed -n '/JSON metrics:/,/^}/p' perf-output.txt | tail -n +2 > metrics.json + echo "Metrics extracted:" + cat metrics.json + else + echo '{"server_startup_ms": 0, "full_refresh_ms": 0, "environments_count": 0}' > metrics.json + echo "No metrics found, created empty metrics" + fi + shell: bash + + - name: Upload Performance Baseline Artifact + uses: actions/upload-artifact@v4 + with: + name: perf-baseline-${{ matrix.os }} + path: metrics.json + retention-days: 90 diff --git a/.github/workflows/perf-tests.yml b/.github/workflows/perf-tests.yml index 4fcb78b5..d2d93b7e 100644 --- a/.github/workflows/perf-tests.yml +++ b/.github/workflows/perf-tests.yml @@ -7,15 +7,12 @@ on: - release* - release/* - release-* - push: - branches: - - main workflow_dispatch: - inputs: - 
compare_baseline: - description: "Compare against baseline metrics" - required: false - default: "true" + +permissions: + actions: read + contents: read + pull-requests: write jobs: performance: @@ -31,8 +28,6 @@ jobs: target: x86_64-unknown-linux-musl - os: macos-latest target: x86_64-apple-darwin - - os: macos-14 - target: aarch64-apple-darwin steps: - name: Checkout uses: actions/checkout@v4 @@ -56,11 +51,7 @@ jobs: - name: Install Conda + add to PATH (macOS) if: startsWith(matrix.os, 'macos') run: | - if [[ "${{ matrix.target }}" == "aarch64-apple-darwin" ]]; then - curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh - else - curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - fi + curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh bash ~/miniconda.sh -b -p ~/miniconda echo "PATH=$PATH:$HOME/miniconda/bin" >> $GITHUB_ENV echo "CONDA=$HOME/miniconda" >> $GITHUB_ENV @@ -88,7 +79,7 @@ jobs: shell: bash - name: Run Performance Tests - run: cargo test --release --features ci-perf --target ${{ matrix.target }} -- --nocapture --test-threads=1 2>&1 | tee perf-output.txt + run: cargo test --release --features ci-perf --target ${{ matrix.target }} --test e2e_performance test_performance_summary -- --nocapture 2>&1 | tee perf-output.txt env: RUST_BACKTRACE: 1 RUST_LOG: warn @@ -101,61 +92,284 @@ jobs: if grep -q "JSON metrics:" perf-output.txt; then # Extract lines after "JSON metrics:" until the closing brace sed -n '/JSON metrics:/,/^}/p' perf-output.txt | tail -n +2 > metrics.json - - # Parse key metrics - SERVER_STARTUP=$(jq -r '.server_startup_ms // "N/A"' metrics.json) - FULL_REFRESH=$(jq -r '.full_refresh_ms // "N/A"' metrics.json) - ENV_COUNT=$(jq -r '.environments_count // "N/A"' metrics.json) - - echo "server_startup_ms=$SERVER_STARTUP" >> $GITHUB_OUTPUT - echo "full_refresh_ms=$FULL_REFRESH" >> $GITHUB_OUTPUT - echo "environments_count=$ENV_COUNT" >> $GITHUB_OUTPUT - - echo "### Performance Metrics (${{ matrix.os }})" >> $GITHUB_STEP_SUMMARY - echo "| Metric | Value |" >> $GITHUB_STEP_SUMMARY - echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY - echo "| Server Startup | ${SERVER_STARTUP}ms |" >> $GITHUB_STEP_SUMMARY - echo "| Full Refresh | ${FULL_REFRESH}ms |" >> $GITHUB_STEP_SUMMARY - echo "| Environments Found | ${ENV_COUNT} |" >> $GITHUB_STEP_SUMMARY + echo "Metrics extracted:" + cat metrics.json else - echo "No JSON metrics found in output" + echo '{"server_startup_ms": 0, "full_refresh_ms": 0, "environments_count": 0}' > metrics.json + echo "No metrics found, created empty metrics" fi shell: bash - - name: Upload Performance Results + - name: Upload PR Performance Results uses: actions/upload-artifact@v4 with: - name: perf-results-${{ matrix.os }}-${{ matrix.target }} - path: | - perf-output.txt - metrics.json - if-no-files-found: ignore - - summary: - name: Performance Summary - needs: performance - runs-on: ubuntu-latest - if: always() - steps: - - name: Download all artifacts - uses: actions/download-artifact@v4 + name: perf-pr-${{ matrix.os }} + path: metrics.json + + - name: Download Baseline Performance + uses: dawidd6/action-download-artifact@v6 + id: download-baseline + continue-on-error: true with: - path: perf-results + workflow: perf-baseline.yml + branch: main + name: perf-baseline-${{ matrix.os }} + path: baseline-perf + + - name: Generate Performance Report (Linux) + if: startsWith(matrix.os, 'ubuntu') + id: perf-linux + run: | + # Extract 
PR metrics + PR_STARTUP=$(jq -r '.server_startup_ms // 0' metrics.json) + PR_REFRESH=$(jq -r '.full_refresh_ms // 0' metrics.json) + PR_ENVS=$(jq -r '.environments_count // 0' metrics.json) + + # Extract baseline metrics (default to 0 if not available) + if [ -f baseline-perf/metrics.json ]; then + BASELINE_STARTUP=$(jq -r '.server_startup_ms // 0' baseline-perf/metrics.json) + BASELINE_REFRESH=$(jq -r '.full_refresh_ms // 0' baseline-perf/metrics.json) + BASELINE_ENVS=$(jq -r '.environments_count // 0' baseline-perf/metrics.json) + else + BASELINE_STARTUP=0 + BASELINE_REFRESH=0 + BASELINE_ENVS=0 + fi + + # Calculate diff (positive means slowdown, negative means speedup) + STARTUP_DIFF=$(echo "$PR_STARTUP - $BASELINE_STARTUP" | bc) + REFRESH_DIFF=$(echo "$PR_REFRESH - $BASELINE_REFRESH" | bc) + + # Calculate percentage change + if [ "$BASELINE_STARTUP" != "0" ]; then + STARTUP_PCT=$(echo "scale=1; ($STARTUP_DIFF / $BASELINE_STARTUP) * 100" | bc) + else + STARTUP_PCT="N/A" + fi + + if [ "$BASELINE_REFRESH" != "0" ]; then + REFRESH_PCT=$(echo "scale=1; ($REFRESH_DIFF / $BASELINE_REFRESH) * 100" | bc) + else + REFRESH_PCT="N/A" + fi + + # Determine delta indicators (for perf, negative is good = faster) + if (( $(echo "$REFRESH_DIFF < -100" | bc -l) )); then + DELTA_INDICATOR=":rocket:" + elif (( $(echo "$REFRESH_DIFF < 0" | bc -l) )); then + DELTA_INDICATOR=":white_check_mark:" + elif (( $(echo "$REFRESH_DIFF > 500" | bc -l) )); then + DELTA_INDICATOR=":warning:" + elif (( $(echo "$REFRESH_DIFF > 100" | bc -l) )); then + DELTA_INDICATOR=":small_red_triangle:" + else + DELTA_INDICATOR=":heavy_minus_sign:" + fi + + # Set outputs + echo "pr_startup=$PR_STARTUP" >> $GITHUB_OUTPUT + echo "pr_refresh=$PR_REFRESH" >> $GITHUB_OUTPUT + echo "baseline_startup=$BASELINE_STARTUP" >> $GITHUB_OUTPUT + echo "baseline_refresh=$BASELINE_REFRESH" >> $GITHUB_OUTPUT + echo "startup_diff=$STARTUP_DIFF" >> $GITHUB_OUTPUT + echo "refresh_diff=$REFRESH_DIFF" >> $GITHUB_OUTPUT + echo "startup_pct=$STARTUP_PCT" >> $GITHUB_OUTPUT + echo "refresh_pct=$REFRESH_PCT" >> $GITHUB_OUTPUT + echo "delta_indicator=$DELTA_INDICATOR" >> $GITHUB_OUTPUT + + # Write step summary + echo "## Performance Report (Linux)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "| Metric | PR | Baseline | Delta | Change |" >> $GITHUB_STEP_SUMMARY + echo "|--------|-----|----------|-------|--------|" >> $GITHUB_STEP_SUMMARY + echo "| Server Startup | ${PR_STARTUP}ms | ${BASELINE_STARTUP}ms | ${STARTUP_DIFF}ms | ${STARTUP_PCT}% |" >> $GITHUB_STEP_SUMMARY + echo "| Full Refresh | ${PR_REFRESH}ms | ${BASELINE_REFRESH}ms | ${REFRESH_DIFF}ms | ${REFRESH_PCT}% ${DELTA_INDICATOR} |" >> $GITHUB_STEP_SUMMARY + echo "| Environments | ${PR_ENVS} | ${BASELINE_ENVS} | - | - |" >> $GITHUB_STEP_SUMMARY + shell: bash + + - name: Generate Performance Report (Windows) + if: startsWith(matrix.os, 'windows') + id: perf-windows + run: | + # Extract PR metrics + $prMetrics = Get-Content -Path "metrics.json" -Raw | ConvertFrom-Json + $prStartup = $prMetrics.server_startup_ms + $prRefresh = $prMetrics.full_refresh_ms + $prEnvs = $prMetrics.environments_count + + # Extract baseline metrics (default to 0 if not available) + if (Test-Path "baseline-perf/metrics.json") { + $baselineMetrics = Get-Content -Path "baseline-perf/metrics.json" -Raw | ConvertFrom-Json + $baselineStartup = $baselineMetrics.server_startup_ms + $baselineRefresh = $baselineMetrics.full_refresh_ms + $baselineEnvs = $baselineMetrics.environments_count + } else { + $baselineStartup = 0 + 
$baselineRefresh = 0 + $baselineEnvs = 0 + } + + # Calculate diff + $startupDiff = $prStartup - $baselineStartup + $refreshDiff = $prRefresh - $baselineRefresh + + # Calculate percentage change + if ($baselineStartup -gt 0) { + $startupPct = [math]::Round(($startupDiff / $baselineStartup) * 100, 1) + } else { + $startupPct = "N/A" + } + + if ($baselineRefresh -gt 0) { + $refreshPct = [math]::Round(($refreshDiff / $baselineRefresh) * 100, 1) + } else { + $refreshPct = "N/A" + } + + # Determine delta indicator + if ($refreshDiff -lt -100) { + $deltaIndicator = ":rocket:" + } elseif ($refreshDiff -lt 0) { + $deltaIndicator = ":white_check_mark:" + } elseif ($refreshDiff -gt 500) { + $deltaIndicator = ":warning:" + } elseif ($refreshDiff -gt 100) { + $deltaIndicator = ":small_red_triangle:" + } else { + $deltaIndicator = ":heavy_minus_sign:" + } + + # Set outputs + echo "pr_startup=$prStartup" >> $env:GITHUB_OUTPUT + echo "pr_refresh=$prRefresh" >> $env:GITHUB_OUTPUT + echo "baseline_startup=$baselineStartup" >> $env:GITHUB_OUTPUT + echo "baseline_refresh=$baselineRefresh" >> $env:GITHUB_OUTPUT + echo "startup_diff=$startupDiff" >> $env:GITHUB_OUTPUT + echo "refresh_diff=$refreshDiff" >> $env:GITHUB_OUTPUT + echo "startup_pct=$startupPct" >> $env:GITHUB_OUTPUT + echo "refresh_pct=$refreshPct" >> $env:GITHUB_OUTPUT + echo "delta_indicator=$deltaIndicator" >> $env:GITHUB_OUTPUT + + # Write step summary + echo "## Performance Report (Windows)" >> $env:GITHUB_STEP_SUMMARY + echo "" >> $env:GITHUB_STEP_SUMMARY + echo "| Metric | PR | Baseline | Delta | Change |" >> $env:GITHUB_STEP_SUMMARY + echo "|--------|-----|----------|-------|--------|" >> $env:GITHUB_STEP_SUMMARY + echo "| Server Startup | ${prStartup}ms | ${baselineStartup}ms | ${startupDiff}ms | ${startupPct}% |" >> $env:GITHUB_STEP_SUMMARY + echo "| Full Refresh | ${prRefresh}ms | ${baselineRefresh}ms | ${refreshDiff}ms | ${refreshPct}% ${deltaIndicator} |" >> $env:GITHUB_STEP_SUMMARY + echo "| Environments | ${prEnvs} | ${baselineEnvs} | - | - |" >> $env:GITHUB_STEP_SUMMARY + shell: pwsh - - name: Generate Summary Report + - name: Generate Performance Report (macOS) + if: startsWith(matrix.os, 'macos') + id: perf-macos run: | - echo "# Performance Test Results" >> $GITHUB_STEP_SUMMARY + # Extract PR metrics + PR_STARTUP=$(jq -r '.server_startup_ms // 0' metrics.json) + PR_REFRESH=$(jq -r '.full_refresh_ms // 0' metrics.json) + PR_ENVS=$(jq -r '.environments_count // 0' metrics.json) + + # Extract baseline metrics (default to 0 if not available) + if [ -f baseline-perf/metrics.json ]; then + BASELINE_STARTUP=$(jq -r '.server_startup_ms // 0' baseline-perf/metrics.json) + BASELINE_REFRESH=$(jq -r '.full_refresh_ms // 0' baseline-perf/metrics.json) + BASELINE_ENVS=$(jq -r '.environments_count // 0' baseline-perf/metrics.json) + else + BASELINE_STARTUP=0 + BASELINE_REFRESH=0 + BASELINE_ENVS=0 + fi + + # Calculate diff + STARTUP_DIFF=$((PR_STARTUP - BASELINE_STARTUP)) + REFRESH_DIFF=$((PR_REFRESH - BASELINE_REFRESH)) + + # Set outputs + echo "pr_startup=$PR_STARTUP" >> $GITHUB_OUTPUT + echo "pr_refresh=$PR_REFRESH" >> $GITHUB_OUTPUT + echo "baseline_startup=$BASELINE_STARTUP" >> $GITHUB_OUTPUT + echo "baseline_refresh=$BASELINE_REFRESH" >> $GITHUB_OUTPUT + echo "startup_diff=$STARTUP_DIFF" >> $GITHUB_OUTPUT + echo "refresh_diff=$REFRESH_DIFF" >> $GITHUB_OUTPUT + + # Write step summary + echo "## Performance Report (macOS)" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY - echo "| Platform | Server Startup | Full Refresh | Environments 
|" >> $GITHUB_STEP_SUMMARY - echo "|----------|----------------|--------------|--------------|" >> $GITHUB_STEP_SUMMARY - - for dir in perf-results/*/; do - if [ -f "${dir}metrics.json" ]; then - platform=$(basename "$dir" | sed 's/perf-results-//') - startup=$(jq -r '.server_startup_ms // "N/A"' "${dir}metrics.json") - refresh=$(jq -r '.full_refresh_ms // "N/A"' "${dir}metrics.json") - envs=$(jq -r '.environments_count // "N/A"' "${dir}metrics.json") - echo "| $platform | ${startup}ms | ${refresh}ms | $envs |" >> $GITHUB_STEP_SUMMARY - fi - done + echo "| Metric | PR | Baseline | Delta |" >> $GITHUB_STEP_SUMMARY + echo "|--------|-----|----------|-------|" >> $GITHUB_STEP_SUMMARY + echo "| Server Startup | ${PR_STARTUP}ms | ${BASELINE_STARTUP}ms | ${STARTUP_DIFF}ms |" >> $GITHUB_STEP_SUMMARY + echo "| Full Refresh | ${PR_REFRESH}ms | ${BASELINE_REFRESH}ms | ${REFRESH_DIFF}ms |" >> $GITHUB_STEP_SUMMARY + echo "| Environments | ${PR_ENVS} | ${BASELINE_ENVS} | - |" >> $GITHUB_STEP_SUMMARY shell: bash + + - name: Post Performance Comment (Linux) + if: startsWith(matrix.os, 'ubuntu') && github.event_name == 'pull_request' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: perf-linux + message: | + ## Performance Report (Linux) ${{ steps.perf-linux.outputs.delta_indicator }} + + | Metric | PR | Baseline | Delta | Change | + |--------|-----|----------|-------|--------| + | Server Startup | ${{ steps.perf-linux.outputs.pr_startup }}ms | ${{ steps.perf-linux.outputs.baseline_startup }}ms | ${{ steps.perf-linux.outputs.startup_diff }}ms | ${{ steps.perf-linux.outputs.startup_pct }}% | + | Full Refresh | ${{ steps.perf-linux.outputs.pr_refresh }}ms | ${{ steps.perf-linux.outputs.baseline_refresh }}ms | ${{ steps.perf-linux.outputs.refresh_diff }}ms | ${{ steps.perf-linux.outputs.refresh_pct }}% | + + --- +
+          <details>
+          <summary>Legend</summary>
+
+          - :rocket: Significant speedup (>100ms faster)
+          - :white_check_mark: Faster than baseline
+          - :heavy_minus_sign: No significant change
+          - :small_red_triangle: Slower than baseline (>100ms)
+          - :warning: Significant slowdown (>500ms)
+          </details>
+ + - name: Post Performance Comment (Windows) + if: startsWith(matrix.os, 'windows') && github.event_name == 'pull_request' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: perf-windows + message: | + ## Performance Report (Windows) ${{ steps.perf-windows.outputs.delta_indicator }} + + | Metric | PR | Baseline | Delta | Change | + |--------|-----|----------|-------|--------| + | Server Startup | ${{ steps.perf-windows.outputs.pr_startup }}ms | ${{ steps.perf-windows.outputs.baseline_startup }}ms | ${{ steps.perf-windows.outputs.startup_diff }}ms | ${{ steps.perf-windows.outputs.startup_pct }}% | + | Full Refresh | ${{ steps.perf-windows.outputs.pr_refresh }}ms | ${{ steps.perf-windows.outputs.baseline_refresh }}ms | ${{ steps.perf-windows.outputs.refresh_diff }}ms | ${{ steps.perf-windows.outputs.refresh_pct }}% | + + --- +
+          <details>
+          <summary>Legend</summary>
+
+          - :rocket: Significant speedup (>100ms faster)
+          - :white_check_mark: Faster than baseline
+          - :heavy_minus_sign: No significant change
+          - :small_red_triangle: Slower than baseline (>100ms)
+          - :warning: Significant slowdown (>500ms)
+          </details>
+ + - name: Post Performance Comment (macOS) + if: startsWith(matrix.os, 'macos') && github.event_name == 'pull_request' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: perf-macos + message: | + ## Performance Report (macOS) + + | Metric | PR | Baseline | Delta | + |--------|-----|----------|-------| + | Server Startup | ${{ steps.perf-macos.outputs.pr_startup }}ms | ${{ steps.perf-macos.outputs.baseline_startup }}ms | ${{ steps.perf-macos.outputs.startup_diff }}ms | + | Full Refresh | ${{ steps.perf-macos.outputs.pr_refresh }}ms | ${{ steps.perf-macos.outputs.baseline_refresh }}ms | ${{ steps.perf-macos.outputs.refresh_diff }}ms | + + --- +
+          <details>
+          <summary>Legend</summary>
+
+          - :rocket: Significant speedup (>100ms faster)
+          - :white_check_mark: Faster than baseline
+          - :heavy_minus_sign: No significant change
+          - :small_red_triangle: Slower than baseline (>100ms)
+          - :warning: Significant slowdown (>500ms)
+          </details>
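For reference, the indicator logic duplicated across the bash and PowerShell
report steps above reduces to one thresholding rule on the full-refresh delta
(PR minus baseline, so negative means faster). A compact Rust rendering of that
rule (illustration only, not part of the patch):

    /// Map a full-refresh delta in milliseconds to the PR-comment indicator.
    fn delta_indicator(refresh_diff_ms: i64) -> &'static str {
        match refresh_diff_ms {
            d if d < -100 => ":rocket:",            // significant speedup
            d if d < 0 => ":white_check_mark:",     // faster than baseline
            d if d > 500 => ":warning:",            // significant slowdown
            d if d > 100 => ":small_red_triangle:", // slower than baseline
            _ => ":heavy_minus_sign:",              // no significant change
        }
    }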
diff --git a/Cargo.lock b/Cargo.lock index c91c6a03..22067f5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -389,6 +389,15 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + [[package]] name = "memchr" version = "2.7.4" @@ -404,6 +413,15 @@ dependencies = [ "cc", ] +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "once_cell" version = "1.19.0" @@ -446,6 +464,8 @@ dependencies = [ "regex", "serde", "serde_json", + "tracing", + "tracing-subscriber", "winresource", ] @@ -778,6 +798,12 @@ dependencies = [ "winreg", ] +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + [[package]] name = "proc-macro2" version = "1.0.101" @@ -940,6 +966,21 @@ dependencies = [ "digest", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + [[package]] name = "strsim" version = "0.11.1" @@ -979,6 +1020,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + [[package]] name = "toml" version = "0.8.14" @@ -1052,6 +1102,80 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d163a63c116ce562a22cda521fcc4d79152e7aba014456fb5eb442f6d6a10109" +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + [[package]] name = "typenum" version = "1.17.0" @@ -1070,6 +1194,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + [[package]] name = "version_check" version = "0.9.4" diff --git a/crates/pet/Cargo.toml b/crates/pet/Cargo.toml index 9a493b19..375a5f1d 100644 --- a/crates/pet/Cargo.toml +++ b/crates/pet/Cargo.toml @@ -40,6 +40,8 @@ pet-telemetry = { path = "../pet-telemetry" } pet-global-virtualenvs = { path = "../pet-global-virtualenvs" } pet-uv = { path = "../pet-uv" } log = "0.4.21" +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } clap = { version = "4.5.4", features = ["derive", "cargo"] } serde = { version = "1.0.152", features = ["derive"] } serde_json = "1.0.93" diff --git a/crates/pet/src/find.rs b/crates/pet/src/find.rs index 8d2650b0..9361ff0e 100644 --- a/crates/pet/src/find.rs +++ b/crates/pet/src/find.rs @@ -22,6 +22,7 @@ use std::path::PathBuf; use std::sync::Mutex; use std::time::Duration; use std::{sync::Arc, thread}; +use tracing::{info_span, instrument}; use crate::locators::identify_python_environment_using_locators; @@ -40,6 +41,7 @@ pub enum SearchScope { Workspace, } +#[instrument(skip(reporter, configuration, locators, environment), fields(search_scope = ?search_scope))] pub fn find_and_report_envs( reporter: &dyn Reporter, configuration: Configuration, @@ -72,6 +74,7 @@ pub fn find_and_report_envs( // 1. Find using known global locators. 
s.spawn(|| { // Find in all the finders + let _span = info_span!("locators_phase").entered(); let start = std::time::Instant::now(); if search_global { thread::scope(|s| { @@ -90,6 +93,8 @@ pub fn find_and_report_envs( let locator = locator.clone(); let summary = summary.clone(); s.spawn(move || { + let locator_name = format!("{:?}", locator.get_kind()); + let _span = info_span!("locator_find", locator = %locator_name).entered(); let start = std::time::Instant::now(); trace!("Searching using locator: {:?}", locator.get_kind()); locator.find(reporter); @@ -115,6 +120,7 @@ pub fn find_and_report_envs( }); // Step 2: Search in PATH variable s.spawn(|| { + let _span = info_span!("path_search_phase").entered(); let start = std::time::Instant::now(); if search_global { let global_env_search_paths: Vec = @@ -144,6 +150,7 @@ pub fn find_and_report_envs( let environment_directories_for_step3 = environment_directories.clone(); let summary_for_step3 = summary.clone(); s.spawn(move || { + let _span = info_span!("global_virtualenvs_phase").entered(); let start = std::time::Instant::now(); if search_global { let mut possible_environments = vec![]; @@ -202,6 +209,7 @@ pub fn find_and_report_envs( // that could the discovery. let summary_for_step4 = summary.clone(); s.spawn(move || { + let _span = info_span!("workspace_search_phase").entered(); let start = std::time::Instant::now(); thread::scope(|s| { // Find environments in the workspace folders. @@ -253,6 +261,7 @@ pub fn find_and_report_envs( summary } +#[instrument(skip(reporter, locators, global_env_search_paths, environment_directories), fields(workspace = %workspace_folder.display()))] pub fn find_python_environments_in_workspace_folder_recursive( workspace_folder: &PathBuf, reporter: &dyn Reporter, @@ -391,6 +400,7 @@ fn find_python_environments_in_paths_with_locators( } } +#[instrument(skip(locators, reporter, global_env_search_paths), fields(executable_count = executables.len()))] pub fn identify_python_executables_using_locators( executables: Vec, locators: &Arc>>, diff --git a/crates/pet/src/jsonrpc.rs b/crates/pet/src/jsonrpc.rs index eaf87446..beadc7c1 100644 --- a/crates/pet/src/jsonrpc.rs +++ b/crates/pet/src/jsonrpc.rs @@ -9,6 +9,7 @@ use crate::locators::create_locators; use lazy_static::lazy_static; use log::{error, info, trace}; use pet::resolve::resolve_environment; +use pet::initialize_tracing; use pet_conda::Conda; use pet_conda::CondaLocator; use pet_core::python_environment::PythonEnvironment; @@ -46,6 +47,7 @@ use std::{ thread, time::SystemTime, }; +use tracing::info_span; lazy_static! { /// Used to ensure we can have only one refreh at a time. @@ -63,7 +65,9 @@ pub struct Context { static MISSING_ENVS_REPORTED: AtomicBool = AtomicBool::new(false); pub fn start_jsonrpc_server() { - jsonrpc::initialize_logger(log::LevelFilter::Trace); + // Initialize tracing for performance profiling (controlled by RUST_LOG env var) + // Note: This includes log compatibility, so we don't call jsonrpc::initialize_logger + initialize_tracing(false); // These are globals for the the lifetime of the server. // Hence passed around as Arcs via the context. @@ -172,6 +176,11 @@ pub fn handle_refresh(context: Arc, id: u32, params: Value) { }); // Start in a new thread, we can have multiple requests. thread::spawn(move || { + let _span = info_span!("handle_refresh", + search_kind = ?refresh_options.search_kind, + has_search_paths = refresh_options.search_paths.is_some() + ).entered(); + // Ensure we can have only one refresh at a time. 
let lock = REFRESH_LOCK.lock().unwrap(); diff --git a/crates/pet/src/lib.rs b/crates/pet/src/lib.rs index 68f9aed4..dbc6a120 100644 --- a/crates/pet/src/lib.rs +++ b/crates/pet/src/lib.rs @@ -17,11 +17,51 @@ use pet_reporter::{self, cache::CacheReporter, stdio}; use resolve::resolve_environment; use std::path::PathBuf; use std::{collections::BTreeMap, env, sync::Arc, time::SystemTime}; +use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; pub mod find; pub mod locators; pub mod resolve; +/// Initialize tracing subscriber for performance profiling. +/// Set RUST_LOG=info or RUST_LOG=pet=debug for more detailed traces. +/// Set PET_TRACE_FORMAT=json for JSON output (useful for analysis tools). +/// +/// Note: This replaces the env_logger initialization since tracing-subscriber +/// provides a log compatibility layer via tracing-log. +pub fn initialize_tracing(verbose: bool) { + use std::sync::Once; + static INIT: Once = Once::new(); + + INIT.call_once(|| { + let filter = if verbose { + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("pet=debug")) + } else { + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("warn")) + }; + + let use_json = env::var("PET_TRACE_FORMAT") + .map(|v| v == "json") + .unwrap_or(false); + + if use_json { + tracing_subscriber::registry() + .with(filter) + .with(fmt::layer().json()) + .init(); + } else { + tracing_subscriber::registry() + .with(filter) + .with( + fmt::layer() + .with_target(true) + .with_timer(fmt::time::uptime()), + ) + .init(); + } + }); +} + #[derive(Debug, Clone)] pub struct FindOptions { pub print_list: bool, @@ -35,11 +75,13 @@ pub struct FindOptions { } pub fn find_and_report_envs_stdio(options: FindOptions) { - stdio::initialize_logger(if options.verbose { - log::LevelFilter::Trace - } else { - log::LevelFilter::Warn - }); + // Initialize tracing for performance profiling (includes log compatibility) + initialize_tracing(options.verbose); + + // Note: We don't call stdio::initialize_logger here anymore since + // tracing-subscriber provides log compatibility via tracing-log crate. + // stdio::initialize_logger would conflict with our tracing subscriber. + let now = SystemTime::now(); let config = create_config(&options); let search_scope = if options.workspace_only { @@ -196,11 +238,12 @@ fn find_envs( } pub fn resolve_report_stdio(executable: PathBuf, verbose: bool, cache_directory: Option) { - stdio::initialize_logger(if verbose { - log::LevelFilter::Trace - } else { - log::LevelFilter::Warn - }); + // Initialize tracing for performance profiling (includes log compatibility) + initialize_tracing(verbose); + + // Note: We don't call stdio::initialize_logger here anymore since + // tracing-subscriber provides log compatibility via tracing-log crate. + let now = SystemTime::now(); if let Some(cache_directory) = cache_directory.clone() { diff --git a/crates/pet/src/locators.rs b/crates/pet/src/locators.rs index ea0ea61c..00448b2a 100644 --- a/crates/pet/src/locators.rs +++ b/crates/pet/src/locators.rs @@ -25,6 +25,7 @@ use pet_virtualenv::VirtualEnv; use pet_virtualenvwrapper::VirtualEnvWrapper; use std::path::PathBuf; use std::sync::Arc; +use tracing::{info_span, instrument}; pub fn create_locators( conda_locator: Arc, @@ -95,6 +96,7 @@ pub fn create_locators( /// Identify the Python environment using the locators. /// search_path : Generally refers to original folder that was being searched when the env was found. 
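+/// Falls back to querying the interpreter itself (via `ResolvedPythonEnv::from`)
+/// when no locator recognizes the executable.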
diff --git a/crates/pet/src/locators.rs b/crates/pet/src/locators.rs
index ea0ea61c..00448b2a 100644
--- a/crates/pet/src/locators.rs
+++ b/crates/pet/src/locators.rs
@@ -25,6 +25,7 @@ use pet_virtualenv::VirtualEnv;
 use pet_virtualenvwrapper::VirtualEnvWrapper;
 use std::path::PathBuf;
 use std::sync::Arc;
+use tracing::{info_span, instrument};
 
 pub fn create_locators(
     conda_locator: Arc<Conda>,
@@ -95,6 +96,7 @@ pub fn create_locators(
 /// Identify the Python environment using the locators.
 /// search_path : Generally refers to original folder that was being searched when the env was found.
+#[instrument(skip(locators, global_env_search_paths), fields(executable = %env.executable.display()))]
 pub fn identify_python_environment_using_locators(
     env: &PythonEnv,
     locators: &[Arc<dyn Locator>],
@@ -105,9 +107,16 @@
         "Identifying Python environment using locators: {:?}",
         executable
     );
-    if let Some(env) = locators.iter().find_map(|loc| loc.try_from(env)) {
-        return Some(env);
+
+    // Try each locator and record which one matches
+    for loc in locators.iter() {
+        let locator_name = format!("{:?}", loc.get_kind());
+        let _span = info_span!("try_from_locator", locator = %locator_name).entered();
+        if let Some(env) = loc.try_from(env) {
+            return Some(env);
+        }
     }
+
     trace!(
         "Failed to identify Python environment using locators, now trying to resolve: {:?}",
         executable
@@ -116,6 +125,8 @@
     // Yikes, we have no idea what this is.
     // Lets get the actual interpreter info and try to figure this out.
     // We try to get the interpreter info, hoping that the real exe returned might be identifiable.
+    let _resolve_span =
+        info_span!("resolve_python_env", executable = %executable.display()).entered();
     if let Some(resolved_env) = ResolvedPythonEnv::from(&executable) {
         let env = resolved_env.to_python_env();
         if let Some(env) = locators.iter().find_map(|loc| loc.try_from(&env)) {

From 8a7bcfaed4725ae68fc81ad425f3ca839f3e60ba Mon Sep 17 00:00:00 2001
From: Karthik Nadig
Date: Mon, 2 Feb 2026 09:07:07 -0800
Subject: [PATCH 3/3] Fix formatting issues in performance report scripts for
 consistency

---
 .github/workflows/perf-tests.yml | 54 ++++++++++++++++----------------
 crates/pet/src/jsonrpc.rs        |  9 +++---
 2 files changed, 32 insertions(+), 31 deletions(-)

diff --git a/.github/workflows/perf-tests.yml b/.github/workflows/perf-tests.yml
index d2d93b7e..4497c4d9 100644
--- a/.github/workflows/perf-tests.yml
+++ b/.github/workflows/perf-tests.yml
@@ -124,7 +124,7 @@ jobs:
           PR_STARTUP=$(jq -r '.server_startup_ms // 0' metrics.json)
           PR_REFRESH=$(jq -r '.full_refresh_ms // 0' metrics.json)
           PR_ENVS=$(jq -r '.environments_count // 0' metrics.json)
-          
+
           # Extract baseline metrics (default to 0 if not available)
           if [ -f baseline-perf/metrics.json ]; then
             BASELINE_STARTUP=$(jq -r '.server_startup_ms // 0' baseline-perf/metrics.json)
@@ -135,24 +135,24 @@ jobs:
             BASELINE_REFRESH=0
             BASELINE_ENVS=0
           fi
-          
+
           # Calculate diff (positive means slowdown, negative means speedup)
           STARTUP_DIFF=$(echo "$PR_STARTUP - $BASELINE_STARTUP" | bc)
           REFRESH_DIFF=$(echo "$PR_REFRESH - $BASELINE_REFRESH" | bc)
-          
+
           # Calculate percentage change
           if [ "$BASELINE_STARTUP" != "0" ]; then
             STARTUP_PCT=$(echo "scale=1; ($STARTUP_DIFF / $BASELINE_STARTUP) * 100" | bc)
           else
             STARTUP_PCT="N/A"
           fi
-          
+
           if [ "$BASELINE_REFRESH" != "0" ]; then
             REFRESH_PCT=$(echo "scale=1; ($REFRESH_DIFF / $BASELINE_REFRESH) * 100" | bc)
           else
             REFRESH_PCT="N/A"
           fi
-          
+
           # Determine delta indicators (for perf, negative is good = faster)
           if (( $(echo "$REFRESH_DIFF < -100" | bc -l) )); then
             DELTA_INDICATOR=":rocket:"
@@ -165,7 +165,7 @@ jobs:
          else
            DELTA_INDICATOR=":heavy_minus_sign:"
          fi
-          
+
          # Set outputs
          echo "pr_startup=$PR_STARTUP" >> $GITHUB_OUTPUT
          echo "pr_refresh=$PR_REFRESH" >> $GITHUB_OUTPUT
@@ -176,7 +176,7 @@ jobs:
          echo "startup_pct=$STARTUP_PCT" >> $GITHUB_OUTPUT
          echo "refresh_pct=$REFRESH_PCT" >> $GITHUB_OUTPUT
          echo "delta_indicator=$DELTA_INDICATOR" >> $GITHUB_OUTPUT
-          
+
          # Write step summary
          echo "## Performance Report (Linux)" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
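To make the baseline comparison above concrete, a worked example under assumed numbers: with a baseline full refresh of 2000 ms and a PR refresh of 1850 ms, REFRESH_DIFF is -150, REFRESH_PCT is -7.5, and the `< -100` branch selects :rocket:. The sketch below mirrors that logic in Rust; only the two branches visible in this diff are reproduced, since the intermediate indicators are hidden by the hunk context:

    // Mirrors the workflow's delta logic. The -100ms threshold and the
    // :rocket: / :heavy_minus_sign: branches are visible in the diff; any
    // intermediate thresholds are elided by the hunk context and omitted.
    fn delta_indicator(refresh_diff_ms: i64) -> &'static str {
        if refresh_diff_ms < -100 {
            ":rocket:" // significant speedup (>100ms faster)
        } else {
            ":heavy_minus_sign:" // fallback: no significant change
        }
    }

    fn main() {
        let (baseline, pr) = (2000i64, 1850i64);
        let diff = pr - baseline; // -150
        let pct = diff as f64 / baseline as f64 * 100.0; // -7.5
        println!("{diff}ms ({pct:.1}%) => {}", delta_indicator(diff));
    }
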
@@ -196,7 +196,7 @@ jobs:
          $prStartup = $prMetrics.server_startup_ms
          $prRefresh = $prMetrics.full_refresh_ms
          $prEnvs = $prMetrics.environments_count
-          
+
          # Extract baseline metrics (default to 0 if not available)
          if (Test-Path "baseline-perf/metrics.json") {
            $baselineMetrics = Get-Content -Path "baseline-perf/metrics.json" -Raw | ConvertFrom-Json
@@ -208,24 +208,24 @@ jobs:
            $baselineRefresh = 0
            $baselineEnvs = 0
          }
-          
+
          # Calculate diff
          $startupDiff = $prStartup - $baselineStartup
          $refreshDiff = $prRefresh - $baselineRefresh
-          
+
          # Calculate percentage change
          if ($baselineStartup -gt 0) {
            $startupPct = [math]::Round(($startupDiff / $baselineStartup) * 100, 1)
          } else {
            $startupPct = "N/A"
          }
-          
+
          if ($baselineRefresh -gt 0) {
            $refreshPct = [math]::Round(($refreshDiff / $baselineRefresh) * 100, 1)
          } else {
            $refreshPct = "N/A"
          }
-          
+
          # Determine delta indicator
          if ($refreshDiff -lt -100) {
            $deltaIndicator = ":rocket:"
@@ -238,7 +238,7 @@ jobs:
          } else {
            $deltaIndicator = ":heavy_minus_sign:"
          }
-          
+
          # Set outputs
          echo "pr_startup=$prStartup" >> $env:GITHUB_OUTPUT
          echo "pr_refresh=$prRefresh" >> $env:GITHUB_OUTPUT
@@ -249,7 +249,7 @@ jobs:
          echo "startup_pct=$startupPct" >> $env:GITHUB_OUTPUT
          echo "refresh_pct=$refreshPct" >> $env:GITHUB_OUTPUT
          echo "delta_indicator=$deltaIndicator" >> $env:GITHUB_OUTPUT
-          
+
          # Write step summary
          echo "## Performance Report (Windows)" >> $env:GITHUB_STEP_SUMMARY
          echo "" >> $env:GITHUB_STEP_SUMMARY
@@ -268,7 +268,7 @@ jobs:
          PR_STARTUP=$(jq -r '.server_startup_ms // 0' metrics.json)
          PR_REFRESH=$(jq -r '.full_refresh_ms // 0' metrics.json)
          PR_ENVS=$(jq -r '.environments_count // 0' metrics.json)
-          
+
          # Extract baseline metrics (default to 0 if not available)
          if [ -f baseline-perf/metrics.json ]; then
            BASELINE_STARTUP=$(jq -r '.server_startup_ms // 0' baseline-perf/metrics.json)
@@ -279,11 +279,11 @@ jobs:
            BASELINE_REFRESH=0
            BASELINE_ENVS=0
          fi
-          
+
          # Calculate diff
          STARTUP_DIFF=$((PR_STARTUP - BASELINE_STARTUP))
          REFRESH_DIFF=$((PR_REFRESH - BASELINE_REFRESH))
-          
+
          # Set outputs
          echo "pr_startup=$PR_STARTUP" >> $GITHUB_OUTPUT
          echo "pr_refresh=$PR_REFRESH" >> $GITHUB_OUTPUT
@@ -291,7 +291,7 @@ jobs:
          echo "baseline_refresh=$BASELINE_REFRESH" >> $GITHUB_OUTPUT
          echo "startup_diff=$STARTUP_DIFF" >> $GITHUB_OUTPUT
          echo "refresh_diff=$REFRESH_DIFF" >> $GITHUB_OUTPUT
-          
+
          # Write step summary
          echo "## Performance Report (macOS)" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
@@ -309,16 +309,16 @@ jobs:
          header: perf-linux
          message: |
            ## Performance Report (Linux) ${{ steps.perf-linux.outputs.delta_indicator }}
-            
+
            | Metric | PR | Baseline | Delta | Change |
            |--------|-----|----------|-------|--------|
            | Server Startup | ${{ steps.perf-linux.outputs.pr_startup }}ms | ${{ steps.perf-linux.outputs.baseline_startup }}ms | ${{ steps.perf-linux.outputs.startup_diff }}ms | ${{ steps.perf-linux.outputs.startup_pct }}% |
            | Full Refresh | ${{ steps.perf-linux.outputs.pr_refresh }}ms | ${{ steps.perf-linux.outputs.baseline_refresh }}ms | ${{ steps.perf-linux.outputs.refresh_diff }}ms | ${{ steps.perf-linux.outputs.refresh_pct }}% |
-            
+
            ---
            Legend
-            
+
            - :rocket: Significant speedup (>100ms faster)
            - :white_check_mark: Faster than baseline
            - :heavy_minus_sign: No significant change
@@ -333,16 +333,16 @@ jobs:
          header: perf-windows
          message: |
            ## Performance Report (Windows) ${{ steps.perf-windows.outputs.delta_indicator }}
-            
+
            | Metric | PR | Baseline | Delta | Change |
            |--------|-----|----------|-------|--------|
            | Server Startup | ${{ steps.perf-windows.outputs.pr_startup }}ms | ${{ steps.perf-windows.outputs.baseline_startup }}ms | ${{ steps.perf-windows.outputs.startup_diff }}ms | ${{ steps.perf-windows.outputs.startup_pct }}% |
            | Full Refresh | ${{ steps.perf-windows.outputs.pr_refresh }}ms | ${{ steps.perf-windows.outputs.baseline_refresh }}ms | ${{ steps.perf-windows.outputs.refresh_diff }}ms | ${{ steps.perf-windows.outputs.refresh_pct }}% |
-            
+
            ---
            Legend
-            
+
            - :rocket: Significant speedup (>100ms faster)
            - :white_check_mark: Faster than baseline
            - :heavy_minus_sign: No significant change
@@ -357,16 +357,16 @@ jobs:
          header: perf-macos
          message: |
            ## Performance Report (macOS)
-            
+
            | Metric | PR | Baseline | Delta |
            |--------|-----|----------|-------|
            | Server Startup | ${{ steps.perf-macos.outputs.pr_startup }}ms | ${{ steps.perf-macos.outputs.baseline_startup }}ms | ${{ steps.perf-macos.outputs.startup_diff }}ms |
            | Full Refresh | ${{ steps.perf-macos.outputs.pr_refresh }}ms | ${{ steps.perf-macos.outputs.baseline_refresh }}ms | ${{ steps.perf-macos.outputs.refresh_diff }}ms |
-            
+
            ---
            Legend
-            
+
            - :rocket: Significant speedup (>100ms faster)
            - :white_check_mark: Faster than baseline
            - :heavy_minus_sign: No significant change
diff --git a/crates/pet/src/jsonrpc.rs b/crates/pet/src/jsonrpc.rs
index beadc7c1..5719033b 100644
--- a/crates/pet/src/jsonrpc.rs
+++ b/crates/pet/src/jsonrpc.rs
@@ -8,8 +8,8 @@ use crate::find::SearchScope;
 use crate::locators::create_locators;
 use lazy_static::lazy_static;
 use log::{error, info, trace};
-use pet::resolve::resolve_environment;
 use pet::initialize_tracing;
+use pet::resolve::resolve_environment;
 use pet_conda::Conda;
 use pet_conda::CondaLocator;
 use pet_core::python_environment::PythonEnvironment;
@@ -176,11 +176,12 @@ pub fn handle_refresh(context: Arc<Context>, id: u32, params: Value) {
     });
     // Start in a new thread, we can have multiple requests.
     thread::spawn(move || {
-        let _span = info_span!("handle_refresh", 
+        let _span = info_span!("handle_refresh",
             search_kind = ?refresh_options.search_kind,
             has_search_paths = refresh_options.search_paths.is_some()
-        ).entered();
-        
+        )
+        .entered();
+
         // Ensure we can have only one refresh at a time.
         let lock = REFRESH_LOCK.lock().unwrap();
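For offline analysis of the same metrics the workflow extracts with jq, a hedged sketch of reading metrics.json in Rust: the field names come from the jq queries in perf-tests.yml, while the struct, the serde/serde_json dependencies, and the file location are assumptions for illustration.

    use serde::Deserialize;

    // Field names mirror the jq queries (.server_startup_ms,
    // .full_refresh_ms, .environments_count); the struct itself is
    // illustrative, not part of the patch.
    #[derive(Debug, Deserialize)]
    struct PerfMetrics {
        server_startup_ms: Option<f64>,
        full_refresh_ms: Option<f64>,
        environments_count: Option<u64>,
    }

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let raw = std::fs::read_to_string("metrics.json")?;
        let metrics: PerfMetrics = serde_json::from_str(&raw)?;
        println!(
            "startup={:?}ms refresh={:?}ms envs={:?}",
            metrics.server_startup_ms, metrics.full_refresh_ms, metrics.environments_count
        );
        Ok(())
    }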