diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml new file mode 100644 index 00000000..9ffb9f3e --- /dev/null +++ b/.github/workflows/performance.yml @@ -0,0 +1,43 @@ +name: Performance Tests + +on: + pull_request: + paths: + - 'main.py' + - 'tests/test_performance_*.py' + push: + branches: [main] + +jobs: + benchmark: + runs-on: ubuntu-latest + permissions: + contents: read + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v4 + with: + enable-cache: true + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.13' + + - name: Install dependencies + run: uv sync --all-extras + + - name: Run performance tests + run: | + uv run pytest tests/test_performance_regression.py \ + --benchmark-only \ + --benchmark-autosave \ + -v + + - name: Store benchmark results + uses: actions/upload-artifact@v4 + with: + name: benchmark-results + path: .benchmarks/ diff --git a/pyproject.toml b/pyproject.toml index 4cc94100..484682a8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,4 +14,5 @@ dev = [ "pytest>=7.0.0", "pytest-mock>=3.10.0", "pytest-xdist>=3.0.0", + "pytest-benchmark>=4.0.0", ] diff --git a/tests/test_performance_regression.py b/tests/test_performance_regression.py new file mode 100644 index 00000000..a39e2558 --- /dev/null +++ b/tests/test_performance_regression.py @@ -0,0 +1,60 @@ +""" +Performance regression tests with baseline thresholds. + +These benchmarks guard against unintentional slowdowns on hot paths. +Run with: uv run pytest tests/test_performance_regression.py --benchmark-only +""" + +import sys +import os + +import pytest +import httpx + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +import main + +# Maximum acceptable mean execution time (seconds) for hot-path operations. +_MAX_MEAN_S = 0.001 # 1 ms + + +class TestPerformanceRegression: + """Performance regression tests with baseline thresholds.""" + + def test_validate_hostname_performance(self, benchmark): + """Hostname validation should complete in <1ms on average.""" + # Use a private IP so no real DNS lookup occurs, making the test fast and deterministic + main.validate_hostname.cache_clear() + result = benchmark(main.validate_hostname, "192.168.1.1") + assert result is False # Private IP is rejected + assert benchmark.stats["mean"] < _MAX_MEAN_S + + def test_validate_hostname_cached_performance(self, benchmark): + """Cached hostname validation should be significantly faster than <1ms.""" + # Prime the cache with a known private IP + main.validate_hostname.cache_clear() + main.validate_hostname("10.0.0.1") + # Now benchmark the cached call + result = benchmark(main.validate_hostname, "10.0.0.1") + assert result is False + assert benchmark.stats["mean"] < _MAX_MEAN_S + + def test_rate_limit_parsing_performance(self, benchmark): + """Rate limit header parsing should complete in <1ms on average.""" + headers = { + "X-RateLimit-Limit": "5000", + "X-RateLimit-Remaining": "4999", + "X-RateLimit-Reset": "1640000000", + } + mock_response = httpx.Response(200, headers=headers) + + result = benchmark(main._parse_rate_limit_headers, mock_response) + assert result is None # Function returns None + assert benchmark.stats["mean"] < _MAX_MEAN_S + + def test_rate_limit_parsing_empty_headers_performance(self, benchmark): + """Rate limit parsing with no rate-limit headers should complete in <1ms.""" + mock_response = httpx.Response(200, headers={}) + result = benchmark(main._parse_rate_limit_headers, mock_response) + assert result is None + assert benchmark.stats["mean"] < _MAX_MEAN_S