From dd12621aa1fdde5ec2d3ff00cfe58967b64ff8c2 Mon Sep 17 00:00:00 2001 From: saudzahirr Date: Mon, 9 Mar 2026 12:36:09 +0500 Subject: [PATCH 1/3] Implement license key generation with batch metadata and error handling --- backend/app/core/exceptions.py | 13 + backend/app/schemas/response.py | 3 + backend/app/services/license_key_generator.py | 195 ++++++++++++ backend/tests/core/test_exceptions.py | 20 ++ .../tests/unit/test_license_key_generator.py | 297 ++++++++++++++++++ migrations/06_license_key_updates.sql | 48 +++ .../down/06_license_key_updates_down.sql | 31 ++ 7 files changed, 607 insertions(+) create mode 100644 backend/app/services/license_key_generator.py create mode 100644 backend/tests/unit/test_license_key_generator.py create mode 100644 migrations/06_license_key_updates.sql create mode 100644 migrations/down/06_license_key_updates_down.sql diff --git a/backend/app/core/exceptions.py b/backend/app/core/exceptions.py index ce52e4b..ad898b3 100644 --- a/backend/app/core/exceptions.py +++ b/backend/app/core/exceptions.py @@ -157,3 +157,16 @@ def __init__(self, message: str = "License has expired") -> None: message=message, http_status=status.HTTP_409_CONFLICT, ) + + +class LicenseKeyGenerationError(APIException): + """License key generation failed after exhausting retries.""" + + def __init__( + self, message: str = "Failed to generate a unique license key" + ) -> None: + super().__init__( + error_code=ErrorCode.LICENSE_KEY_GENERATION_ERROR, + message=message, + http_status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) diff --git a/backend/app/schemas/response.py b/backend/app/schemas/response.py index 6dd9645..aa7cc42 100644 --- a/backend/app/schemas/response.py +++ b/backend/app/schemas/response.py @@ -30,6 +30,9 @@ class ErrorCode(str, Enum): LICENSE_REVOKED = "LICENSE_REVOKED" LICENSE_EXPIRED = "LICENSE_EXPIRED" + # License-key generation errors + LICENSE_KEY_GENERATION_ERROR = "LICENSE_KEY_GENERATION_ERROR" + # Business logic errors 
BUSINESS_LOGIC_ERROR = "BUSINESS_LOGIC_ERROR" INVALID_STATE = "INVALID_STATE" diff --git a/backend/app/services/license_key_generator.py b/backend/app/services/license_key_generator.py new file mode 100644 index 0000000..3ff950f --- /dev/null +++ b/backend/app/services/license_key_generator.py @@ -0,0 +1,195 @@ +"""License key generator — cryptographically random key generation. + +Generates license keys in the format ``XXXX-XXXX-XXXX-XXXX`` where each +character is drawn from the uppercase alphanumeric alphabet ``A-Z0-9`` +using :mod:`secrets` for cryptographic randomness (~82 bits of entropy +per key). + +Supports optional batch metadata and collision detection with bounded +retries. +""" + +from __future__ import annotations + +import logging +import re +import secrets +import string +from dataclasses import dataclass, field + +from app.core.exceptions import LicenseKeyGenerationError + + +logger = logging.getLogger(__name__) + +ALPHABET: str = string.ascii_uppercase + string.digits +"""Uppercase alphanumeric character set used for key generation.""" + +SEGMENT_LENGTH: int = 4 +"""Number of characters per key segment.""" + +NUM_SEGMENTS: int = 4 +"""Number of segments in a license key.""" + +MAX_RETRIES: int = 10 +"""Maximum collision-retry attempts before raising an error.""" + +LICENSE_KEY_PATTERN: re.Pattern[str] = re.compile( + r"^[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}$" +) +"""Compiled regex that every generated key must match.""" + + +@dataclass(frozen=True, slots=True) +class BatchMetadata: + """Optional metadata attached to a batch of generated license keys. + + Attributes: + batch_id: Identifier grouping keys that belong to the same + issuance batch. + campaign: Marketing or distribution campaign associated with + the batch. + issued_by: Identifier (user-id, email, or service name) of the + entity that triggered key generation. 
+ """ + + batch_id: str | None = None + campaign: str | None = None + issued_by: str | None = None + + +@dataclass(slots=True) +class GeneratedLicenseKey: + """Container for a generated license key and its batch metadata. + + Attributes: + key: The formatted license key string + (e.g. ``"A1B2-C3D4-E5F6-G7H8"``). + metadata: Optional :class:`BatchMetadata` associated with the + key. + """ + + key: str + metadata: BatchMetadata = field(default_factory=BatchMetadata) + + +def _generate_segment() -> str: + """Generate a single random segment of :data:`SEGMENT_LENGTH` characters. + + Returns: + str: A string of ``SEGMENT_LENGTH`` uppercase-alphanumeric + characters chosen via :func:`secrets.choice`. + """ + return "".join(secrets.choice(ALPHABET) for _ in range(SEGMENT_LENGTH)) + + +def _generate_raw_key() -> str: + """Generate a raw license key with hyphen-separated segments. + + Returns: + str: A key in ``XXXX-XXXX-XXXX-XXXX`` format. + """ + return "-".join(_generate_segment() for _ in range(NUM_SEGMENTS)) + + +def generate_license_key( + existing_keys: set[str] | None = None, metadata: BatchMetadata | None = None +) -> GeneratedLicenseKey: + """Generate a single unique license key with collision detection. + + Attempts up to :data:`MAX_RETRIES` times to produce a key that does + not collide with *existing_keys*. Each generated key is validated + against :data:`LICENSE_KEY_PATTERN` before the collision check. + + Args: + existing_keys: A set of previously issued keys used for + collision detection. Pass ``None`` or an empty set when + uniqueness checking is not required. + metadata: Optional :class:`BatchMetadata` to attach to the + returned result. + + Returns: + GeneratedLicenseKey: The generated key together with its + metadata. + + Raises: + LicenseKeyGenerationError: If a unique key cannot be produced + within :data:`MAX_RETRIES` attempts. 
+ """ + if existing_keys is None: + existing_keys = set() + + effective_metadata = metadata or BatchMetadata() + + for attempt in range(1, MAX_RETRIES + 1): + key = _generate_raw_key() + + if not LICENSE_KEY_PATTERN.match(key): + logger.warning( + "Generated key failed format validation on attempt %d", attempt + ) + continue + + if key not in existing_keys: + logger.debug( + "License key generated on attempt %d (batch_id=%s)", + attempt, + effective_metadata.batch_id, + ) + return GeneratedLicenseKey(key=key, metadata=effective_metadata) + + logger.warning( + "Collision detected on attempt %d/%d", attempt, MAX_RETRIES + ) + + raise LicenseKeyGenerationError( + f"Could not generate a unique license key after {MAX_RETRIES} retries" + ) + + +def generate_license_keys_batch( + count: int, + existing_keys: set[str] | None = None, + metadata: BatchMetadata | None = None, +) -> list[GeneratedLicenseKey]: + """Generate a batch of unique license keys. + + Each key in the batch is guaranteed to be unique against both the + provided *existing_keys* and all previously generated keys within + the same batch invocation. + + Args: + count: Number of keys to generate. Must be >= 1. + existing_keys: A set of previously issued keys used for + collision detection. Pass ``None`` or an empty set when + external uniqueness checking is not required. + metadata: Optional :class:`BatchMetadata` to attach to every + key in the batch. + + Returns: + list[GeneratedLicenseKey]: A list of *count* unique keys, each + carrying the supplied metadata. + + Raises: + ValueError: If *count* is less than 1. 
+ """ + if count < 1: + raise ValueError("count must be >= 1") + + if existing_keys is None: + existing_keys = set() + + combined_keys: set[str] = set(existing_keys) + results: list[GeneratedLicenseKey] = [] + + for i in range(count): + generated = generate_license_key( + existing_keys=combined_keys, metadata=metadata + ) + combined_keys.add(generated.key) + results.append(generated) + logger.debug( + "Batch key %d/%d generated: %s", i + 1, count, generated.key + ) + + return results diff --git a/backend/tests/core/test_exceptions.py b/backend/tests/core/test_exceptions.py index 732e4d9..53b0161 100644 --- a/backend/tests/core/test_exceptions.py +++ b/backend/tests/core/test_exceptions.py @@ -17,6 +17,7 @@ BusinessLogicException, ConflictException, LicenseExpiredException, + LicenseKeyGenerationError, LicenseNotFoundException, LicenseRevokedException, NotFoundException, @@ -90,6 +91,11 @@ ErrorCode.LICENSE_EXPIRED, id="license_expired_exception", ), + pytest.param( + LicenseKeyGenerationError, + ErrorCode.LICENSE_KEY_GENERATION_ERROR, + id="license_key_generation_error", + ), ], ) def test_exception_has_correct_error_code(exception_class, expected_code): @@ -165,6 +171,11 @@ def test_exception_has_correct_error_code(exception_class, expected_code): status.HTTP_409_CONFLICT, id="license_expired_exception", ), + pytest.param( + LicenseKeyGenerationError, + status.HTTP_500_INTERNAL_SERVER_ERROR, + id="license_key_generation_error", + ), ], ) def test_exception_has_correct_http_status(exception_class, expected_status): @@ -304,6 +315,9 @@ def test_exception_stores_custom_details(exception_class, message, details): ), pytest.param(LicenseRevokedException, id="license_revoked_exception"), pytest.param(LicenseExpiredException, id="license_expired_exception"), + pytest.param( + LicenseKeyGenerationError, id="license_key_generation_error" + ), ], ) def test_simple_exception_accepts_custom_message(exception_class): @@ -356,6 +370,9 @@ def 
test_parameterized_exception_accepts_custom_message(exception_class): ), pytest.param(LicenseRevokedException, id="license_revoked_exception"), pytest.param(LicenseExpiredException, id="license_expired_exception"), + pytest.param( + LicenseKeyGenerationError, id="license_key_generation_error" + ), ], ) def test_exception_default_message_has_reasonable_length(exception_class): @@ -401,6 +418,9 @@ def test_api_exception_is_subclass_of_exception(): ), pytest.param(LicenseRevokedException, id="license_revoked_exception"), pytest.param(LicenseExpiredException, id="license_expired_exception"), + pytest.param( + LicenseKeyGenerationError, id="license_key_generation_error" + ), ], ) def test_concrete_exception_is_subclass_of_api_exception(exception_class): diff --git a/backend/tests/unit/test_license_key_generator.py b/backend/tests/unit/test_license_key_generator.py new file mode 100644 index 0000000..eb48afa --- /dev/null +++ b/backend/tests/unit/test_license_key_generator.py @@ -0,0 +1,297 @@ +"""Unit tests for the license key generator module. + +Tests cover: +- Single key format validation against the required pattern. +- Cryptographic randomness (keys are non-deterministic). +- Batch generation with intra-batch uniqueness guarantees. +- Collision detection and bounded retry behaviour. +- LicenseKeyGenerationError raised after retry exhaustion. +- BatchMetadata attachment and propagation. +- Edge cases: count validation, empty/None existing-key sets. 
+""" + +from __future__ import annotations + +from unittest.mock import patch + +import pytest + +from app.core.exceptions import LicenseKeyGenerationError +from app.schemas.response import ErrorCode +from app.services.license_key_generator import ( + ALPHABET, + LICENSE_KEY_PATTERN, + MAX_RETRIES, + BatchMetadata, + GeneratedLicenseKey, + generate_license_key, + generate_license_keys_batch, +) + + +@pytest.mark.unit +class TestLicenseKeyFormat: + """Verify generated keys match the ``XXXX-XXXX-XXXX-XXXX`` pattern.""" + + def test_single_key_matches_pattern(self): + result = generate_license_key() + assert LICENSE_KEY_PATTERN.match(result.key), ( + f"Key {result.key!r} does not match the required pattern" + ) + + def test_key_length_is_19_characters(self): + result = generate_license_key() + assert len(result.key) == 19 + + def test_key_has_four_segments(self): + result = generate_license_key() + segments = result.key.split("-") + assert len(segments) == 4 + + def test_each_segment_has_four_characters(self): + result = generate_license_key() + for segment in result.key.split("-"): + assert len(segment) == 4 + + def test_key_uses_only_uppercase_alphanumeric(self): + result = generate_license_key() + raw = result.key.replace("-", "") + assert all(c in ALPHABET for c in raw) + + def test_multiple_keys_all_match_pattern(self): + for _ in range(50): + result = generate_license_key() + assert LICENSE_KEY_PATTERN.match(result.key) + + +@pytest.mark.unit +class TestKeyUniqueness: + """Verify keys are unique across invocations.""" + + def test_generated_keys_are_unique(self): + keys = {generate_license_key().key for _ in range(100)} + assert len(keys) == 100 + + def test_collision_with_existing_keys_triggers_retry(self): + first = generate_license_key() + existing = {first.key} + second = generate_license_key(existing_keys=existing) + assert second.key != first.key + assert second.key not in existing + + +@pytest.mark.unit +class TestCollisionRetryExhaustion: + """Verify 
LicenseKeyGenerationError is raised when retries are exhausted.""" + + def test_raises_after_max_retries(self): + colliding_key = "AAAA-BBBB-CCCC-DDDD" + existing = {colliding_key} + + with patch( + "app.services.license_key_generator._generate_raw_key", + return_value=colliding_key, + ): + with pytest.raises(LicenseKeyGenerationError) as exc_info: + generate_license_key(existing_keys=existing) + + assert str(MAX_RETRIES) in str(exc_info.value) + + def test_error_has_correct_error_code(self): + colliding_key = "XXXX-YYYY-ZZZZ-1234" + existing = {colliding_key} + + with patch( + "app.services.license_key_generator._generate_raw_key", + return_value=colliding_key, + ): + with pytest.raises(LicenseKeyGenerationError) as exc_info: + generate_license_key(existing_keys=existing) + + assert ( + exc_info.value.error_code + == ErrorCode.LICENSE_KEY_GENERATION_ERROR + ) + + def test_error_has_500_status(self): + colliding_key = "AAAA-BBBB-CCCC-DDDD" + existing = {colliding_key} + + with patch( + "app.services.license_key_generator._generate_raw_key", + return_value=colliding_key, + ): + with pytest.raises(LicenseKeyGenerationError) as exc_info: + generate_license_key(existing_keys=existing) + + assert exc_info.value.http_status == 500 + + def test_succeeds_on_last_retry(self): + colliding_key = "AAAA-BBBB-CCCC-DDDD" + unique_key = "ZZZZ-9999-YYYY-8888" + existing = {colliding_key} + + side_effects = [colliding_key] * (MAX_RETRIES - 1) + [unique_key] + + with patch( + "app.services.license_key_generator._generate_raw_key", + side_effect=side_effects, + ): + result = generate_license_key(existing_keys=existing) + assert result.key == unique_key + + +@pytest.mark.unit +class TestBatchMetadata: + """Verify batch metadata is correctly attached to generated keys.""" + + def test_default_metadata_is_empty(self): + result = generate_license_key() + assert result.metadata.batch_id is None + assert result.metadata.campaign is None + assert result.metadata.issued_by is None + + def 
test_metadata_attached_to_single_key(self): + meta = BatchMetadata( + batch_id="batch-001", + campaign="summer-sale", + issued_by="admin@example.com", + ) + result = generate_license_key(metadata=meta) + + assert result.metadata.batch_id == "batch-001" + assert result.metadata.campaign == "summer-sale" + assert result.metadata.issued_by == "admin@example.com" + + def test_metadata_propagated_in_batch(self): + meta = BatchMetadata( + batch_id="batch-002", campaign="launch", issued_by="system" + ) + results = generate_license_keys_batch(count=5, metadata=meta) + + for result in results: + assert result.metadata.batch_id == "batch-002" + assert result.metadata.campaign == "launch" + assert result.metadata.issued_by == "system" + + def test_partial_metadata(self): + meta = BatchMetadata(batch_id="batch-003") + result = generate_license_key(metadata=meta) + + assert result.metadata.batch_id == "batch-003" + assert result.metadata.campaign is None + assert result.metadata.issued_by is None + + def test_metadata_is_frozen(self): + meta = BatchMetadata(batch_id="batch-004") + with pytest.raises(AttributeError): + meta.batch_id = "modified" # type: ignore[misc] + + +@pytest.mark.unit +class TestBatchGeneration: + """Verify batch generation produces correct counts and unique keys.""" + + def test_batch_returns_correct_count(self): + results = generate_license_keys_batch(count=10) + assert len(results) == 10 + + def test_batch_keys_are_unique(self): + results = generate_license_keys_batch(count=50) + keys = [r.key for r in results] + assert len(set(keys)) == 50 + + def test_batch_keys_all_match_pattern(self): + results = generate_license_keys_batch(count=20) + for result in results: + assert LICENSE_KEY_PATTERN.match(result.key) + + def test_batch_excludes_existing_keys(self): + existing = {"AAAA-BBBB-CCCC-DDDD", "EEEE-FFFF-0000-1111"} + results = generate_license_keys_batch(count=5, existing_keys=existing) + + for result in results: + assert result.key not in existing + + 
def test_batch_count_zero_raises_value_error(self): + with pytest.raises(ValueError, match="count must be >= 1"): + generate_license_keys_batch(count=0) + + def test_batch_count_negative_raises_value_error(self): + with pytest.raises(ValueError, match="count must be >= 1"): + generate_license_keys_batch(count=-1) + + def test_batch_single_item(self): + results = generate_license_keys_batch(count=1) + assert len(results) == 1 + assert LICENSE_KEY_PATTERN.match(results[0].key) + + +@pytest.mark.unit +class TestExistingKeysHandling: + """Verify edge cases around the existing_keys parameter.""" + + def test_none_existing_keys_accepted(self): + result = generate_license_key(existing_keys=None) + assert LICENSE_KEY_PATTERN.match(result.key) + + def test_empty_set_existing_keys_accepted(self): + result = generate_license_key(existing_keys=set()) + assert LICENSE_KEY_PATTERN.match(result.key) + + def test_batch_with_none_existing_keys(self): + results = generate_license_keys_batch(count=3, existing_keys=None) + assert len(results) == 3 + + +@pytest.mark.unit +class TestGeneratedLicenseKeyDataclass: + """Verify the GeneratedLicenseKey container behaves correctly.""" + + def test_key_is_accessible(self): + result = generate_license_key() + assert isinstance(result.key, str) + + def test_metadata_is_accessible(self): + result = generate_license_key() + assert isinstance(result.metadata, BatchMetadata) + + def test_return_type(self): + result = generate_license_key() + assert isinstance(result, GeneratedLicenseKey) + + +@pytest.mark.unit +class TestLicenseKeyPattern: + """Verify the LICENSE_KEY_PATTERN regex itself.""" + + @pytest.mark.parametrize( + "key", + [ + pytest.param("ABCD-EFGH-1234-5678", id="valid_mixed"), + pytest.param("AAAA-BBBB-CCCC-DDDD", id="valid_all_alpha"), + pytest.param("1111-2222-3333-4444", id="valid_all_numeric"), + pytest.param("A1B2-C3D4-E5F6-G7H8", id="valid_alternating"), + ], + ) + def test_valid_keys_match(self, key): + assert 
LICENSE_KEY_PATTERN.match(key) + + @pytest.mark.parametrize( + "key", + [ + pytest.param("abcd-efgh-1234-5678", id="lowercase_rejected"), + pytest.param("ABCD-EFGH-1234", id="three_segments_rejected"), + pytest.param( + "ABCDE-FGHI-1234-5678", id="five_char_segment_rejected" + ), + pytest.param("ABCD EFGH 1234 5678", id="spaces_rejected"), + pytest.param("ABCD-EFGH-1234-567!", id="special_char_rejected"), + pytest.param("", id="empty_string_rejected"), + pytest.param( + "ABCD-EFGH-1234-5678-9ABC", id="five_segments_rejected" + ), + ], + ) + def test_invalid_keys_rejected(self, key): + assert LICENSE_KEY_PATTERN.match(key) is None diff --git a/migrations/06_license_key_updates.sql b/migrations/06_license_key_updates.sql new file mode 100644 index 0000000..6248431 --- /dev/null +++ b/migrations/06_license_key_updates.sql @@ -0,0 +1,48 @@ +-- ============================================================ +-- Migration : License Key Updates — Batch Metadata & Key Width +-- Platform : LaaS (License as a Service) +-- Database : PostgreSQL 18 +-- Run order : 06 — after 05_rls.sql +-- Depends on: 03_app.sql +-- ============================================================ +-- +-- PURPOSE +-- 1. Widens node_locked_license_data.license_key from TEXT to +-- VARCHAR(24) to enforce the 19-char format upper bound. +-- 2. Adds optional batch metadata columns (batch_id, campaign, +-- issued_by) to the licenses table. +-- +-- IDEMPOTENCY +-- Safe to re-run. ALTER COLUMN TYPE is a no-op when already +-- the target type; ADD COLUMN uses IF NOT EXISTS. +-- +-- TRANSACTION +-- Wrapped in BEGIN / COMMIT — all-or-nothing. +-- ============================================================ + +BEGIN; + +SET LOCAL ROLE app_owner; + +-- ------------------------------------------------------- +-- 1. 
Widen license_key to VARCHAR(24) +-- ------------------------------------------------------- +ALTER TABLE app."node_locked_license_data" + ALTER COLUMN "license_key" TYPE VARCHAR(24); + +COMMENT ON COLUMN app."node_locked_license_data"."license_key" + IS 'Cryptographically random activation key (format XXXX-XXXX-XXXX-XXXX, 19 chars). VARCHAR(24) allows headroom for future format changes.'; + +-- ------------------------------------------------------- +-- 2. Add batch metadata columns to licenses +-- ------------------------------------------------------- +ALTER TABLE app."licenses" + ADD COLUMN IF NOT EXISTS "batch_id" TEXT, + ADD COLUMN IF NOT EXISTS "campaign" TEXT, + ADD COLUMN IF NOT EXISTS "issued_by" TEXT; + +COMMENT ON COLUMN app."licenses"."batch_id" IS 'Optional identifier grouping licenses that belong to the same issuance batch.'; +COMMENT ON COLUMN app."licenses"."campaign" IS 'Optional marketing or distribution campaign associated with the license.'; +COMMENT ON COLUMN app."licenses"."issued_by" IS 'Optional identifier (user-id, email, or service name) of the entity that triggered license creation.'; + +COMMIT; diff --git a/migrations/down/06_license_key_updates_down.sql b/migrations/down/06_license_key_updates_down.sql new file mode 100644 index 0000000..0a02523 --- /dev/null +++ b/migrations/down/06_license_key_updates_down.sql @@ -0,0 +1,31 @@ +-- ============================================================ +-- Downgrade : License Key Updates +-- Platform : LaaS (License as a Service) +-- Database : PostgreSQL 18 +-- Run order : 06 — downgrade counterpart of 06_license_key_updates.sql +-- ============================================================ +-- +-- PURPOSE +-- Reverts the changes from 06_license_key_updates.sql: +-- 1. Restores license_key back to TEXT. +-- 2. Drops batch metadata columns from licenses. +-- +-- IDEMPOTENCY +-- Safe to re-run. DROP COLUMN uses IF EXISTS. 
+-- ============================================================ + +BEGIN; + +SET LOCAL ROLE app_owner; + +-- Restore license_key to TEXT +ALTER TABLE app."node_locked_license_data" + ALTER COLUMN "license_key" TYPE TEXT; + +-- Drop batch metadata columns +ALTER TABLE app."licenses" + DROP COLUMN IF EXISTS "batch_id", + DROP COLUMN IF EXISTS "campaign", + DROP COLUMN IF EXISTS "issued_by"; + +COMMIT; From 360adef95c2b2f3769da2ce9431127689cc22e89 Mon Sep 17 00:00:00 2001 From: saudzahirr Date: Mon, 9 Mar 2026 12:49:06 +0500 Subject: [PATCH 2/3] Add unit test for invalid license key format triggering retry --- backend/tests/unit/test_license_key_generator.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/backend/tests/unit/test_license_key_generator.py b/backend/tests/unit/test_license_key_generator.py index eb48afa..d2a26ef 100644 --- a/backend/tests/unit/test_license_key_generator.py +++ b/backend/tests/unit/test_license_key_generator.py @@ -140,6 +140,17 @@ def test_succeeds_on_last_retry(self): result = generate_license_key(existing_keys=existing) assert result.key == unique_key + def test_invalid_format_triggers_retry(self): + invalid_key = "aaaa-bbbb-cccc-dddd" + valid_key = "AAAA-BBBB-CCCC-DDDD" + + with patch( + "app.services.license_key_generator._generate_raw_key", + side_effect=[invalid_key, valid_key], + ): + result = generate_license_key() + assert result.key == valid_key + @pytest.mark.unit class TestBatchMetadata: From b16931e716ae67aae4d1d0ee397cd683ccb1da4b Mon Sep 17 00:00:00 2001 From: saudzahirr Date: Mon, 9 Mar 2026 12:53:58 +0500 Subject: [PATCH 3/3] Refactor Codacy configuration and enhance license key generation logging - add preflight check for license key length in migration scripts - fix migration tests - update backend tests and add docstrings - update ruff configuration - dependency cleanup and add missing deps --- .github/workflows/code_test.yml | 10 +- .github/workflows/sonarcloud.yml | 10 +- .gitignore | 3 + 
backend/app/api/routes/auth.py | 10 +- backend/app/api/routes/login.py | 4 - backend/app/core/ed25519.py | 30 + backend/app/core/exception_handlers.py | 10 +- backend/app/core/security.py | 22 +- backend/app/crud/vendor.py | 34 +- backend/app/domain/activation.py | 60 ++ backend/app/domain/fingerprint.py | 28 + backend/app/domain/license.py | 77 +++ backend/app/internal/.gitkeep | 0 backend/app/internal/base32_crockford.py | 177 ++++++ backend/app/main.py | 2 +- backend/app/pre_start.py | 4 +- backend/app/services/license_gen.py | 26 + backend/app/services/license_key_generator.py | 195 ------- backend/pyproject.toml | 73 ++- backend/tests/__init__.py | 11 - backend/tests/api/routes/test_auth.py | 541 ++++++++++++++++++ backend/tests/api/routes/test_health.py | 38 +- backend/tests/api/test_deps.py | 504 ++++++++++++++++ backend/tests/api/test_main.py | 29 + backend/tests/api/test_middlewares.py | 107 ++++ backend/tests/conftest.py | 379 +++++++++++- backend/tests/core/test_config.py | 73 +++ backend/tests/core/test_ed25519.py | 136 +++++ backend/tests/core/test_exception_handlers.py | 378 +++++++++--- backend/tests/core/test_exceptions.py | 323 ++++++++--- backend/tests/core/test_security.py | 125 ++++ backend/tests/crud/test_vendor.py | 165 ++++++ backend/tests/domain/test_activation.py | 144 +++++ backend/tests/domain/test_fingerprint.py | 109 ++++ backend/tests/domain/test_license.py | 167 ++++++ backend/tests/integration/test_auth.py | 339 ----------- .../tests/internal/test_base32_crockford.py | 341 +++++++++++ backend/tests/schemas/test_auth.py | 205 +++++++ backend/tests/schemas/test_response.py | 247 +++++--- .../{integration => services}/__init__.py | 0 backend/tests/services/test_auth.py | 419 ++++++++++++++ backend/tests/services/test_license_gen.py | 127 ++++ backend/tests/test_main.py | 228 ++++++++ backend/tests/test_pre_start.py | 219 +++++++ backend/tests/unit/__init__.py | 0 backend/tests/unit/test_auth.py | 307 ---------- 
.../tests/unit/test_license_key_generator.py | 308 ---------- migrations/03_app.sql | 8 +- migrations/04_audit.sql | 2 +- migrations/06_license_key_updates.sql | 48 -- migrations/07_audit_triggers.sql | 12 +- .../down/06_license_key_updates_down.sql | 31 - migrations/tests/helpers.py | 8 +- migrations/tests/test_constraints.py | 2 +- migrations/tests/test_indexes.py | 3 +- migrations/tests/test_partitioning.py | 2 +- migrations/tests/test_privileges.py | 4 +- migrations/tests/test_rls_isolation.py | 4 +- migrations/tests/test_rls_structure.py | 6 +- migrations/tests/test_seed_data.py | 4 +- uv.lock | 156 ++++- 61 files changed, 5401 insertions(+), 1633 deletions(-) delete mode 100644 backend/app/api/routes/login.py create mode 100644 backend/app/core/ed25519.py create mode 100644 backend/app/domain/activation.py create mode 100644 backend/app/domain/fingerprint.py create mode 100644 backend/app/domain/license.py delete mode 100644 backend/app/internal/.gitkeep create mode 100644 backend/app/internal/base32_crockford.py create mode 100644 backend/app/services/license_gen.py delete mode 100644 backend/app/services/license_key_generator.py create mode 100644 backend/tests/api/routes/test_auth.py create mode 100644 backend/tests/api/test_deps.py create mode 100644 backend/tests/api/test_main.py create mode 100644 backend/tests/api/test_middlewares.py create mode 100644 backend/tests/core/test_config.py create mode 100644 backend/tests/core/test_ed25519.py create mode 100644 backend/tests/core/test_security.py create mode 100644 backend/tests/crud/test_vendor.py create mode 100644 backend/tests/domain/test_activation.py create mode 100644 backend/tests/domain/test_fingerprint.py create mode 100644 backend/tests/domain/test_license.py delete mode 100644 backend/tests/integration/test_auth.py create mode 100644 backend/tests/internal/test_base32_crockford.py create mode 100644 backend/tests/schemas/test_auth.py rename backend/tests/{integration => services}/__init__.py 
(100%) create mode 100644 backend/tests/services/test_auth.py create mode 100644 backend/tests/services/test_license_gen.py create mode 100644 backend/tests/test_main.py create mode 100644 backend/tests/test_pre_start.py delete mode 100644 backend/tests/unit/__init__.py delete mode 100644 backend/tests/unit/test_auth.py delete mode 100644 backend/tests/unit/test_license_key_generator.py delete mode 100644 migrations/06_license_key_updates.sql delete mode 100644 migrations/down/06_license_key_updates_down.sql diff --git a/.github/workflows/code_test.yml b/.github/workflows/code_test.yml index dc684cb..80c8177 100644 --- a/.github/workflows/code_test.yml +++ b/.github/workflows/code_test.yml @@ -2,12 +2,12 @@ name: Tests on: push: - branches: [ main ] + branches: [main] pull_request: - branches: [ main ] + branches: [main] schedule: - - cron: "0 0 * * 0" # Runs every Sunday at midnight UTC + - cron: "0 0 * * 0" # Runs every Sunday at midnight UTC workflow_dispatch: @@ -34,6 +34,8 @@ jobs: test_backend: needs: fetch-python-versions runs-on: ubuntu-latest + env: + UV_NO_DEV: true permissions: id-token: write @@ -52,7 +54,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies - run: uv sync --group dev + run: uv sync --group test - name: Unit tests working-directory: backend diff --git a/.github/workflows/sonarcloud.yml b/.github/workflows/sonarcloud.yml index a31328b..a114e1b 100644 --- a/.github/workflows/sonarcloud.yml +++ b/.github/workflows/sonarcloud.yml @@ -4,23 +4,25 @@ permissions: contents: read on: push: - branches: [ main ] + branches: [main] pull_request: - branches: [ main ] + branches: [main] jobs: sonarqube: name: SonarQube runs-on: ubuntu-latest + env: + UV_NO_DEV: true steps: - uses: actions/checkout@v6 with: - fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis - uses: astral-sh/setup-uv@v7 - name: 
Install dependencies - run: uv sync --group dev + run: uv sync --group test - name: Run tests and generate coverage working-directory: backend diff --git a/.gitignore b/.gitignore index 541f87b..0e797f8 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,6 @@ dist/ *.log coverage.xml .coverage.* +.fastembed_cache +.database +.amdb diff --git a/backend/app/api/routes/auth.py b/backend/app/api/routes/auth.py index 3aa1000..19cfbbc 100644 --- a/backend/app/api/routes/auth.py +++ b/backend/app/api/routes/auth.py @@ -17,11 +17,7 @@ router = APIRouter() -@router.post( - "/signup", - status_code=status.HTTP_201_CREATED, - response_model=SuccessResponse[SignupResponse], -) +@router.post("/signup", status_code=status.HTTP_201_CREATED) def signup( body: SignupRequest, cursor: CursorDep, settings: SettingsDep ) -> SuccessResponse[SignupResponse]: @@ -40,7 +36,7 @@ def signup( return SuccessResponse(data=result) -@router.post("/login", response_model=SuccessResponse[TokenPair]) +@router.post("/login") def login( body: LoginRequest, cursor: CursorDep, settings: SettingsDep ) -> SuccessResponse[TokenPair]: @@ -59,7 +55,7 @@ def login( return SuccessResponse(data=result) -@router.post("/refresh", response_model=SuccessResponse[TokenPair]) +@router.post("/refresh") def refresh( body: RefreshRequest, cursor: CursorDep, settings: SettingsDep ) -> SuccessResponse[TokenPair]: diff --git a/backend/app/api/routes/login.py b/backend/app/api/routes/login.py deleted file mode 100644 index e6f2f82..0000000 --- a/backend/app/api/routes/login.py +++ /dev/null @@ -1,4 +0,0 @@ -from fastapi import APIRouter - - -router = APIRouter() diff --git a/backend/app/core/ed25519.py b/backend/app/core/ed25519.py new file mode 100644 index 0000000..39bbdec --- /dev/null +++ b/backend/app/core/ed25519.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from cryptography.hazmat.primitives.asymmetric.ed25519 import ( + Ed25519PrivateKey, + Ed25519PublicKey, +) +from 
cryptography.hazmat.primitives.serialization import ( + load_pem_private_key, + load_pem_public_key, +) +from cryptography.utils import Buffer + + +__all__ = ["load_private_ed25519_key", "load_public_ed25519_key"] + + +def load_private_ed25519_key( + pem: Buffer, password: bytes | None = None +) -> Ed25519PrivateKey: + key = load_pem_private_key(pem, password=password) + if not isinstance(key, Ed25519PrivateKey): + raise TypeError("Provided key is not an Ed25519 private key") + return key + + +def load_public_ed25519_key(pem: bytes) -> Ed25519PublicKey: + key = load_pem_public_key(pem) + if not isinstance(key, Ed25519PublicKey): + raise TypeError("Provided key is not an Ed25519 public key") + return key diff --git a/backend/app/core/exception_handlers.py b/backend/app/core/exception_handlers.py index 4fe854a..892e6a2 100644 --- a/backend/app/core/exception_handlers.py +++ b/backend/app/core/exception_handlers.py @@ -38,7 +38,7 @@ def api_exception_handler(request: Request, exc: APIException) -> JSONResponse: code=exc.error_code, message=exc.message, http_status=exc.http_status, - details=_build_error_details(exc.details), + details=build_error_details(exc.details), request_id=request_id, ) ) @@ -64,7 +64,7 @@ def validation_exception_handler( details = [] for error in exc.errors(): field_path = ".".join(str(loc) for loc in error["loc"][1:]) - field = field_path if field_path else None + field = field_path or None details.append({"field": field, "message": error["msg"]}) logger.warning( @@ -84,7 +84,7 @@ def validation_exception_handler( code=ErrorCode.VALIDATION_FAILED, message="Validation error", http_status=http_422_unprocessable_content, - details=_build_error_details(details), + details=build_error_details(details), request_id=request_id, ) ) @@ -105,7 +105,7 @@ def general_exception_handler(request: Request, exc: Exception) -> JSONResponse: request_id = getattr(request.state, "request_id", str(uuid.uuid4())) # Log the full traceback server-side - 
logger.exception( + logger.exception( # noqa: LOG004 - this will be used by the exceptions "Unexpected error: %s", exc, extra={"request_id": request_id} ) @@ -127,7 +127,7 @@ def general_exception_handler(request: Request, exc: Exception) -> JSONResponse: ) -def _build_error_details( +def build_error_details( details: list[dict | ErrorDetail] | dict | ErrorDetail | None, ) -> list[ErrorDetail]: """ diff --git a/backend/app/core/security.py b/backend/app/core/security.py index 48d52d7..5caed1b 100644 --- a/backend/app/core/security.py +++ b/backend/app/core/security.py @@ -1,24 +1,24 @@ from datetime import datetime, timedelta, timezone from typing import Any +from uuid import UUID import jwt from pwdlib import PasswordHash -from pwdlib.hashers.argon2 import Argon2Hasher -from pwdlib.hashers.bcrypt import BcryptHasher +from pwdlib.hashers import argon2, bcrypt from app.core.config import Settings # BcryptHasher is listed first so new passwords are hashed with bcrypt. # Argon2Hasher is kept for verification of legacy hashes. 
-password_hash = PasswordHash((BcryptHasher(), Argon2Hasher())) +PASSWORD_HASH = PasswordHash((bcrypt.BcryptHasher(), argon2.Argon2Hasher())) -ALGORITHM = "HS256" +JWT_ALGORITHM: str = "HS256" def create_access_token( - vendor_id: str, + vendor_id: str | UUID, settings: Settings, *, expires_delta: timedelta | None = None, @@ -36,11 +36,11 @@ def create_access_token( "exp": expire, "token_type": "access", } - return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=ALGORITHM) + return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=JWT_ALGORITHM) def create_refresh_token( - vendor_id: str, + vendor_id: str | UUID, settings: Settings, *, expires_delta: timedelta | None = None, @@ -58,7 +58,7 @@ def create_refresh_token( "exp": expire, "token_type": "refresh", } - return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=ALGORITHM) + return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=JWT_ALGORITHM) def decode_token(token: str, settings: Settings) -> dict[str, Any]: @@ -67,14 +67,14 @@ def decode_token(token: str, settings: Settings) -> dict[str, Any]: Returns: dict[str, Any]: The decoded token payload. """ - return jwt.decode(token, settings.SECRET_KEY, algorithms=[ALGORITHM]) + return jwt.decode(token, settings.SECRET_KEY, algorithms=[JWT_ALGORITHM]) def verify_password( plain_password: str, hashed_password: str ) -> tuple[bool, str | None]: - return password_hash.verify_and_update(plain_password, hashed_password) + return PASSWORD_HASH.verify_and_update(plain_password, hashed_password) def get_password_hash(password: str) -> str: - return password_hash.hash(password) + return PASSWORD_HASH.hash(password) diff --git a/backend/app/crud/vendor.py b/backend/app/crud/vendor.py index 318504c..a9cbbb9 100644 --- a/backend/app/crud/vendor.py +++ b/backend/app/crud/vendor.py @@ -13,11 +13,13 @@ def get_vendor_by_email(cursor: Cursor, email: str) -> dict[str, Any] | None: Returns: dict[str, Any] | None: The vendor row or None. 
""" - cursor.execute( - 'SELECT "id", "email", "password_hash" ' - 'FROM app."vendors" ' - 'WHERE LOWER("email") = LOWER(%s) ' - 'AND "deleted_at" IS NULL', + cursor.execute( # SQL + """ + SELECT "id", "email", "password_hash" + FROM app."vendors" + WHERE LOWER("email") = LOWER(%s) + AND "deleted_at" IS NULL + """, (email,), ) row = cursor.fetchone() @@ -32,10 +34,12 @@ def get_vendor_by_id(cursor: Cursor, vendor_id: str) -> dict[str, Any] | None: Returns: dict[str, Any] | None: The vendor row or None. """ - cursor.execute( - 'SELECT "id", "email" ' - 'FROM app."vendors" ' - 'WHERE "id" = %s AND "deleted_at" IS NULL', + cursor.execute( # SQL + """ + SELECT "id", "email" + FROM app."vendors" + WHERE "id" = %s AND "deleted_at" IS NULL + """, (vendor_id,), ) row = cursor.fetchone() @@ -56,11 +60,13 @@ def create_vendor( Returns: dict[str, Any] | None: The created vendor row, or None on conflict. """ - cursor.execute( - 'INSERT INTO app."vendors" ("email", "password_hash") ' - "VALUES (%s, %s) " - 'ON CONFLICT ((LOWER("email"))) DO NOTHING ' - 'RETURNING "id", "email"', + cursor.execute( # SQL + """ + INSERT INTO app."vendors" ("email", "password_hash") + VALUES (%s, %s) + ON CONFLICT (LOWER("email")) DO NOTHING + RETURNING "id", "email" + """, (email, password_hash), ) row = cursor.fetchone() diff --git a/backend/app/domain/activation.py b/backend/app/domain/activation.py new file mode 100644 index 0000000..a936372 --- /dev/null +++ b/backend/app/domain/activation.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from typing import ClassVar +from uuid import UUID + +import uuid6 +from pydantic import BaseModel, ConfigDict, computed_field, field_validator + +from app.internal import base32_crockford + + +__all__ = ["ActivationCode"] + + +class ActivationCode(BaseModel): + model_config = ConfigDict(validate_assignment=True) + + LENGTH: ClassVar[int] = 30 + GROUP: ClassVar[int] = 5 + + code: str + + @field_validator("code") + @classmethod + def 
validate_code(cls, v: str) -> str: + # TODO: need the proper error handling here. + normalized = base32_crockford.normalize(v) + + if len(normalized) != cls.LENGTH: + raise ValueError("Activation code must contain 30 symbols") + + base32_crockford.decode(normalized, checksum=True) + + return "-".join( + normalized[i : i + cls.GROUP] + for i in range(0, cls.LENGTH, cls.GROUP) + ) + + @computed_field + @property + def uuid(self) -> UUID: + flat = base32_crockford.normalize(self.code) + n = base32_crockford.decode(flat, checksum=True) + return UUID(int=n) + + @classmethod + def generate(cls, uuid: UUID | None = None) -> ActivationCode: + if uuid is None: + uuid = uuid6.uuid7() + if not isinstance(uuid, UUID): + raise TypeError(f"uuid cannot be of type {uuid.__class__.__name__}") + if uuid.version != 7: # noqa: PLR2004 + raise TypeError( + f"uuid must be a UUID version 7, got {uuid.version}" + ) + + encoded = base32_crockford.encode(uuid.int, checksum=True) + encoded = encoded.rjust(cls.LENGTH, "0") + + return cls(code=encoded) diff --git a/backend/app/domain/fingerprint.py b/backend/app/domain/fingerprint.py new file mode 100644 index 0000000..af5a324 --- /dev/null +++ b/backend/app/domain/fingerprint.py @@ -0,0 +1,28 @@ +import hashlib +import json +from functools import cached_property +from typing import Annotated + +from pydantic import BaseModel, ConfigDict, Field, computed_field + + +__all__ = ["Device"] + + +class Device(BaseModel): + model_config = ConfigDict(frozen=True) + + cpu_id: Annotated[str, Field(json_schema_extra={"score": 10})] + motherboard_id: Annotated[str, Field(json_schema_extra={"score": 10})] + motherboard_serial: Annotated[str, Field(json_schema_extra={"score": 10})] + machine_id: Annotated[str, Field(json_schema_extra={"score": 10})] + primary_disk_serial: Annotated[str, Field(json_schema_extra={"score": 10})] + + @computed_field + @cached_property + def fingerprint(self) -> str: + payload = json.dumps( + 
self.model_dump(exclude_computed_fields=True), sort_keys=True + ) + checksum = f"sha256:{hashlib.sha256(payload.encode()).hexdigest()}" + return checksum diff --git a/backend/app/domain/license.py b/backend/app/domain/license.py new file mode 100644 index 0000000..716c49d --- /dev/null +++ b/backend/app/domain/license.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +import json +from typing import Annotated + +import base58 +from pydantic import ( + UUID7, + BaseModel, + ConfigDict, + Field, + computed_field, + field_validator, + model_validator, +) + +from app.core.ed25519 import load_private_ed25519_key + + +__all__ = ["BaseLicense", "NodeLockedLicense"] + + +class BaseLicense(BaseModel): + model_config = ConfigDict(validate_assignment=True) + + id: UUID7 + vendor_id: UUID7 + client_id: UUID7 + expires_at: Annotated[float, Field(gt=0)] + max_grace_secs: Annotated[int, Field(ge=0)] + created_at: Annotated[float, Field(gt=0)] + meta_data: Annotated[dict[str, str] | None, Field(default=None)] + + def canonical_json(self) -> str: + return json.dumps(self.model_dump(), sort_keys=True, default=str) + + @model_validator(mode="after") + def check_expiry(self: BaseLicense) -> BaseLicense: + if self.expires_at <= self.created_at: + raise ValueError("expiry date cannot be before creation date") + return self + + +class NodeLockedLicense(BaseLicense): + device_fingerprint: str + session_limit: Annotated[int, Field(gt=0)] + + +class LicenseFile(BaseModel): + """Represents a signed `license.dat` file. + + Attributes: + data: The license payload that was signed. + signature: Base58-encoded Ed25519 signature over the canonical + JSON representation of *data*. Uses the standard Bitcoin + base58 alphabet (signatures are not user-facing). 
+ """ + + model_config = ConfigDict(validate_assignment=True) + + license_data: BaseLicense + private_key_pem: Annotated[bytes, Field(exclude=True)] + + @computed_field + @property + def signature(self) -> str: + key = load_private_ed25519_key(self.private_key_pem) + payload = self.license_data.canonical_json() + signature = base58.b58encode(key.sign(payload.encode())).decode() + return signature + + @field_validator("private_key_pem") + @classmethod + def private_key_pem_validator(cls, v: bytes) -> bytes: + # this should fail in case the key is not a valid Ed25519 private key + load_private_ed25519_key(v) + return v diff --git a/backend/app/internal/.gitkeep b/backend/app/internal/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/backend/app/internal/base32_crockford.py b/backend/app/internal/base32_crockford.py new file mode 100644 index 0000000..a5957dc --- /dev/null +++ b/backend/app/internal/base32_crockford.py @@ -0,0 +1,177 @@ +""" +base32-crockford +================ + +A Python module implementing the alternate base32 encoding as described +by Douglas Crockford at: http://www.crockford.com/wrmg/base32.html. + +He designed the encoding to: + + * Be human and machine readable + * Be compact + * Be error resistant + * Be pronounceable + +It uses a symbol set of 10 digits and 22 letters, excluding I, L O and +U. Decoding is not case sensitive, and 'i' and 'l' are converted to '1' +and 'o' is converted to '0'. Encoding uses only upper-case characters. + +Hyphens may be present in symbol strings to improve readability, and +are removed when decoding. + +A check symbol can be appended to a symbol string to detect errors +within the string. 
+ +This code is licensed under the BSD-3 clause +Copyright (c) 2015, Jason Bittel +Copyright (c) 2026, M Laraib Ali +""" + +import re + + +__all__ = ["decode", "encode", "normalize"] + + +# The encoded symbol space does not include I, L, O or U +SYMBOLS = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" +# These five symbols are exclusively for checksum values +CHECK_SYMBOLS = "*~$=U" + +ENCODE_SYMBOLS = dict(enumerate(SYMBOLS + CHECK_SYMBOLS)) +DECODE_SYMBOLS = {ch: i for i, ch in ENCODE_SYMBOLS.items()} +NORMALIZE_MAP = str.maketrans("IiLlOo", "111100") +VALID_SYMBOLS = re.compile(f"^[{SYMBOLS}]+[{re.escape(CHECK_SYMBOLS)}]?$") + +BASE = len(SYMBOLS) +CHECK_BASE = len(SYMBOLS + CHECK_SYMBOLS) + + +def encode( + number: int | str, *, checksum: bool = False, split_length: int = 0 +) -> str: + """Encode an integer into a symbol string. + + Args: + - number: Integer value to encode (must be non-negative) + - checksum: If True, a check symbol will be calculated and appended + - split_length: If specified, the string will be divided into clusters + of that size separated by hyphens (0 = no splitting) + + Returns: + - encoded symbol string + + Raises: + ValueError: If number is negative or split is negative + """ + number = int(number) + if number < 0: + raise ValueError(f"number {number} is not a positive integer") + + split_length = int(split_length) + if split_length < 0: + raise ValueError(f"split {split_length} is not a positive integer") + + check_symbol = "" + if checksum: + check_symbol = ENCODE_SYMBOLS[number % CHECK_BASE] + + if number == 0: + return "0" + check_symbol + + symbol_string = "" + while number > 0: + remainder = number % BASE + number //= BASE + symbol_string = ENCODE_SYMBOLS[remainder] + symbol_string + symbol_string += check_symbol + + if split_length: + chunks = [ + symbol_string[pos : pos + split_length] + for pos in range(0, len(symbol_string), split_length) + ] + symbol_string = "-".join(chunks) + + return symbol_string + + +def decode( + symbol_string: 
str, *, checksum: bool = False, strict: bool = False +) -> int: + """Decode an encoded symbol string. + + Args: + - symbol_string: The encoded symbol string to decode + - checksum: If True, the string is assumed to have a trailing check + symbol which will be validated + - strict: If True, raises ValueError if normalization is required + + Returns: + - decoded integer value + + Raises: + ValueError: If checksum validation fails, if strict mode is enabled and + normalization is needed, or if the string contains invalid characters + """ + symbol_string = normalize(symbol_string, strict=strict) + check_symbol = None + if checksum: + check_symbol = symbol_string[-1] + symbol_string = symbol_string[:-1] + + number = 0 + for symbol in symbol_string: + number = number * BASE + DECODE_SYMBOLS[symbol] + + if checksum: + check_value = DECODE_SYMBOLS[check_symbol] + modulo = number % CHECK_BASE + if check_value != modulo: + raise ValueError( + f"invalid check symbol '{check_symbol}' for string " + f"'{symbol_string}{check_symbol}'" + ) + + return number + + +def normalize(symbol_string: str, *, strict: bool = False) -> str: + """Normalize an encoded symbol string. + + Normalization provides error correction and prepares the string for + decoding. These transformations are applied: + + 1. Hyphens are removed + 2. 'I', 'i', 'L' or 'l' are converted to '1' + 3. 'O' or 'o' are converted to '0' + 4. 
All characters are converted to uppercase + + Args: + - symbol_string: The symbol string to normalize + - strict: If True, raises ValueError if any transformations are applied + + Returns: + - normalized symbol string + + Raises: + TypeError: If an invalid string type is provided + ValueError: If the normalized string contains invalid characters, or if + strict mode is enabled and normalization was needed + """ + if not isinstance(symbol_string, str): + raise TypeError( + f"string is of invalid type {symbol_string.__class__.__name__}" + ) + + norm_string = ( + symbol_string.replace("-", "").translate(NORMALIZE_MAP).upper() + ) + + if not VALID_SYMBOLS.match(norm_string): + raise ValueError(f"string '{norm_string}' contains invalid characters") + + if strict and norm_string != symbol_string: + raise ValueError(f"string '{symbol_string}' requires normalization") + + return norm_string diff --git a/backend/app/main.py b/backend/app/main.py index 07241ca..47b85c4 100644 --- a/backend/app/main.py +++ b/backend/app/main.py @@ -31,7 +31,7 @@ async def lifespan(app: FastAPI) -> AsyncIterator[None]: # noqa: RUF029 logger.info("Settings initialized") logger.info("Initializing database pool") - pool = ConnectionPool(str(settings.DATABASE_DSN)) + pool = ConnectionPool(str(settings.DATABASE_DSN), open=True) # Connectivity check try: diff --git a/backend/app/pre_start.py b/backend/app/pre_start.py index 3b8262a..d401bf5 100644 --- a/backend/app/pre_start.py +++ b/backend/app/pre_start.py @@ -6,6 +6,7 @@ before_log, retry, retry_if_exception_type, + retry_if_not_exception_type, stop_after_attempt, wait_fixed, ) @@ -25,8 +26,7 @@ before=before_log(logger, logging.INFO), after=after_log(logger, logging.WARNING), retry=retry_if_exception_type(Exception) - & ~retry_if_exception_type(NotImplementedError) - & ~retry_if_exception_type(RuntimeError), + & retry_if_not_exception_type((NotImplementedError, RuntimeError)), ) def init() -> None: """code to do the pre-start service""" diff 
--git a/backend/app/services/license_gen.py b/backend/app/services/license_gen.py new file mode 100644 index 0000000..a15f2b4 --- /dev/null +++ b/backend/app/services/license_gen.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +import base58 +from cryptography.exceptions import InvalidSignature + +from app.core.ed25519 import load_public_ed25519_key +from app.core.exceptions import LicenseKeyGenerationError +from app.domain.license import BaseLicense + + +__all__ = ["verify_license"] + + +def verify_license( + lic: BaseLicense, signature: str, public_key_pem: bytes +) -> None: + payload = lic.canonical_json().encode() + sig_bytes = base58.b58decode(signature.encode()) + public_key = load_public_ed25519_key(public_key_pem) + try: + public_key.verify(sig_bytes, payload) + except InvalidSignature as exc: + # TODO: this should throw validation error instead of generation error + raise LicenseKeyGenerationError( + "License signature verification failed" + ) from exc diff --git a/backend/app/services/license_key_generator.py b/backend/app/services/license_key_generator.py deleted file mode 100644 index 3ff950f..0000000 --- a/backend/app/services/license_key_generator.py +++ /dev/null @@ -1,195 +0,0 @@ -"""License key generator — cryptographically random key generation. - -Generates license keys in the format ``XXXX-XXXX-XXXX-XXXX`` where each -character is drawn from the uppercase alphanumeric alphabet ``A-Z0-9`` -using :mod:`secrets` for cryptographic randomness (~82 bits of entropy -per key). - -Supports optional batch metadata and collision detection with bounded -retries. 
-""" - -from __future__ import annotations - -import logging -import re -import secrets -import string -from dataclasses import dataclass, field - -from app.core.exceptions import LicenseKeyGenerationError - - -logger = logging.getLogger(__name__) - -ALPHABET: str = string.ascii_uppercase + string.digits -"""Uppercase alphanumeric character set used for key generation.""" - -SEGMENT_LENGTH: int = 4 -"""Number of characters per key segment.""" - -NUM_SEGMENTS: int = 4 -"""Number of segments in a license key.""" - -MAX_RETRIES: int = 10 -"""Maximum collision-retry attempts before raising an error.""" - -LICENSE_KEY_PATTERN: re.Pattern[str] = re.compile( - r"^[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}$" -) -"""Compiled regex that every generated key must match.""" - - -@dataclass(frozen=True, slots=True) -class BatchMetadata: - """Optional metadata attached to a batch of generated license keys. - - Attributes: - batch_id: Identifier grouping keys that belong to the same - issuance batch. - campaign: Marketing or distribution campaign associated with - the batch. - issued_by: Identifier (user-id, email, or service name) of the - entity that triggered key generation. - """ - - batch_id: str | None = None - campaign: str | None = None - issued_by: str | None = None - - -@dataclass(slots=True) -class GeneratedLicenseKey: - """Container for a generated license key and its batch metadata. - - Attributes: - key: The formatted license key string - (e.g. ``"A1B2-C3D4-E5F6-G7H8"``). - metadata: Optional :class:`BatchMetadata` associated with the - key. - """ - - key: str - metadata: BatchMetadata = field(default_factory=BatchMetadata) - - -def _generate_segment() -> str: - """Generate a single random segment of :data:`SEGMENT_LENGTH` characters. - - Returns: - str: A string of ``SEGMENT_LENGTH`` uppercase-alphanumeric - characters chosen via :func:`secrets.choice`. 
- """ - return "".join(secrets.choice(ALPHABET) for _ in range(SEGMENT_LENGTH)) - - -def _generate_raw_key() -> str: - """Generate a raw license key with hyphen-separated segments. - - Returns: - str: A key in ``XXXX-XXXX-XXXX-XXXX`` format. - """ - return "-".join(_generate_segment() for _ in range(NUM_SEGMENTS)) - - -def generate_license_key( - existing_keys: set[str] | None = None, metadata: BatchMetadata | None = None -) -> GeneratedLicenseKey: - """Generate a single unique license key with collision detection. - - Attempts up to :data:`MAX_RETRIES` times to produce a key that does - not collide with *existing_keys*. Each generated key is validated - against :data:`LICENSE_KEY_PATTERN` before the collision check. - - Args: - existing_keys: A set of previously issued keys used for - collision detection. Pass ``None`` or an empty set when - uniqueness checking is not required. - metadata: Optional :class:`BatchMetadata` to attach to the - returned result. - - Returns: - GeneratedLicenseKey: The generated key together with its - metadata. - - Raises: - LicenseKeyGenerationError: If a unique key cannot be produced - within :data:`MAX_RETRIES` attempts. 
- """ - if existing_keys is None: - existing_keys = set() - - effective_metadata = metadata or BatchMetadata() - - for attempt in range(1, MAX_RETRIES + 1): - key = _generate_raw_key() - - if not LICENSE_KEY_PATTERN.match(key): - logger.warning( - "Generated key failed format validation on attempt %d", attempt - ) - continue - - if key not in existing_keys: - logger.debug( - "License key generated on attempt %d (batch_id=%s)", - attempt, - effective_metadata.batch_id, - ) - return GeneratedLicenseKey(key=key, metadata=effective_metadata) - - logger.warning( - "Collision detected on attempt %d/%d", attempt, MAX_RETRIES - ) - - raise LicenseKeyGenerationError( - f"Could not generate a unique license key after {MAX_RETRIES} retries" - ) - - -def generate_license_keys_batch( - count: int, - existing_keys: set[str] | None = None, - metadata: BatchMetadata | None = None, -) -> list[GeneratedLicenseKey]: - """Generate a batch of unique license keys. - - Each key in the batch is guaranteed to be unique against both the - provided *existing_keys* and all previously generated keys within - the same batch invocation. - - Args: - count: Number of keys to generate. Must be >= 1. - existing_keys: A set of previously issued keys used for - collision detection. Pass ``None`` or an empty set when - external uniqueness checking is not required. - metadata: Optional :class:`BatchMetadata` to attach to every - key in the batch. - - Returns: - list[GeneratedLicenseKey]: A list of *count* unique keys, each - carrying the supplied metadata. - - Raises: - ValueError: If *count* is less than 1. 
- """ - if count < 1: - raise ValueError("count must be >= 1") - - if existing_keys is None: - existing_keys = set() - - combined_keys: set[str] = set(existing_keys) - results: list[GeneratedLicenseKey] = [] - - for i in range(count): - generated = generate_license_key( - existing_keys=combined_keys, metadata=metadata - ) - combined_keys.add(generated.key) - results.append(generated) - logger.debug( - "Batch key %d/%d generated: %s", i + 1, count, generated.key - ) - - return results diff --git a/backend/pyproject.toml b/backend/pyproject.toml index 35e7e4b..27d4863 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -5,25 +5,28 @@ description = "" requires-python = ">=3.10,<4.0" dependencies = [ "fastapi[standard]<1.0.0,>=0.114.2", - "python-multipart<1.0.0,>=0.0.7", "tenacity<9.0.0,>=8.2.3", - "pydantic>2.0", "httpx<1.0.0,>=0.25.1", - "psycopg[binary]<4.0.0,>=3.1.13", - "pydantic-settings<3.0.0,>=2.2.1", + "psycopg[binary,pool]<4.0.0,>=3.1.13", "pwdlib[argon2,bcrypt]>=0.3.0", - "psycopg-pool>=3.3.0,<4.0.0", "pyjwt>=2.11.0,<3.0.0", + "cryptography>=44.0.0", + "base58>=2.1.1", + "uuid6>=2025.0.1", ] [dependency-groups] dev = [ - "pytest<8.0.0,>=7.4.3", - "testcontainers[postgres]>=4.0.0", - "ruff<1.0.0,>=0.2.2", + "ruff>=0.2.2,<1.0.0", "prek>=0.2.24,<1.0.0", - "coverage<8.0.0,>=7.4.3", - "pytest-xdist>=3.5.0,<4.0.0", + { include-group = "test" }, +] +test = [ + "faker>=40.11.0", + "pytest>=7.4.4", + "pytest-cov>=7.0.0", + "pytest-xdist>=3.8.0", + "testcontainers[postgres]>=4.14.1", ] [build-system] @@ -49,28 +52,38 @@ isort.lines-after-imports = 2 isort.split-on-trailing-comma = false select = [ - "ANN", # flake8-annotations (required strict type annotations for public functions) - "S", # flake8-bandit (checks basic security issues in code) - "BLE", # flake8-blind-except (checks the except blocks that do not specify exception) - "FBT", # flake8-boolean-trap (ensure that boolean args can be used with kw only) - "E", # pycodestyle errors (PEP 8 style 
guide violations) - "W", # pycodestyle warnings (e.g., extra spaces, indentation issues) - "DOC", # pydoclint issues (e.g., extra or missing return, yield, warnings) - "A", # flake8-builtins (check variable and function names to not shadow builtins) - "N", # Naming convention checks (e.g., PEP 8 variable and function names) - "F", # Pyflakes errors (e.g., unused imports, undefined variables) - "I", # isort (Ensures imports are sorted properly) - "B", # flake8-bugbear (Detects likely bugs and bad practices) - "TID", # flake8-tidy-imports (Checks for banned or misplaced imports) - "UP", # pyupgrade (Automatically updates old Python syntax) - "YTT", # flake8-2020 (Detects outdated Python 2/3 compatibility issues) - "FLY", # flynt (Converts old-style string formatting to f-strings) - "PIE", # flake8-pie - "PL", # pylint - "RUF", # Ruff-specific rules (Additional optimizations and best practices) + "FAST", # fastapi related rules + "ASYNC", # flake8-async rules + "ANN", # flake8-annotations (required strict type annotations for public functions) + "S", # flake8-bandit (checks basic security issues in code) + "BLE", # flake8-blind-except (checks the except blocks that do not specify exception) + "FBT", # flake8-boolean-trap (ensure that boolean args can be used with kw only) + "E", # pycodestyle errors (PEP 8 style guide violations) + "W", # pycodestyle warnings (e.g., extra spaces, indentation issues) + "DOC", # pydoclint issues (e.g., extra or missing return, yield, warnings) + "A", # flake8-builtins (check variable and function names to not shadow builtins) + "N", # Naming convention checks (e.g., PEP 8 variable and function names) + "F", # Pyflakes errors (e.g., unused imports, undefined variables) + "I", # isort (Ensures imports are sorted properly) + "B", # flake8-bugbear (Detects likely bugs and bad practices) + "TID", # flake8-tidy-imports (Checks for banned or misplaced imports) + "UP", # pyupgrade (Automatically updates old Python syntax) + "YTT", # flake8-2020 
(Detects outdated Python 2/3 compatibility issues) + "FLY", # flynt (Converts old-style string formatting to f-strings) + "PIE", # flake8-pie + "PL", # pylint + "C4", # flake8-comprehensions for simplification + "LOG", # flake8-logging best practices + "G", # flake8-logging-format + "PT", # flake8-pytest-style + "FURB", # refurb rules + "RUF", # Ruff-specific rules (Additional optimizations and best practices) ] -ignore = [] +ignore = [ + "E501", # line too long (for docstrings) + "DOC202", # pydoclint (docstring extraneous returns) +] [tool.ruff.lint.per-file-ignores] "tests/**/*.py" = [ diff --git a/backend/tests/__init__.py b/backend/tests/__init__.py index 37ff788..e69de29 100644 --- a/backend/tests/__init__.py +++ b/backend/tests/__init__.py @@ -1,11 +0,0 @@ -import os - - -# Environment variables must be set before importing the app -os.environ.setdefault("SECRET_KEY", "test-secret-key") -os.environ.setdefault("PROJECT_NAME", "permit-test") -os.environ.setdefault("POSTGRES_SERVER", "localhost") -os.environ.setdefault("POSTGRES_PORT", "5432") -os.environ.setdefault("POSTGRES_USER", "test") -os.environ.setdefault("POSTGRES_PASSWORD", "test") -os.environ.setdefault("POSTGRES_DB", "test") diff --git a/backend/tests/api/routes/test_auth.py b/backend/tests/api/routes/test_auth.py new file mode 100644 index 0000000..a2ec9de --- /dev/null +++ b/backend/tests/api/routes/test_auth.py @@ -0,0 +1,541 @@ +from __future__ import annotations + +from datetime import timedelta + +import pytest +from fastapi.testclient import TestClient + +from app.core.config import Settings +from app.core.security import create_access_token +from app.main import app + + +API_V1 = "/api/v1" + + +def build_auth_payload(faker, **overrides: str) -> dict[str, str]: + """ + Builds an auth request payload with Faker-generated credentials and client + identity. + + Used by: + - test_signup_creates_vendor_201: creates a valid signup request body. 
+ - test_signup_duplicate_email_returns_409: reuses the same signup + payload to trigger a duplicate conflict. + - test_signup_validation_errors_return_422: mutates a valid payload into + invalid auth-route cases. + - test_login_returns_token_pair: provisions a vendor before exercising + the login route. + - test_login_rejects_invalid_credentials: produces the baseline valid + credentials before one field is varied. + - test_refresh_issues_new_tokens: provisions a loginable vendor before + exercising refresh. + - test_refresh_rejects_invalid_tokens: supplies the shared client id + used by refresh route requests. + - test_valid_token_returns_vendor_id: creates the vendor used to access + the protected test route. + + Args: + - faker: `Faker` session fixture used to generate realistic auth inputs. + - overrides: `str` keyword replacements for specific payload fields. + + Returns: + dict[str, str]: A JSON-serializable auth payload containing `email`, + `password`, and `client_id`. + """ + payload = { + "email": faker.email(), + "password": faker.password(length=16, special_chars=True), + "client_id": faker.uuid4(), + } + payload.update(overrides) + return payload + + +def signup(client: TestClient, payload: dict[str, str]): + """ + Sends a signup request to the auth route. + + Used by: + - test_signup_creates_vendor_201: exercises the happy-path signup + contract. + - test_signup_duplicate_email_returns_409: performs the first and second + signup attempts. + - test_login_returns_token_pair: provisions a vendor before login. + - test_login_rejects_invalid_credentials: creates the baseline vendor + record. + - test_refresh_issues_new_tokens: creates the vendor whose refresh token + is later exchanged. + - test_refresh_rejects_invalid_tokens: creates a vendor when the + invalid-token scenario still requires existing state. + - test_valid_token_returns_vendor_id: provisions the vendor used for the + protected endpoint call. 
+ + Args: + - client: `TestClient` bound to the FastAPI application under test. + - payload: `dict[str, str]` request body for the signup route. + + Returns: + - Response: The raw FastAPI test-client response from + `POST /api/v1/auth/signup`. + """ + return client.post(f"{API_V1}/auth/signup", json=payload) + + +def login(client: TestClient, payload: dict[str, str]): + """ + Sends a login request to the auth route. + + Used by: + - test_login_returns_token_pair: exercises the happy-path login + contract. + - test_login_rejects_invalid_credentials: submits invalid credentials + against the real route. + - test_refresh_issues_new_tokens: obtains the refresh token used for the + follow-up refresh call. + - test_valid_token_returns_vendor_id: obtains the access token used for + the protected endpoint call. + + Args: + - client: `TestClient` bound to the FastAPI application under test. + - payload: `dict[str, str]` request body for the login route. + + Returns: + - Response: The raw FastAPI test-client response from + `POST /api/v1/auth/login`. + """ + return client.post(f"{API_V1}/auth/login", json=payload) + + +@pytest.mark.integration +@pytest.mark.api +def test_signup_creates_vendor_201(faker) -> None: + """ + Verifies that `api.routes.auth.signup_route` creates a vendor and matters + because the public auth API must expose the persisted vendor identity after + a successful signup. + + Covers: + - `app.api.routes.auth.signup_route` + - the success path through `app.services.auth.signup` + + Rationale: + This is a straightforward integration test that exercises the real route + through `TestClient` without patching internal auth logic. + + Fixtures: + - faker: Session-scoped `Faker` instance used to generate a unique email, + password, and client id. 
+ """ + payload = build_auth_payload(faker) + with TestClient(app) as client: + response = signup(client, payload) + + assert response.status_code == 201, ( + f"Expected signup status 201, got {response.status_code}: {response.json()}" + ) + body = response.json() + assert "data" in body, f"Expected signup response data, got {body}" + assert body["data"]["vendor"]["email"] == payload["email"], ( + "Expected signup response email to match the submitted email" + ) + assert "id" in body["data"]["vendor"], ( + f"Expected created vendor id in response, got {body['data']['vendor']}" + ) + + +@pytest.mark.integration +@pytest.mark.api +def test_signup_duplicate_email_returns_409(faker) -> None: + """ + Verifies that `api.routes.auth.signup_route` rejects a second signup + because duplicate vendor creation must fail at the API boundary rather than + silently creating competing accounts. + + Covers: + - `app.api.routes.auth.signup_route` + - duplicate-email handling in `app.services.auth.signup` + + Rationale: + This test uses two real route calls because the duplicate-email rule is + part of the observable API contract. + + Fixtures: + - faker: Session-scoped `Faker` instance used to generate a unique auth + payload. 
+ """ + payload = build_auth_payload(faker) + with TestClient(app) as client: + first_response = signup(client, payload) + second_response = signup(client, payload) + + assert first_response.status_code == 201, ( + f"Expected initial signup status 201, got {first_response.status_code}:" + f" {first_response.json()}" + ) + assert second_response.status_code == 409, ( + f"Expected duplicate signup status 409, got" + f"{second_response.status_code}: {second_response.json()}" + ) + + +@pytest.mark.integration +@pytest.mark.api +@pytest.mark.parametrize( + "scenario", + [ + pytest.param("weak_password", id="weak_password"), + pytest.param("missing_client_id", id="missing_client_id"), + ], +) +def test_signup_validation_errors_return_422(faker, scenario: str) -> None: + """ + Verifies that `api.routes.auth.signup_route` returns request-validation + errors. This matters because the route contract must reject invalid client + input consistently. + + Covers: + - `app.api.routes.auth.signup_route` + - FastAPI/Pydantic validation on `app.schemas.auth.SignupRequest` + + Rationale: + The test mutates one valid payload into multiple invalid scenarios so + the failure cases stay aligned with the same contract boundary. + + Fixtures: + - faker: Session-scoped `Faker` instance used to create and then vary the + request payload. + + Parametrize: + - scenario: Names the invalid request shape being submitted. + - Cases: + - — password is shorter than the signup policy + permits. + - — the required client id field is omitted. 
+ """ + payload = build_auth_payload(faker) + if scenario == "weak_password": + payload["password"] = faker.password(length=7, special_chars=False) + else: + payload.pop("client_id") + + with TestClient(app) as client: + response = signup(client, payload) + + assert response.status_code == 422, ( + f"Expected signup validation status 422 for {scenario}, got " + f"{response.status_code}: {response.json()}" + ) + + +@pytest.mark.integration +@pytest.mark.api +def test_login_returns_token_pair(faker) -> None: + """ + Verifies that `api.routes.auth.login_route` returns an access token and a + refresh token. This matters because clients depend on the login route to + establish an authenticated session. + + Covers: + - `app.api.routes.auth.login_route` + - the success path through `app.services.auth.login` + + Rationale: + The test provisions a real vendor through signup first so login is + exercised against persisted state instead of mocked collaborators. + + Fixtures: + - faker: Session-scoped `Faker` instance used to generate loginable + credentials. 
+ """ + payload = build_auth_payload(faker) + with TestClient(app) as client: + signup(client, payload) + response = login(client, payload) + + assert response.status_code == 200, ( + f"Expected login status 200, got {response.status_code}: " + f"{response.json()}" + ) + data = response.json()["data"] + assert data["access_token"], ( + "Expected login response to include an access token" + ) + assert data["refresh_token"], ( + "Expected login response to include a refresh token" + ) + assert data["token_type"] == "bearer", ( + f"Expected token_type 'bearer', got '{data['token_type']}'" + ) + + +@pytest.mark.integration +@pytest.mark.api +@pytest.mark.parametrize( + ("use_unknown_email", "use_wrong_password"), + [ + pytest.param(True, False, id="unknown_email"), + pytest.param(False, True, id="wrong_password"), + ], +) +def test_login_rejects_invalid_credentials( + faker, *, use_unknown_email: bool, use_wrong_password: bool +) -> None: + """ + Verifies that `api.routes.auth.login_route` rejects unknown-email and + wrong-password attempts. This matters because the route must not + authenticate invalid credentials regardless of which field is wrong. + + Covers: + - `app.api.routes.auth.login_route` + - invalid-credential handling in `app.services.auth.login` + + Rationale: + The test keeps one provisioned vendor and varies only one credential + dimension per case so the failure reason stays isolated. + + Fixtures: + - faker: Session-scoped `Faker` instance used to generate valid and + alternate credentials. + + Parametrize: + Cases: + - — submits an email that was never signed up. + - — submits the persisted email with the wrong + password. 
+ """ + payload = build_auth_payload(faker) + invalid_payload = dict(payload) + invalid_payload["email"] = ( + faker.email() if use_unknown_email else payload["email"] + ) + invalid_payload["password"] = ( + faker.password(length=16, special_chars=True) + if use_wrong_password + else payload["password"] + ) + + with TestClient(app) as client: + signup(client, payload) + response = login(client, invalid_payload) + + assert response.status_code == 401, ( + f"Expected login rejection status 401, got {response.status_code}: " + f"{response.json()}" + ) + + +@pytest.mark.integration +@pytest.mark.api +def test_refresh_issues_new_tokens(faker) -> None: + """ + Verifies that `api.routes.auth.refresh_route` exchanges a valid refresh + token for a new token pair. This matters because authenticated clients rely + on refresh to continue operating without re-entering credentials. + + Covers: + - `app.api.routes.auth.refresh_route` + - the success path through `app.services.auth.refresh` + + Rationale: + The test drives the full signup-to-login-to-refresh route sequence + because the refresh contract depends on real issued tokens. + + Fixtures: + - faker: Session-scoped `Faker` instance used to generate the + signup/login payload and client id. 
+ """ + payload = build_auth_payload(faker) + with TestClient(app) as client: + signup(client, payload) + login_response = login(client, payload) + refresh_response = client.post( + f"{API_V1}/auth/refresh", + json={ + "refresh_token": login_response.json()["data"]["refresh_token"], + "client_id": payload["client_id"], + }, + ) + + assert refresh_response.status_code == 200, ( + f"Expected refresh status 200, got {refresh_response.status_code}: " + f"{refresh_response.json()}" + ) + data = refresh_response.json()["data"] + assert data["access_token"], ( + "Expected refresh response to include an access token" + ) + assert data["refresh_token"], ( + "Expected refresh response to include a refresh token" + ) + + +@pytest.mark.integration +@pytest.mark.api +@pytest.mark.parametrize( + "token_kind", + [ + pytest.param("access_token", id="access_token"), + pytest.param("garbage_token", id="garbage_token"), + ], +) +def test_refresh_rejects_invalid_tokens( + app_settings: Settings, faker, token_kind: str +) -> None: + """ + Verifies that `api.routes.auth.refresh_route` rejects invalid tokens. This + matters because the refresh endpoint must enforce token-type and + token-integrity checks at the API boundary. + + Covers: + - `app.api.routes.auth.refresh_route` + - invalid-token handling in `app.services.auth.refresh` + + Rationale: + Real tokens are generated through the project security helpers so the + test documents the external contract rather than any JWT library + internals. + + Fixtures: + - app_settings: Shared `Settings` object containing the signing secret used + to mint the invalid token variants. + - faker: Session-scoped `Faker` instance used to generate payload values and + the garbage token string. + + Parametrize: + - — uses a real access token where a refresh token + is required. + - — uses an unsigned arbitrary string that should + fail decoding. 
+ """ + payload = build_auth_payload(faker) + invalid_token = ( + create_access_token(faker.uuid4(), app_settings) + if token_kind == "access_token" + else faker.sha256(raw_output=False) + ) + + with TestClient(app) as client: + if token_kind == "access_token": + signup(client, payload) + response = client.post( + f"{API_V1}/auth/refresh", + json={ + "refresh_token": invalid_token, + "client_id": payload["client_id"], + }, + ) + + assert response.status_code == 401, ( + f"Expected refresh rejection status 401 for {token_kind}, got " + f"{response.status_code}: {response.json()}" + ) + + +@pytest.mark.integration +@pytest.mark.api +def test_missing_token_returns_401() -> None: + """ + Verifies that the protected test route rejects unauthenticated requests + with a 401 response. This matters because the route exists to prove the + authentication dependency protects downstream handlers. + + Covers: + - the protected route registered in `backend/tests/conftest.py` + - `app.api.deps.get_current_vendor_id` + + Rationale: + This test is intentionally minimal because the contract under test is + the authentication gate itself, not the route payload. + """ + with TestClient(app) as client: + response = client.get("/tests/protected-test") + + assert response.status_code == 401, ( + f"Expected protected route status 401 without token, got " + f"{response.status_code}: {response.json()}" + ) + + +@pytest.mark.integration +@pytest.mark.api +def test_expired_token_returns_401(app_settings: Settings, faker) -> None: + """ + Verifies that the protected route rejects an expired access token. + This matters because stale tokens must fail before the protected handler + executes any application logic. + + Covers: + - the protected route registered in `backend/tests/conftest.py` + - token-expiry enforcement in `app.api.deps.get_current_vendor_id` + + Rationale: + The test uses a real signed token with a negative expiry delta so it + exercises the same path production requests use. 
+
+    Fixtures:
+    - app_settings: Shared `Settings` object used to sign the expired access token.
+    - faker: Session-scoped `Faker` instance used to generate the vendor id claim.
+    """
+    token = create_access_token(
+        faker.uuid4(), app_settings, expires_delta=timedelta(seconds=-1)
+    )
+    with TestClient(app) as client:
+        response = client.get(
+            "/tests/protected-test",
+            headers={"Authorization": f"Bearer {token}"},
+        )
+
+    assert response.status_code == 401, (
+        f"Expected expired token status 401, got {response.status_code}: {response.json()}"
+    )
+
+
+@pytest.mark.integration
+@pytest.mark.api
+def test_valid_token_returns_vendor_id(faker) -> None:
+    """
+    Verifies that a valid access token reaches the protected route and that
+    the route returns the created vendor id. This matters because the
+    authenticated API and the RLS database context must stay aligned for
+    tenant isolation to work.
+
+    Covers:
+    - the protected route registered in `backend/tests/conftest.py`
+    - `app.api.deps.get_current_vendor_id`
+    - `app.api.deps.get_rls_cursor`
+
+    Rationale:
+        The test performs the full signup and login flow so the protected-route
+        assertion is made against a real issued token and real database context.
+ + Fixtures: + faker: Session-scoped `Faker` instance used to generate the auth payload + """ + payload = build_auth_payload(faker) + with TestClient(app) as client: + signup_response = signup(client, payload) + created_vendor_id = signup_response.json()["data"]["vendor"]["id"] + login_response = login(client, payload) + access_token = login_response.json()["data"]["access_token"] + protected_response = client.get( + "/tests/protected-test", + headers={"Authorization": f"Bearer {access_token}"}, + ) + + assert login_response.status_code == 200, ( + f"Expected login status 200 before protected call, got " + f"{login_response.status_code}: {login_response.json()}" + ) + assert protected_response.status_code == 200, ( + f"Expected protected route status 200, got " + f"{protected_response.status_code}: {protected_response.json()}" + ) + body = protected_response.json() + assert body["vendor_id"] == created_vendor_id, ( + f"Expected route vendor_id '{created_vendor_id}', got " + f"'{body['vendor_id']}'" + ) + assert body["db_vendor_id"] == created_vendor_id, ( + f"Expected RLS vendor_id '{created_vendor_id}', got " + f"'{body['db_vendor_id']}'" + ) diff --git a/backend/tests/api/routes/test_health.py b/backend/tests/api/routes/test_health.py index 121d4b8..c9b5ca6 100644 --- a/backend/tests/api/routes/test_health.py +++ b/backend/tests/api/routes/test_health.py @@ -13,11 +13,21 @@ @pytest.mark.api def test_health_check_returns_ok(): """ - WHEN the /health endpoint is called - THEN it should return a 200 OK response with the status and timestamp + Verifies that `app.api.routes.health.health_check` returns the public health + payload with an OK status and timestamp. This matters because infrastructure + and uptime checks depend on a stable liveness contract. + + Covers: + - `app.api.routes.health.health_check` + + Rationale: + This is a straightforward route integration test with no fixtures or patches because the endpoint contract is fully observable from the response. 
+ + Fixtures: + None. """ - client = TestClient(app) - response = client.get("/api/v1/health") + with TestClient(app) as client: + response = client.get("/api/v1/health") assert response.status_code == status.HTTP_200_OK, ( f"Expected status 200 OK, got {response.status_code}: {response.text}" ) @@ -35,11 +45,23 @@ def test_health_check_returns_ok(): @pytest.mark.api def test_health_check_has_request_id(): """ - WHEN the /health endpoint is called - THEN the response should contain an X-Request-ID header + Verifies that `app.api.routes.health.health_check` responses include the + request-id header added by the middleware stack. This matters because error + and success responses should be traceable with the same correlation + identifier mechanism. + + Covers: + - `app.api.routes.health.health_check` + - request-id middleware behavior on a healthy route response + + Rationale: + The test asserts only the externally visible header because the middleware contract is exposed through the response. + + Fixtures: + None. 
""" - client = TestClient(app) - response = client.get("/api/v1/health") + with TestClient(app) as client: + response = client.get("/api/v1/health") assert "x-request-id" in response.headers, ( "Expected 'x-request-id' header in health endpoint response, " f"got headers: {list(response.headers.keys())}" diff --git a/backend/tests/api/test_deps.py b/backend/tests/api/test_deps.py new file mode 100644 index 0000000..674ab85 --- /dev/null +++ b/backend/tests/api/test_deps.py @@ -0,0 +1,504 @@ +from __future__ import annotations + +from contextlib import contextmanager +from datetime import datetime, timedelta, timezone +from types import SimpleNamespace + +import jwt +import pytest +from fastapi.security import HTTPAuthorizationCredentials + +from app.api import deps +from app.core.exceptions import ( + AuthenticationException, + ServiceUnavailableException, +) +from app.core.security import ( + JWT_ALGORITHM, + create_access_token, + create_refresh_token, +) + + +def build_fake_cursor() -> SimpleNamespace: + """ + Builds a cursor-like object that records executed SQL statements and parameters. + + Used by: + test_get_db_yields_cursor_when_pool_exists - asserts that the yielded cursor object is the same cursor provided by the fake pool. + test_get_rls_cursor_sets_context_and_yields_cursor - captures the RLS context statement executed before the cursor is yielded. + + Args: + None. + + Returns: + SimpleNamespace: A cursor substitute with `executed` and `execute` attributes. + """ + cursor = SimpleNamespace(executed=[]) + + def execute(statement: str, params=None) -> None: + cursor.executed.append((statement, params)) + + cursor.execute = execute + return cursor + + +def build_fake_connection(cursor: SimpleNamespace) -> SimpleNamespace: + """ + Builds a connection-like object that yields the provided fake cursor and counts cursor acquisitions. + + Used by: + test_get_db_yields_cursor_when_pool_exists - asserts that one cursor is opened from the borrowed connection. 
+ test_get_rls_cursor_sets_context_and_yields_cursor - provides the cursor used to observe the RLS setup SQL. + + Args: + cursor: `SimpleNamespace` cursor substitute returned by `build_fake_cursor`. + + Returns: + SimpleNamespace: A connection substitute with `cursor_instance`, `cursor_calls`, and `cursor()` attributes. + """ + connection = SimpleNamespace(cursor_instance=cursor, cursor_calls=0) + + @contextmanager + def cursor_context(): + connection.cursor_calls += 1 + yield cursor + + connection.cursor = cursor_context + return connection + + +def build_fake_pool(connection: SimpleNamespace) -> SimpleNamespace: + """ + Builds a pool-like object that yields the provided fake connection and counts connection acquisitions. + + Used by: + test_get_db_yields_cursor_when_pool_exists - asserts that one pooled connection is borrowed. + test_get_rls_cursor_sets_context_and_yields_cursor - provides the connection whose cursor receives the RLS setup SQL. + + Args: + connection: `SimpleNamespace` connection substitute returned by `build_fake_connection`. + + Returns: + SimpleNamespace: A pool substitute with `connection_instance`, `connection_calls`, and `connection()` attributes. + """ + pool = SimpleNamespace(connection_instance=connection, connection_calls=0) + + @contextmanager + def connection_context(): + pool.connection_calls += 1 + yield connection + + pool.connection = connection_context + return pool + + +def build_request(**state) -> SimpleNamespace: + """ + Builds a request-like object whose nested `app.state` can be tailored for dependency tests. + + Used by: + test_get_db_yields_cursor_when_pool_exists - supplies a request object with a fake pool attached. + test_get_db_raises_when_pool_missing - supplies a request object whose pool is absent. + test_get_settings_raises_when_settings_missing - supplies a request object whose settings are absent. + test_get_settings_returns_settings_when_present - supplies a request object with a valid settings object attached. 
+ test_get_rls_cursor_raises_when_pool_missing - supplies a request object whose pool is absent. + test_get_rls_cursor_sets_context_and_yields_cursor - supplies a request object with a fake pool attached. + + Args: + state: Application state fields to install under `request.app.state`. + + Returns: + SimpleNamespace: A lightweight request substitute with `app.state` attributes matching the provided keyword arguments. + """ + return SimpleNamespace(app=SimpleNamespace(state=SimpleNamespace(**state))) + + +def make_credentials(token: str) -> HTTPAuthorizationCredentials: + """ + Wraps a raw bearer token string in FastAPI's authorization-credentials type. + + Used by: + test_get_current_vendor_id_payload_errors - supplies credentials to the auth dependency under malformed payload cases. + test_get_current_vendor_id_rejects_invalid_tokens - supplies credentials for invalid token variants. + test_get_current_vendor_id_returns_vendor_id_from_valid_access_token - supplies credentials for the happy-path access token. + + Args: + token: `str` bearer token to place into the credentials wrapper. + + Returns: + HTTPAuthorizationCredentials: Credentials with the `Bearer` scheme and the provided token string. + """ + return HTTPAuthorizationCredentials(scheme="Bearer", credentials=token) + + +def build_access_payload( + faker, **overrides +) -> dict[str, str | datetime | None]: + """ + Builds a signed-token payload shape for `get_current_vendor_id` tests. + + Used by: + test_get_current_vendor_id_payload_errors - creates malformed access-token payloads without patching internal decode helpers. + + Args: + faker: `Faker` session fixture used to generate a realistic vendor id. + overrides: Payload field replacements applied on top of the default access-token claims. + + Returns: + dict[str, str | datetime | None]: A token payload with `token_type`, `vendor_id`, and `exp` claims. 
+ """ + payload: dict[str, str | datetime | None] = { + "token_type": "access", + "vendor_id": faker.uuid4(), + "exp": datetime.now(timezone.utc) + timedelta(minutes=5), + } + payload.update(overrides) + return payload + + +@pytest.mark.unit +def test_get_db_yields_cursor_when_pool_exists(): + """ + Verifies that `app.api.deps.get_db` opens one pooled connection, yields one + cursor, and then terminates cleanly. This matters because route dependencies + rely on `get_db` to provide the database cursor used by downstream logic. + + Covers: + - `app.api.deps.get_db` + + Rationale: + The test uses lightweight fake pool and connection objects because the contract under test is dependency plumbing rather than SQL behavior. + + Fixtures: + None. + + """ + cursor = build_fake_cursor() + pool = build_fake_pool(build_fake_connection(cursor)) + request = build_request(db_pool=pool) + + generator = deps.get_db(request) + yielded = next(generator) + assert yielded is cursor, "get_db must yield the cursor from the pool" + + with pytest.raises(StopIteration, match=""): + next(generator) + + assert pool.connection_calls == 1, ( + "get_db must open exactly one pooled connection" + ) + assert pool.connection_instance.cursor_calls == 1, ( + "get_db must open exactly one cursor from the pooled connection" + ) + + +@pytest.mark.unit +def test_get_db_raises_when_pool_missing(): + """ + Verifies that `app.api.deps.get_db` raises a service-unavailable error when + the application state has no database pool. This matters because route + handlers should fail with a clear infrastructure error instead of + dereferencing a missing pool. + + Covers: + - `app.api.deps.get_db` + + Rationale: + A minimal request stub is enough because the failure happens before any database connection is attempted. + + Fixtures: + None. 
+ + """ + request = build_request(db_pool=None) + + with pytest.raises( + ServiceUnavailableException, match="Database pool not initialized" + ): + next(deps.get_db(request)) + + +@pytest.mark.unit +def test_get_settings_raises_when_settings_missing(): + """ + Verifies that `app.api.deps.get_settings` fails fast when application + settings are absent from `app.state`. This matters because auth and + configuration-dependent code expects the dependency to provide a valid + settings object. + + Covers: + - `app.api.deps.get_settings` + + Rationale: + The request stub contains only the missing state needed to trigger the error path. + + Fixtures: + None. + + """ + request = build_request(settings=None) + + with pytest.raises( + ServiceUnavailableException, match="Settings not initialized" + ): + deps.get_settings(request) + + +@pytest.mark.unit +def test_get_settings_returns_settings_when_present(app_settings): + """ + Verifies that `app.api.deps.get_settings` returns the exact settings object + stored on application state. This matters because callers rely on + configuration identity and values from the dependency rather than a copied + object. + + Covers: + - `app.api.deps.get_settings` + + Rationale: + The assertion uses object identity because the dependency contract is to return the existing settings instance. + + Fixtures: + app_settings: Shared `Settings` object used to populate the request state. + + """ + request = build_request(settings=app_settings) + + resolved = deps.get_settings(request) + assert resolved is app_settings, ( + "get_settings must return the settings object from app state" + ) + + +@pytest.mark.unit +def test_get_rls_cursor_raises_when_pool_missing(faker): + """ + Verifies that `app.api.deps.get_rls_cursor` raises a service-unavailable + error when no database pool is configured. This matters because + authenticated routes still need the underlying pool before + row-level-security context can be installed. 
+ + Covers: + - `app.api.deps.get_rls_cursor` + + Rationale: + The test keeps the request stub minimal because the failure occurs before any context-setting SQL executes. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate a vendor id for the dependency call. + + """ + request = build_request(db_pool=None) + + with pytest.raises( + ServiceUnavailableException, match="Database pool not initialized" + ): + next(deps.get_rls_cursor(request, vendor_id=faker.uuid4())) + + +@pytest.mark.unit +def test_get_rls_cursor_sets_context_and_yields_cursor(faker): + """ + Verifies that `app.api.deps.get_rls_cursor` sets the authenticated vendor + context in SQL before yielding the cursor. This matters because + row-level-security queries depend on the app context being installed for the + current vendor. + + Covers: + - `app.api.deps.get_rls_cursor` + + Rationale: + Fake cursor objects are sufficient here because the contract being verified is the exact SQL statement and yielded cursor object. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the vendor id claim. 
+ + """ + vendor_id = faker.uuid4() + cursor = build_fake_cursor() + pool = build_fake_pool(build_fake_connection(cursor)) + request = build_request(db_pool=pool) + + generator = deps.get_rls_cursor(request, vendor_id=vendor_id) + yielded = next(generator) + assert yielded is cursor, "get_rls_cursor must yield the DB cursor" + assert len(cursor.executed) == 1, ( + "get_rls_cursor must set app context exactly once per request" + ) + assert cursor.executed[0] == ( + "SELECT app.set_app_context(%s)", + (vendor_id,), + ), "get_rls_cursor must set app context using the authenticated vendor_id" + + with pytest.raises(StopIteration, match=""): + next(generator) + + +@pytest.mark.unit +@pytest.mark.parametrize( + ("scenario", "expected_message"), + [ + pytest.param( + "missing_vendor_id", "Invalid token payload", id="missing_vendor_id" + ), + pytest.param( + "malformed_vendor_id", + "Invalid token payload", + id="malformed_vendor_id", + ), + ], +) +def test_get_current_vendor_id_payload_errors( + app_settings, faker, scenario: str, expected_message: str +) -> None: + """ + Verifies that `app.api.deps.get_current_vendor_id` rejects access tokens + whose decoded payload omits or corrupts the vendor id claim. This matters + because authorization depends on a valid vendor identity being present in + every access token. + + Covers: + - `app.api.deps.get_current_vendor_id` + - payload validation after token decoding + + Rationale: + The test signs real JWT payloads with malformed claims so it documents the dependency contract without patching decode helpers. + + Fixtures: + app_settings: Shared `Settings` object containing the JWT signing secret. + faker: Session-scoped `Faker` instance used to generate claim values. + + Parametrize: + scenario: Identifies which vendor-id payload defect is being exercised. + expected_message: The authentication error expected for the malformed payload. + Cases: + - — the vendor id claim is present but null. 
+ - — the vendor id claim is present but not a valid UUID string. + """ + token_payload = ( + build_access_payload(faker, vendor_id=None) + if scenario == "missing_vendor_id" + else build_access_payload(faker, vendor_id=faker.word()) + ) + token = jwt.encode( + token_payload, app_settings.SECRET_KEY, algorithm=JWT_ALGORITHM + ) + + with pytest.raises(AuthenticationException, match=expected_message): + deps.get_current_vendor_id(make_credentials(token), app_settings) + + +@pytest.mark.unit +@pytest.mark.parametrize( + ("token_factory", "expected_message"), + [ + pytest.param( + lambda faker, settings: create_refresh_token( + faker.uuid4(), settings + ), + "Invalid token type", + id="refresh_token", + ), + pytest.param( + lambda faker, settings: create_access_token( + faker.uuid4(), settings, expires_delta=timedelta(seconds=-1) + ), + "Invalid or expired token", + id="expired_token", + ), + pytest.param( + lambda faker, settings: faker.sha256(raw_output=False), + "Invalid or expired token", + id="garbage_token", + ), + ], +) +def test_get_current_vendor_id_rejects_invalid_tokens( + app_settings, faker, token_factory, expected_message: str +) -> None: + """ + Verifies that `app.api.deps.get_current_vendor_id` rejects non-access, + expired, and garbage bearer tokens. This matters because authenticated + endpoints depend on the dependency to enforce token type and expiry rules + uniformly. + + Covers: + - `app.api.deps.get_current_vendor_id` + - invalid-token handling in `app.core.security.decode_token` + + Rationale: + The token variants are produced through real project helpers and raw strings so the test stays at the contract boundary instead of asserting JWT-library internals. + + Fixtures: + app_settings: Shared `Settings` object used to sign token variants. + faker: Session-scoped `Faker` instance used to generate vendor ids and the garbage token string. + + Parametrize: + token_factory: Produces the invalid token variant for the scenario. 
+ expected_message: The authentication error expected from the dependency. + Cases: + - — a refresh token is supplied where an access token is required. + - — an already-expired access token is supplied. + - — an arbitrary non-token string is supplied. + """ + token = token_factory(faker, app_settings) + + with pytest.raises(AuthenticationException, match=expected_message): + deps.get_current_vendor_id(make_credentials(token), app_settings) + + +@pytest.mark.unit +def test_get_current_vendor_id_returns_vendor_id_from_valid_access_token( + app_settings, faker +) -> None: + """ + Verifies that `app.api.deps.get_current_vendor_id` returns the vendor id + claim from a valid signed access token. This matters because downstream + route code depends on receiving the authenticated vendor identity from the + dependency. + + Covers: + - `app.api.deps.get_current_vendor_id` + + Rationale: + The test uses a real access token generated by the project security helper so the claim extraction path matches production behavior. + + Fixtures: + app_settings: Shared `Settings` object used to sign the access token. + faker: Session-scoped `Faker` instance used to generate the vendor id claim. + + """ + vendor_id = faker.uuid4() + token = create_access_token(vendor_id, app_settings) + + resolved_vendor_id = deps.get_current_vendor_id( + make_credentials(token), app_settings + ) + assert resolved_vendor_id == vendor_id, ( + f"Expected authenticated vendor_id '{vendor_id}', got " + f"'{resolved_vendor_id}'" + ) + + +@pytest.mark.unit +def test_get_current_vendor_id_requires_credentials(app_settings) -> None: + """ + Verifies that `app.api.deps.get_current_vendor_id` rejects requests that do + not include bearer credentials at all. This matters because the dependency + is the authentication gate for protected routes. + + Covers: + - `app.api.deps.get_current_vendor_id` + + Rationale: + This is the simplest unauthenticated boundary case and does not require token generation. 
+ + Fixtures: + app_settings: Shared `Settings` object passed into the dependency call. + + """ + with pytest.raises( + AuthenticationException, match="Missing authentication token" + ): + deps.get_current_vendor_id(None, app_settings) diff --git a/backend/tests/api/test_main.py b/backend/tests/api/test_main.py new file mode 100644 index 0000000..726c0c6 --- /dev/null +++ b/backend/tests/api/test_main.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +import pytest + +from app.api.main import api_router + + +@pytest.mark.unit +def test_api_router_includes_health_and_auth_routes() -> None: + """ + Verifies that `app.api.main.api_router` includes the health and auth route + trees expected by the public API. This matters because the main API router + is the composition point for the endpoints clients depend on. + + Covers: + - `app.api.main.api_router` + + Rationale: + This test inspects the router object directly because the contract under test is route registration, not request handling. + + Fixtures: + None. + """ + route_paths = {route.path for route in api_router.routes} + expected_paths = {"/health", "/auth/signup", "/auth/login", "/auth/refresh"} + + assert expected_paths <= route_paths, ( + f"Expected API router paths {expected_paths}, got {route_paths}" + ) diff --git a/backend/tests/api/test_middlewares.py b/backend/tests/api/test_middlewares.py new file mode 100644 index 0000000..899864d --- /dev/null +++ b/backend/tests/api/test_middlewares.py @@ -0,0 +1,107 @@ +import uuid + +import pytest +from fastapi import FastAPI, Request +from fastapi.testclient import TestClient + +from app.api.middlewares import add_request_id + + +@pytest.fixture +def middleware_app(): + """ + Provides a minimal FastAPI app with the request-id middleware installed and an echo endpoint for assertions. + + Scope: function — the fixture builds a fresh app object per test and the app state is mutable during request handling. 
+ + Provides: + A `FastAPI` instance whose `/echo` route returns `request.state.request_id`. + + Dependencies: + None. + + Teardown: + None. + + Note: + This fixture exists only to exercise `app.api.middlewares.add_request_id` in isolation from the main application. + """ + app = FastAPI() + app.middleware("http")(add_request_id) + + @app.get("/echo") + async def echo(request: Request): + return {"request_id": request.state.request_id} + + return app + + +@pytest.mark.unit +def test_add_request_id_preserves_valid_incoming_header(middleware_app): + """ + Verifies that `app.api.middlewares.add_request_id` preserves a valid + incoming request id instead of replacing it. This matters because upstream + systems may supply trace identifiers that should survive through the + application. + + Covers: + - `app.api.middlewares.add_request_id` + + Rationale: + The middleware is exercised through a tiny in-process app because the contract under test is the header and request-state outcome. + + Fixtures: + middleware_app: Minimal FastAPI app with the request-id middleware and an echo endpoint. + """ + incoming = "123e4567-e89b-12d3-a456-426614174000" + + with TestClient(middleware_app) as client: + response = client.get("/echo", headers={"X-Request-ID": incoming}) + + assert response.status_code == 200, ( + f"Expected 200 response, got {response.status_code}: {response.text}" + ) + assert response.headers.get("X-Request-ID") == incoming, ( + "Middleware must preserve a valid incoming request id in response header" + ) + assert response.json()["request_id"] == incoming, ( + "Middleware must store the same valid request id on request.state" + ) + + +@pytest.mark.unit +def test_add_request_id_replaces_invalid_header_with_uuid(middleware_app): + """ + Verifies that `app.api.middlewares.add_request_id` replaces an invalid + incoming request id with a fresh canonical UUID. This matters because + downstream tracing should not trust malformed client-supplied identifiers. 
+ + Covers: + - `app.api.middlewares.add_request_id` + + Rationale: + The assertions stay at the observable boundary by checking the response header and `request.state` value rather than middleware internals. + + Fixtures: + middleware_app: Minimal FastAPI app with the request-id middleware and an echo endpoint. + """ + incoming = "not-a-uuid" + + with TestClient(middleware_app) as client: + response = client.get("/echo", headers={"X-Request-ID": incoming}) + + assert response.status_code == 200, ( + f"Expected 200 response, got {response.status_code}: {response.text}" + ) + generated = response.headers.get("X-Request-ID") + assert generated, "Middleware must always emit an X-Request-ID header" + assert generated != incoming, ( + "Middleware must replace invalid incoming request ids with a new UUID" + ) + parsed = uuid.UUID(generated) + assert str(parsed) == generated, ( + "Generated request id header must be a canonical UUID string" + ) + assert response.json()["request_id"] == generated, ( + "request.state.request_id must match the response X-Request-ID header" + ) diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py index af1d56c..16c9b1f 100644 --- a/backend/tests/conftest.py +++ b/backend/tests/conftest.py @@ -1,52 +1,377 @@ -""" -Root conftest - shared fixtures for unit and integration tests. +"""Root conftest — shared fixtures for all backend tests. -Key fixtures: - override_get_db: Module-scoped override of the get_db dependency. - Starts a dedicated Testcontainers Postgres per module with - migrations auto-applied via volume mapping. 
+Fixture hierarchy +----------------- +pg_container (session) — one Postgres Testcontainer per session +migrated_db_pool (session) — ConnectionPool over the container +app_settings (session) — Settings with known test secrets +faker (session) — reproducible Faker seeded per xdist worker +db_conn (function) — connection with force_rollback=True (no leaks) """ from __future__ import annotations +import os +import shlex +import time import typing from pathlib import Path import pytest -from psycopg import Cursor, connect +from faker import Faker +from fastapi import APIRouter as _APIRouter +from psycopg import Connection, Cursor +from psycopg_pool import ConnectionPool +from testcontainers.core.waiting_utils import WaitStrategy, WaitStrategyTarget from testcontainers.postgres import PostgresContainer -from app.api.deps import get_db +from app.api.deps import ( + CurrentVendorId, + RLSCursorDep, + get_db, + get_rls_cursor, + get_settings, +) +from app.core.config import Settings from app.main import app MIGRATIONS_DIR = str(Path(__file__).parents[2] / "migrations") +POSTGRES_IMAGE = "postgres:18.2-alpine3.23" +API_V1 = "/api/v1" + + +def xdist_worker_offset() -> int: + """ + Returns the numeric xdist worker offset used to derive a stable Faker seed. + + Used by: + faker - offsets the shared base seed so parallel workers do not reuse + identical fake data streams. + + + Returns: + int: The parsed xdist worker number, or `0` for the master process and + unknown names. 
+ """ + worker_name = os.environ.get("PYTEST_XDIST_WORKER", "gw0") + if worker_name == "master": + return 0 + suffix = worker_name.removeprefix("gw") + return int(suffix) if suffix.isdigit() else 0 + + +# --------------------------------------------------------------------------- +# Wait strategy — mirrors migrations/tests/helpers.PgReadyWaitStrategy +# --------------------------------------------------------------------------- + + +class PgReadyWaitStrategy(WaitStrategy): + """Wait until PostgreSQL accepts connections; surface logs on failure.""" + + def wait_until_ready(self, container: WaitStrategyTarget) -> None: + wrapped = container.get_wrapped_container() + user = container.username + start = time.time() + + while True: + if time.time() - start > self._startup_timeout: + raise TimeoutError( + "Postgres did not become ready within " + f"{self._startup_timeout}s" + ) + + wrapped.reload() + state = wrapped.attrs["State"] + + if state["Status"] == "exited" and state["ExitCode"] != 0: + stdout, stderr = container.get_logs() + logs = (stdout + stderr).decode(errors="replace").strip() + raise RuntimeError( + f"Postgres container exited with code {state['ExitCode']}\n" + f"--- container logs ---\n{logs}" + ) + + result = container.exec(f"pg_isready -U {shlex.quote(user)}") + if result.exit_code == 0: + return + + time.sleep(self._poll_interval) + + +# --------------------------------------------------------------------------- +# Session-scoped: one container + pool for the whole test run +# --------------------------------------------------------------------------- + + +@pytest.fixture(scope="session") +def pg_container() -> typing.Generator[PostgresContainer, None, None]: + """ + Provides a PostgreSQL Testcontainer with the backend migrations applied. + + Scope: session — the container is expensive to start and tests do not mutate the container definition itself. 
+ + Provides: + A started `PostgresContainer` whose connection details are exported into the process environment for `Settings`. + + Dependencies: + None. + Teardown: + The context manager stops the container after the test session ends. -@pytest.fixture(scope="module") -def test_container() -> typing.Generator[PostgresContainer, None, None]: - """Fixture: Testcontainers Postgres with migrations.""" - with PostgresContainer( - "postgres:18.2-alpine3.23", driver=None - ).with_volume_mapping( - MIGRATIONS_DIR, "/docker-entrypoint-initdb.d" + Note: + This fixture shares the same database container across the session; write isolation must come from `db_conn`, not from restarting the container. + """ + with ( + PostgresContainer(POSTGRES_IMAGE, driver=None) + .with_volume_mapping( + MIGRATIONS_DIR, "/docker-entrypoint-initdb.d", mode="ro" + ) + .waiting_for(PgReadyWaitStrategy()) ) as container: + os.environ["POSTGRES_SERVER"] = str(container.get_container_host_ip()) + os.environ["POSTGRES_PORT"] = str(container.get_exposed_port(5432)) + os.environ["POSTGRES_USER"] = str(container.username) + os.environ["POSTGRES_PASSWORD"] = str(container.password) + os.environ["POSTGRES_DB"] = str(container.dbname) + os.environ["PROJECT_NAME"] = "test" + os.environ["SECRET_KEY"] = "integration-test-secret-key-32bytes!" yield container -@pytest.fixture(scope="module") -def override_get_db(test_container: PostgresContainer): - """Override the get_db dependency to use test container database. +@pytest.fixture(scope="session") +def migrated_db_pool(pg_container: PostgresContainer): + """ + Provides a connection pool bound to the migrated PostgreSQL test container. + + Scope: session — opening the pool is expensive and tests safely share it through function-scoped transactions. + + Provides: + An open `ConnectionPool` configured against the test container database. - Ensures each test module gets its own isolated container, avoiding - cross-module side effects when tests run in parallel. 
+ Dependencies: + pg_container: Supplies the live PostgreSQL test container and its connection URL. + + Teardown: + The context manager closes the pool after the session completes. + + Note: + This fixture is read-write, but individual tests must isolate writes through `db_conn`. """ + with ConnectionPool( + pg_container.get_connection_url(), + min_size=4, + max_size=20, + open=True, + check=ConnectionPool.check_connection, + ) as pool: + yield pool + + +@pytest.fixture(scope="session") +def app_settings(pg_container: PostgresContainer) -> Settings: + """ + Provides a shared `Settings` instance configured from the test container environment. + + Scope: session — configuration is immutable for the duration of the suite and is safe to reuse. + + Provides: + A `Settings` object with deterministic test secrets and database connection values. + + Dependencies: + pg_container: Ensures the environment variables reflect the live container ports and credentials before settings are instantiated. + + Teardown: + None. + + Note: + Tests that need per-request database state should not mutate this object. + """ + return Settings() + + +@pytest.fixture(scope="session") +def faker() -> Faker: + """ + Provides a session-scoped Faker instance with a stable worker-aware seed. + + Scope: session — the faker object is reused safely and deterministic seeding is part of the test contract. + + Provides: + A `Faker` instance seeded from `PYTEST_RANDOMLY_SEED` plus the current xdist worker offset. + + Dependencies: + None. + + Teardown: + None. + + Note: + Parallel workers intentionally receive different offsets so generated values remain reproducible without colliding across workers. 
+ """ + base_seed = int(os.environ.get("PYTEST_RANDOMLY_SEED", "20260318")) + seed = base_seed + xdist_worker_offset() + fake = Faker() + fake.seed_instance(seed) + return fake + + +@pytest.fixture(scope="session", autouse=True) +def setup_session_overrides(app_settings: Settings) -> None: + """ + Applies the session-wide FastAPI dependency override for shared settings. + + Scope: session — the settings override is immutable and should remain installed for the whole run. + + Provides: + `None`; it installs `get_settings` into `app.dependency_overrides`. + + Dependencies: + app_settings: Supplies the shared `Settings` object returned by the dependency override. + + Teardown: + None. + + Note: + This fixture mutates global application state once at session start because `get_settings` is not injectable per request in the current test setup. + """ + app.dependency_overrides[get_settings] = lambda: app_settings + + +# --------------------------------------------------------------------------- +# Function-scoped: transactional isolation per test +# --------------------------------------------------------------------------- + + +@pytest.fixture +def db_conn( + migrated_db_pool: ConnectionPool, +) -> typing.Generator[Connection, None, None]: + """ + Provides a database connection wrapped in an automatic rollback transaction. + + Scope: function — each test needs isolated writes that are rolled back after execution. + + Provides: + An open `Connection` with `force_rollback=True` active for the duration of the test. + + Dependencies: + migrated_db_pool: Supplies the shared connection pool used to borrow the test connection. + + Teardown: + The transaction rolls back and the connection returns to the pool after the fixture yields. + + Note: + Tests should create cursors from this connection rather than opening new pooled connections directly. 
+ """ + with migrated_db_pool.connection() as conn: + with conn.transaction(force_rollback=True): + yield conn + + +# --------------------------------------------------------------------------- +# Test-only router (protected endpoint helper for integration tests) +# --------------------------------------------------------------------------- + +_test_router = _APIRouter(prefix="/tests") + + +@_test_router.get("/protected-test") +def protected_test(vendor_id: CurrentVendorId, cursor: RLSCursorDep) -> dict: + """ + Returns the authenticated vendor id and the database RLS context for protected-route assertions. + + Used by: + test_missing_token_returns_401 - exercises the authentication boundary for a protected endpoint. + test_expired_token_returns_401 - verifies expired access tokens are rejected before reaching protected logic. + test_valid_token_returns_vendor_id - proves the API and database see the same vendor context after authentication. + + Args: + vendor_id: `str` vendor identifier resolved from the access token dependency. + cursor: `Cursor` opened through the RLS dependency with tenant context already applied. + + Returns: + dict: A payload containing the dependency-resolved `vendor_id` and the value stored in `current_setting('app.vendor_id', true)`. + """ + cursor.execute("SELECT current_setting('app.vendor_id', true)") + row = cursor.fetchone() + db_vendor_id = row[0] if row else None + return {"vendor_id": vendor_id, "db_vendor_id": db_vendor_id} + + +# Register the test-only router onto the global app once. 
+app.include_router(_test_router) + + +# --------------------------------------------------------------------------- +# Function-scoped autouse: Isolated dependencies per test +# --------------------------------------------------------------------------- + + +@pytest.fixture(autouse=True) +def override_function_dependencies( + db_conn: Connection, +) -> typing.Generator[None, None, None]: + """ + Applies per-test database dependency overrides that bind FastAPI dependencies to the transactional test connection. + + Scope: function — the overrides capture a function-scoped transaction and must be reset after every test. + + Provides: + `None`; it temporarily installs `get_db` and `get_rls_cursor` overrides onto the global `app`. + + Dependencies: + db_conn: Supplies the transactional database connection whose cursors back the overrides. + + Teardown: + Removes the temporary overrides for `get_db` and `get_rls_cursor` after the test finishes. + + Note: + This fixture mutates global dependency state because the application dependencies are not injectable without touching `app.dependency_overrides`. + """ + + def get_db_override() -> typing.Generator[Cursor, None, None]: + """ + Yields a cursor backed by the current test transaction. + + Used by: + override_function_dependencies - binds FastAPI's `get_db` dependency to the per-test connection. + + Args: + None. + + Returns: + typing.Generator[Cursor, None, None]: A cursor created from `db_conn` for one dependency resolution. + """ + with db_conn.cursor() as cur: + yield cur + + def get_rls_cursor_override( + vendor_id: CurrentVendorId, + ) -> typing.Generator[Cursor, None, None]: + """ + Yields a cursor after applying the authenticated vendor context to the current transaction. + + Used by: + override_function_dependencies - binds FastAPI's `get_rls_cursor` dependency to the per-test connection. + + Args: + vendor_id: `str` vendor identifier to install through `app.set_app_context`. 
+ + Returns: + typing.Generator[Cursor, None, None]: A cursor with the RLS app context already set for the authenticated vendor. + """ + with db_conn.cursor() as cur: + cur.execute("SELECT app.set_app_context(%s)", (vendor_id,)) + yield cur - def _get_db() -> typing.Generator[Cursor, None, None]: - with connect(test_container.get_connection_url()) as conn: - with conn.cursor() as cursor: - yield cursor + app.dependency_overrides[get_db] = get_db_override + app.dependency_overrides[get_rls_cursor] = get_rls_cursor_override - app.dependency_overrides[get_db] = _get_db - yield - app.dependency_overrides.pop(get_db, None) + try: + yield + finally: + # Avoid .clear() to preserve session-level overrides + app.dependency_overrides.pop(get_db, None) + app.dependency_overrides.pop(get_rls_cursor, None) diff --git a/backend/tests/core/test_config.py b/backend/tests/core/test_config.py new file mode 100644 index 0000000..35f4379 --- /dev/null +++ b/backend/tests/core/test_config.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +import pytest + +from app.core.config import Settings + + +@pytest.mark.unit +def test_settings_builds_database_dsn_from_fields(faker) -> None: + """ + Verifies that `app.core.config.Settings` assembles `DATABASE_DSN` from the + individual Postgres fields. This matters because database connectivity + depends on the computed DSN matching the configured environment. + + Covers: + - `app.core.config.Settings` + + Rationale: + The test instantiates `Settings` directly because DSN assembly is a pure configuration concern with no external dependencies. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate realistic settings values. 
+ """ + settings = Settings( + SECRET_KEY=faker.password(length=32, special_chars=False), + PROJECT_NAME=faker.slug(), + POSTGRES_SERVER=faker.ipv4_private(), + POSTGRES_PORT=faker.random_int(min=1025, max=65535), + POSTGRES_USER=faker.user_name(), + POSTGRES_PASSWORD=faker.password(length=16, special_chars=False), + POSTGRES_DB=faker.slug(), + ) + expected_dsn = ( + f"postgresql://{settings.POSTGRES_USER}:{settings.POSTGRES_PASSWORD}" + f"@{settings.POSTGRES_SERVER}:{settings.POSTGRES_PORT}/{settings.POSTGRES_DB}" + ) + + assert str(settings.DATABASE_DSN) == expected_dsn, ( + f"Expected DATABASE_DSN '{expected_dsn}', got '{settings.DATABASE_DSN}'" + ) + + +@pytest.mark.unit +def test_settings_expose_default_token_expiries(faker) -> None: + """ + Verifies that `app.core.config.Settings` exposes the default access-token + and refresh-token expiry values. This matters because auth token lifetime + is part of the application's default security contract. + + Covers: + - `app.core.config.Settings` + + Rationale: + The test constructs a minimal valid settings object and asserts the defaults that should apply when the expiry fields are not overridden. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the required non-expiry settings values. 
+ """ + settings = Settings( + SECRET_KEY=faker.password(length=32, special_chars=False), + PROJECT_NAME=faker.slug(), + POSTGRES_SERVER=faker.ipv4_private(), + POSTGRES_USER=faker.user_name(), + POSTGRES_PASSWORD=faker.password(length=16, special_chars=False), + POSTGRES_DB=faker.slug(), + ) + + assert settings.ACCESS_TOKEN_EXPIRE_MINUTES == 60, ( + f"Expected default access token expiry 60, got {settings.ACCESS_TOKEN_EXPIRE_MINUTES}" + ) + assert settings.REFRESH_TOKEN_EXPIRE_DAYS == 7, ( + f"Expected default refresh token expiry 7, got {settings.REFRESH_TOKEN_EXPIRE_DAYS}" + ) diff --git a/backend/tests/core/test_ed25519.py b/backend/tests/core/test_ed25519.py new file mode 100644 index 0000000..790e0a8 --- /dev/null +++ b/backend/tests/core/test_ed25519.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +import pytest +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.primitives.asymmetric.ed25519 import ( + Ed25519PrivateKey, + Ed25519PublicKey, +) +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, + PublicFormat, +) + +from app.core.ed25519 import load_private_ed25519_key, load_public_ed25519_key + + +@pytest.mark.unit +def test_load_private_ed25519_key_returns_private_key() -> None: + """ + Verifies that `app.core.ed25519.load_private_ed25519_key` accepts a + PEM-encoded Ed25519 private key and returns the expected key type. This + matters because license signing and verification depend on loading the + correct asymmetric key material. + + Covers: + - `app.core.ed25519.load_private_ed25519_key` + + Rationale: + The test uses real generated key material because the loader contract is defined by accepted and rejected key types. + + Fixtures: + None. 
+ """ + private_key = Ed25519PrivateKey.generate() + private_key_pem = private_key.private_bytes( + encoding=Encoding.PEM, + format=PrivateFormat.PKCS8, + encryption_algorithm=NoEncryption(), + ) + loaded_private_key = load_private_ed25519_key(private_key_pem) + + assert isinstance(loaded_private_key, Ed25519PrivateKey), ( + f"Expected Ed25519PrivateKey, got {type(loaded_private_key)}" + ) + + +@pytest.mark.unit +def test_load_private_ed25519_key_rejects_rsa_private_key() -> None: + """ + Verifies that `app.core.ed25519.load_private_ed25519_key` rejects PEM data + for a non-Ed25519 private key. This matters because callers should fail fast + when they provide the wrong key type for Ed25519 operations. + + Covers: + - `app.core.ed25519.load_private_ed25519_key` + + Rationale: + A real RSA key is used so the failure documents the public loader contract rather than a mocked type check. + + Fixtures: + None. + """ + rsa_private_key = rsa.generate_private_key( + public_exponent=65537, key_size=2048 + ) + rsa_private_key_pem = rsa_private_key.private_bytes( + encoding=Encoding.PEM, + format=PrivateFormat.PKCS8, + encryption_algorithm=NoEncryption(), + ) + + with pytest.raises(TypeError, match="not an Ed25519 private key"): + load_private_ed25519_key(rsa_private_key_pem) + + +@pytest.mark.unit +def test_load_public_ed25519_key_returns_public_key() -> None: + """ + Verifies that `app.core.ed25519.load_public_ed25519_key` accepts a + PEM-encoded Ed25519 public key and returns the expected key type. This + matters because license verification depends on loading the correct public + key material. + + Covers: + - `app.core.ed25519.load_public_ed25519_key` + + Rationale: + The test generates a real Ed25519 keypair and round-trips only the public key because that is the public contract of the loader. + + Fixtures: + None. 
+ """ + public_key_pem = ( + Ed25519PrivateKey + .generate() + .public_key() + .public_bytes( + encoding=Encoding.PEM, format=PublicFormat.SubjectPublicKeyInfo + ) + ) + loaded_public_key = load_public_ed25519_key(public_key_pem) + + assert isinstance(loaded_public_key, Ed25519PublicKey), ( + f"Expected Ed25519PublicKey, got {type(loaded_public_key)}" + ) + + +@pytest.mark.unit +def test_load_public_ed25519_key_rejects_rsa_public_key() -> None: + """ + Verifies that `app.core.ed25519.load_public_ed25519_key` rejects PEM data + for a non-Ed25519 public key. This matters because callers must not + accidentally verify signatures with incompatible key material. + + Covers: + - `app.core.ed25519.load_public_ed25519_key` + + Rationale: + A real RSA public key is used so the failure path reflects the same key-type mismatch production code would encounter. + + Fixtures: + None. + """ + rsa_public_key_pem = ( + rsa + .generate_private_key(public_exponent=65537, key_size=2048) + .public_key() + .public_bytes( + encoding=Encoding.PEM, format=PublicFormat.SubjectPublicKeyInfo + ) + ) + + with pytest.raises(TypeError, match="not an Ed25519 public key"): + load_public_ed25519_key(rsa_public_key_pem) diff --git a/backend/tests/core/test_exception_handlers.py b/backend/tests/core/test_exception_handlers.py index 7f2f45d..8a20896 100644 --- a/backend/tests/core/test_exception_handlers.py +++ b/backend/tests/core/test_exception_handlers.py @@ -17,6 +17,7 @@ from app.core.exception_handlers import ( api_exception_handler, + build_error_details, general_exception_handler, validation_exception_handler, ) @@ -34,6 +35,7 @@ ServiceUnavailableException, ValidationException, ) +from app.schemas.response import ErrorDetail # Pre-compiled UUID v4 pattern reused across request-id assertions @@ -42,9 +44,124 @@ ) +@pytest.mark.unit +def test_build_error_details_none_returns_empty_list(): + """ + Verifies that `app.core.exception_handlers._build_error_details` returns an + empty list when given 
`None`. This matters because error handlers normalize + optional detail payloads into a consistent response shape. + + Covers: + - `app.core.exception_handlers._build_error_details` + + Rationale: + This is a direct helper-unit test with no fixtures because the normalization rule is pure and deterministic. + + Fixtures: + None. + """ + details = build_error_details(None) + assert details == [], "Expected no details when input is None" + + +@pytest.mark.unit +def test_build_error_details_accepts_error_detail_instance(): + """ + Verifies that `app.core.exception_handlers._build_error_details` preserves + an existing `ErrorDetail` instance. This matters because handlers should + not mutate already-normalized detail objects. + + Covers: + - `app.core.exception_handlers._build_error_details` + + Rationale: + The test uses a real `ErrorDetail` instance because the helper contract is about normalization, not copying. + + Fixtures: + None. + """ + item = ErrorDetail(field="email", message="Invalid email") + + details = build_error_details(item) + assert len(details) == 1, "Expected exactly one normalized detail entry" + assert details[0] is item, ( + "Expected ErrorDetail instance input to be preserved without copying" + ) + + +@pytest.mark.unit +def test_build_error_details_dict_uses_fallback_message_when_missing(): + """ + Verifies that `app.core.exception_handlers._build_error_details` supplies + the fallback message when a detail dict omits `message`. This matters + because API error details must always carry a message field after + normalization. + + Covers: + - `app.core.exception_handlers._build_error_details` + + Rationale: + A single dict input is enough because the contract under test is the fallback message behavior. + + Fixtures: + None. 
+ """ + details = build_error_details({"field": "password"}) + assert len(details) == 1, "Expected exactly one normalized detail entry" + assert details[0].field == "password", ( + "Expected dict field to be mapped to ErrorDetail.field" + ) + assert details[0].message == "Unknown error", ( + "Expected missing/empty dict message to use 'Unknown error' fallback" + ) + + +@pytest.mark.unit +def test_build_error_details_non_dict_item_is_stringified(): + """ + Verifies that `app.core.exception_handlers._build_error_details` stringifies + non-dict inputs into a detail message. This matters because handlers may + receive arbitrary exception detail items that still need to be serialized + into the response contract. + + Covers: + - `app.core.exception_handlers._build_error_details` + + Rationale: + The runtime-error input documents the behavior for non-dict, non-`ErrorDetail` detail items. + + Fixtures: + None. + """ + details = build_error_details(RuntimeError("boom")) + assert len(details) == 1, "Expected exactly one normalized detail entry" + assert details[0].field is None, ( + "Expected non-dict inputs to map to ErrorDetail with field=None" + ) + assert details[0].message == "boom", ( + "Expected non-dict inputs to be stringified into detail message" + ) + + @pytest.fixture(scope="module") def error_contract_app(): - """FastAPI app wired with all exception handlers.""" + """ + Provides a FastAPI app wired with the project exception handlers and test-only endpoints that trigger each handler path. + + Scope: module — the app wiring is expensive enough to share within the module and tests do not mutate the router structure. + + Provides: + A `FastAPI` instance configured with API, validation, and general exception handlers plus endpoints that raise representative exceptions. + + Dependencies: + None. + + Teardown: + None. + + Note: + The fixture exists solely to exercise the real response envelope produced by the installed handlers. 
+ """ app = FastAPI() app.add_exception_handler(APIException, api_exception_handler) @@ -138,7 +255,7 @@ async def validate_nested(data: NestedBodyRequest): @pytest.mark.integration @pytest.mark.parametrize( - "endpoint,expected_code,expected_status", + ("endpoint", "expected_code", "expected_status"), [ pytest.param( "/validation", @@ -216,7 +333,41 @@ async def validate_nested(data: NestedBodyRequest): def test_error_status_code_and_code_match( error_contract_app, endpoint, expected_code, expected_status ): - """HTTP status and error code must match for every type.""" + """ + Verifies that each registered exception path returns the expected HTTP + status code and error code pair. This matters because clients and + observability tooling depend on the wire status and structured error code + staying aligned. + + Covers: + - `app.core.exception_handlers.api_exception_handler` + - `app.core.exception_handlers.general_exception_handler` + - `app.core.exception_handlers.validation_exception_handler` + + Rationale: + The test drives the app through real endpoints because the contract under test is the final HTTP response envelope for each handler path. + + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. + + Parametrize: + endpoint: The route that triggers the handler under test. + expected_code: The error code expected in the response body. + expected_status: The HTTP status expected on the response. + Cases: + - — validation exception path. + - — invalid-authentication path. + - — expired-authentication path. + - — authorization failure path. + - — generic not-found path. + - — license-not-found path. + - — generic conflict path. + - — license-revoked path. + - — license-expired path. + - — business-logic error path. + - — service-unavailable path. + - — uncaught exception path. 
+ """ client = TestClient(error_contract_app, raise_server_exceptions=False) response = client.get(endpoint) @@ -247,7 +398,20 @@ def test_error_status_code_and_code_match( @pytest.mark.integration def test_error_details_structure(error_contract_app): - """Details list must contain exact field+message pairs.""" + """ + Verifies that validation-style details are serialized with the expected + field and message pairs. This matters because API clients rely on the + `details` list to display precise validation feedback. + + Covers: + - `app.core.exception_handlers.api_exception_handler` + + Rationale: + The test exercises the real `/validation` endpoint so the final response envelope is asserted end to end. + + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. + """ client = TestClient(error_contract_app, raise_server_exceptions=False) response = client.get("/validation") @@ -281,7 +445,20 @@ def test_error_details_structure(error_contract_app): @pytest.mark.integration def test_error_details_default_empty(error_contract_app): - """Exceptions without details must yield empty details.""" + """ + Verifies that exception responses without explicit details still serialize + `details` as an empty list. This matters because the API error contract + should stay shape-stable even when no field-level detail exists. + + Covers: + - `app.core.exception_handlers.api_exception_handler` + + Rationale: + The test uses a real auth error endpoint because the observable response shape is the contract that matters. + + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. 
+ """ client = TestClient(error_contract_app, raise_server_exceptions=False) response = client.get("/auth-invalid") @@ -302,10 +479,20 @@ def test_error_details_default_empty(error_contract_app): @pytest.mark.integration def test_validation_handler_nested_field_path(error_contract_app): - """validation_exception_handler must join nested loc segments with dots. + """ + Verifies that `app.core.exception_handlers.validation_exception_handler` + joins nested validation locations with dots. This matters because clients + need a stable field path such as `address.street` to map nested validation + errors back to inputs. + + Covers: + - `app.core.exception_handlers.validation_exception_handler` + + Rationale: + The nested-body endpoint is exercised through `TestClient` because the contract under test is the emitted validation detail field path. - Sending {"address": {}} triggers loc=["body", "address", "street"]; - the handler must produce field="address.street" via its dot-join logic. + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. """ client = TestClient(error_contract_app, raise_server_exceptions=False) response = client.post("/validate-nested", json={"address": {}}) @@ -327,12 +514,19 @@ def test_validation_handler_nested_field_path(error_contract_app): def test_validation_handler_body_level_error_sets_field_to_none( error_contract_app, ): - """Body-level error (loc with one element after slicing) - must produce field=None in the detail. + """ + Verifies that `app.core.exception_handlers.validation_exception_handler` + emits `field=None` for body-level validation failures. This matters because + some request errors apply to the whole body rather than a named field. + + Covers: + - `app.core.exception_handlers.validation_exception_handler` + + Rationale: + The test posts a scalar to an object endpoint so the handler receives a body-level location and must normalize it into a `None` field. 
- Sending an integer body against an object-typed endpoint - triggers loc=["body"], making loc[1:] empty and - field_path="", so field=None. + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. """ client = TestClient(error_contract_app, raise_server_exceptions=False) response = client.post("/validate-body", json=5) @@ -361,7 +555,7 @@ def test_validation_handler_body_level_error_sets_field_to_none( @pytest.mark.integration @pytest.mark.parametrize( - "endpoint,method,json_body,raise_exceptions", + ("endpoint", "method", "json_body", "raise_exceptions"), [ pytest.param( "/auth-invalid", "get", None, True, id="api_exception_handler" @@ -381,7 +575,33 @@ def test_validation_handler_body_level_error_sets_field_to_none( def test_request_id_header_matches_body_and_is_uuid_v4( error_contract_app, endpoint, method, json_body, raise_exceptions ): - """X-Request-ID header must match body and be UUID v4.""" + """ + Verifies that every exception-handler response echoes the same request id in + the header and body and that the value is UUIDv4-shaped. This matters + because traceability depends on a single correlation id surviving across + both response surfaces. + + Covers: + - `app.core.exception_handlers.api_exception_handler` + - `app.core.exception_handlers.general_exception_handler` + - `app.core.exception_handlers.validation_exception_handler` + + Rationale: + The parametrized endpoints cover one path for each handler family so the request-id contract is checked across the full error stack. + + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. + + Parametrize: + endpoint: Route that triggers the handler under test. + method: HTTP verb used to reach that route. + json_body: Optional request JSON body for the route. + raise_exceptions: Whether the `TestClient` should re-raise server exceptions. 
+ Cases: + - — exercises an APIException-derived handler path. + - — exercises a request-validation handler path. + - — exercises the uncaught-exception handler path. + """ client = TestClient( error_contract_app, raise_server_exceptions=raise_exceptions ) @@ -408,7 +628,20 @@ def test_request_id_header_matches_body_and_is_uuid_v4( @pytest.mark.integration def test_request_id_uniqueness_across_requests(error_contract_app): - """Repeated requests must each get a distinct request_id.""" + """ + Verifies that separate error responses receive distinct request ids across + repeated requests. This matters because a reused request id would break + per-request tracing and log correlation. + + Covers: + - request-id generation behavior exercised through the exception handlers + + Rationale: + Repeating the same failing request keeps the route constant while checking the per-request uniqueness contract. + + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. 
+ """ client = TestClient(error_contract_app, raise_server_exceptions=False) ids = { @@ -428,62 +661,21 @@ def test_request_id_uniqueness_across_requests(error_contract_app): @pytest.mark.integration -def test_api_exception_handler_returns_correct_structure(error_contract_app): - """api_exception_handler must return all required top-level error fields.""" - client = TestClient(error_contract_app, raise_server_exceptions=False) - response = client.get("/auth-invalid") - - assert response.status_code == status.HTTP_401_UNAUTHORIZED, ( - f"Expected status 401 for APIException, got {response.status_code}" - ) - error = response.json()["error"] - for field in ("code", "message", "http_status", "details", "request_id"): - assert field in error, ( - f"Missing field '{field}' in error response, got: {error.keys()}" - ) - - -@pytest.mark.integration -def test_validation_exception_handler_returns_correct_structure( - error_contract_app, -): - """validation_exception_handler must return - VALIDATION_FAILED with populated details. +def test_general_exception_handler_returns_sanitized_500(error_contract_app): """ - client = TestClient(error_contract_app, raise_server_exceptions=False) - response = client.post("/validate-body", json={"invalid": "data"}) + Verifies that `app.core.exception_handlers.general_exception_handler` + returns a sanitized 500 response instead of leaking internal exception + details. This matters because unexpected server errors should preserve the + API contract without exposing internals to clients. 
- assert response.status_code == getattr( - status, "HTTP_422_UNPROCESSABLE_CONTENT", 422 - ), f"Expected status 422 for validation error, got {response.status_code}" - error = response.json()["error"] - assert error["code"] == "VALIDATION_FAILED", ( - f"Expected code 'VALIDATION_FAILED', got '{error['code']}'" - ) - assert error["http_status"] == getattr( - status, "HTTP_422_UNPROCESSABLE_CONTENT", 422 - ), f"Expected http_status 422, got {error['http_status']}" - assert len(error["details"]) > 0, ( - "Expected at least one validation detail, got empty list" - ) - - for detail in error["details"]: - assert "field" in detail, ( - f"Expected 'field' key in detail, got keys: {detail.keys()}" - ) - assert "message" in detail, ( - f"Expected 'message' key in detail, got keys: {detail.keys()}" - ) - assert isinstance(detail["message"], str), ( - "Expected detail message to be string," - f" got {type(detail['message'])}" - ) + Covers: + - `app.core.exception_handlers.general_exception_handler` + Rationale: + The test triggers a real runtime error endpoint so the final error envelope is asserted exactly as a client would observe it. -@pytest.mark.integration -def test_general_exception_handler_returns_sanitized_500(error_contract_app): - """general_exception_handler must return 500 - with a sanitized message, never internal details. + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. """ client = TestClient(error_contract_app, raise_server_exceptions=False) response = client.get("/general-error") @@ -510,8 +702,20 @@ def test_general_exception_handler_returns_sanitized_500(error_contract_app): @pytest.mark.integration def test_validation_error_message_and_details(error_contract_app): - """validation_exception_handler must return the - standard message with non-empty detail messages. 
+ """ + Verifies that `app.core.exception_handlers.validation_exception_handler` + emits the standard validation message and non-empty detail messages. This + matters because clients depend on a stable top-level validation message and + usable detail entries. + + Covers: + - `app.core.exception_handlers.validation_exception_handler` + + Rationale: + The test exercises a real validation failure so the asserted messages come from the actual response contract. + + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. """ client = TestClient(error_contract_app, raise_server_exceptions=False) response = client.post("/validate-body", json={"invalid": "data"}) @@ -535,7 +739,7 @@ def test_validation_error_message_and_details(error_contract_app): @pytest.mark.integration @pytest.mark.parametrize( - "endpoint,expected_message", + ("endpoint", "expected_message"), [ pytest.param("/auth-invalid", "Invalid credentials", id="auth_invalid"), pytest.param("/auth-expired", "Token has expired", id="auth_expired"), @@ -567,8 +771,36 @@ def test_validation_error_message_and_details(error_contract_app): def test_raised_error_messages_roundtrip_correctly( error_contract_app, endpoint, expected_message ): - """The message passed to the constructor must - appear in the response. + """ + Verifies that exception messages raised by the test endpoints appear + unchanged in the serialized API error response where that is the intended + contract. This matters because domain-specific error messages are part of + the client-visible failure contract for handled exceptions. + + Covers: + - `app.core.exception_handlers.api_exception_handler` + + Rationale: + The test drives each endpoint through `TestClient` because the contract under test is the final response body seen by clients. + + Fixtures: + error_contract_app: FastAPI app configured with the project exception handlers and trigger endpoints. 
+ + Parametrize: + endpoint: Route that raises the exception under test. + expected_message: The message expected in the serialized error body. + Cases: + - — invalid-authentication message. + - — expired-authentication message. + - — authorization message. + - — generic not-found message. + - — generic conflict message. + - — validation exception message. + - — business-logic message. + - — service-unavailable message. + - — license-not-found message. + - — license-revoked message. + - — license-expired message. """ client = TestClient(error_contract_app, raise_server_exceptions=False) response = client.get(endpoint) diff --git a/backend/tests/core/test_exceptions.py b/backend/tests/core/test_exceptions.py index 53b0161..bb03af0 100644 --- a/backend/tests/core/test_exceptions.py +++ b/backend/tests/core/test_exceptions.py @@ -1,11 +1,3 @@ -""" -Unit tests for exception classes in app.core.exceptions. - -Covers: error codes, HTTP status codes, base-class parameter -storage, details defaulting, custom details, custom messages, -and default message lengths. -""" - import pytest from fastapi import status @@ -34,7 +26,7 @@ @pytest.mark.unit @pytest.mark.parametrize( - "exception_class,expected_code", + ("exception_class", "expected_code"), [ pytest.param( ValidationException, @@ -99,7 +91,37 @@ ], ) def test_exception_has_correct_error_code(exception_class, expected_code): - """Each exception class must carry the expected error_code.""" + """ + Verifies that each concrete exception class in `app.core.exceptions` exposes + the expected `error_code`. This matters because the error code is the + machine-readable part of the public API error contract. + + Covers: + - concrete exception classes in `app.core.exceptions` + + Rationale: + A parametrized class matrix keeps the exception-to-code mapping documented in one place. + + Fixtures: + None. + + Parametrize: + exception_class: Exception class being instantiated. 
+ expected_code: `ErrorCode` value expected on the exception instance. + Cases: + - — validation error mapping. + - — invalid-authentication mapping. + - — expired-authentication mapping. + - — authorization mapping. + - — generic not-found mapping. + - — generic conflict mapping. + - — business-logic mapping. + - — service-unavailable mapping. + - — license-not-found mapping. + - — license-revoked mapping. + - — license-expired mapping. + - — license-key-generation mapping. + """ exc = exception_class() assert exc.error_code == expected_code, ( f"{exception_class.__name__} has error_code={exc.error_code.value}, " @@ -114,7 +136,7 @@ def test_exception_has_correct_error_code(exception_class, expected_code): @pytest.mark.unit @pytest.mark.parametrize( - "exception_class,expected_status", + ("exception_class", "expected_status"), [ pytest.param( ValidationException, @@ -179,7 +201,37 @@ def test_exception_has_correct_error_code(exception_class, expected_code): ], ) def test_exception_has_correct_http_status(exception_class, expected_status): - """Each exception class must carry the expected http_status.""" + """ + Verifies that each concrete exception class in `app.core.exceptions` exposes + the expected HTTP status code. This matters because the status code is part + of the API error envelope emitted by the exception handlers. + + Covers: + - concrete exception classes in `app.core.exceptions` + + Rationale: + The parametrized matrix keeps the exception-to-status mapping readable and avoids duplicate single-case tests. + + Fixtures: + None. + + Parametrize: + exception_class: Exception class being instantiated. + expected_status: HTTP status code expected on the exception instance. + Cases: + - — validation status mapping. + - — invalid-authentication status mapping. + - — expired-authentication status mapping. + - — authorization status mapping. + - — generic not-found status mapping. + - — generic conflict status mapping. + - — business-logic status mapping. 
+ - — service-unavailable status mapping. + - — license-not-found status mapping. + - — license-revoked status mapping. + - — license-expired status mapping. + - — license-key-generation status mapping. + """ exc = exception_class() assert exc.http_status == expected_status, ( f"{exception_class.__name__} has http_status={exc.http_status}, " @@ -193,27 +245,42 @@ def test_exception_has_correct_http_status(exception_class, expected_status): @pytest.mark.unit -def test_api_exception_base_class_stores_all_parameters(): - """APIException must store all constructor args.""" +def test_api_exception_base_class_stores_all_parameters(faker): + """ + Verifies that `app.core.exceptions.APIException` stores the constructor + arguments that drive the API error contract. This matters because all + concrete exception types inherit this state and the exception handlers + serialize it directly. + + Covers: + - `app.core.exceptions.APIException` + + Rationale: + The base class is instantiated directly so the storage contract is asserted independently of any subclass defaults. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate message and detail values. 
+ """ + msg = faker.sentence() + field = faker.word() + detail_msg = faker.sentence() exc = APIException( error_code=ErrorCode.VALIDATION_FAILED, - message="Test error", + message=msg, http_status=status.HTTP_400_BAD_REQUEST, - details=[{"field": "test", "message": "error"}], + details=[{"field": field, "message": detail_msg}], ) assert exc.error_code == ErrorCode.VALIDATION_FAILED, ( f"Expected error_code VALIDATION_FAILED, got {exc.error_code}" ) - assert exc.message == "Test error", ( - f"Expected message 'Test error', got '{exc.message}'" - ) + assert exc.message == msg, f"Expected message '{msg}', got '{exc.message}'" assert exc.http_status == status.HTTP_400_BAD_REQUEST, ( f"Expected http_status 400, got {exc.http_status}" ) - assert exc.details == [{"field": "test", "message": "error"}], ( + assert exc.details == [{"field": field, "message": detail_msg}], ( f"Expected details with test field, got {exc.details}" ) - assert str(exc) == "Test error", ( + assert str(exc) == msg, ( f"Expected str(exc) to return message, got '{exc!s}'" ) @@ -224,11 +291,24 @@ def test_api_exception_base_class_stores_all_parameters(): @pytest.mark.unit -def test_api_exception_with_details_none_defaults_to_empty(): - """APIException(details=None) must produce an empty details list.""" +def test_api_exception_with_details_none_defaults_to_empty(faker): + """ + Verifies that `app.core.exceptions.APIException` normalizes `details=None` + into an empty list. This matters because handlers expect a list-shaped + details field even when no details are provided. + + Covers: + - `app.core.exceptions.APIException` + + Rationale: + The test isolates the `details=None` case on the base class because that normalization is inherited by downstream exceptions. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the exception message. 
+ """ exc = APIException( error_code=ErrorCode.VALIDATION_FAILED, - message="Test error", + message=faker.sentence(), http_status=status.HTTP_400_BAD_REQUEST, details=None, ) @@ -246,7 +326,27 @@ def test_api_exception_with_details_none_defaults_to_empty(): ], ) def test_exception_with_details_none_defaults_to_empty_list(exception_class): - """Exceptions that accept a details kwarg must treat details=None as [].""" + """ + Verifies that detail-bearing concrete exceptions normalize `details=None` + into an empty list. This matters because callers should get the same details + shape whether they use the base class or concrete subclasses. + + Covers: + - `app.core.exceptions.ValidationException` + - `app.core.exceptions.BusinessLogicException` + + Rationale: + A small parametrized matrix is enough because the subclasses share the same details-defaulting contract. + + Fixtures: + None. + + Parametrize: + exception_class: Concrete exception class that accepts a `details` keyword. + Cases: + - — validation exception defaulting. + - — business-logic exception defaulting. + """ exc = exception_class(details=None) assert exc.details == [], ( f"{exception_class.__name__} with details=None should produce [], " @@ -261,7 +361,7 @@ def test_exception_with_details_none_defaults_to_empty_list(exception_class): @pytest.mark.unit @pytest.mark.parametrize( - "exception_class,message,details", + ("exception_class", "message", "details"), [ pytest.param( ValidationException, @@ -281,8 +381,28 @@ def test_exception_with_details_none_defaults_to_empty_list(exception_class): ], ) def test_exception_stores_custom_details(exception_class, message, details): - """Exceptions must store the exact message and - details passed at construction. + """ + Verifies that detail-bearing concrete exceptions preserve the custom message + and detail payload passed at construction. This matters because handlers + serialize these fields directly into the API response. 
+ + Covers: + - `app.core.exceptions.ValidationException` + - `app.core.exceptions.BusinessLogicException` + + Rationale: + The parametrized cases document both detail-bearing exception families that expose this constructor contract. + + Fixtures: + None. + + Parametrize: + exception_class: Concrete exception class under test. + message: Custom message passed into the exception constructor. + details: Custom detail payload expected to round-trip unchanged. + Cases: + - — validation exception with two detail entries. + - — business-logic exception with one detail entry. """ exc = exception_class(message=message, details=details) assert exc.message == message, ( @@ -320,9 +440,37 @@ def test_exception_stores_custom_details(exception_class, message, details): ), ], ) -def test_simple_exception_accepts_custom_message(exception_class): - """Positional-message exceptions must store whatever message is passed.""" - custom_msg = f"Custom message for {exception_class.__name__}" +def test_simple_exception_accepts_custom_message(exception_class, faker): + """ + Verifies that positional-message exception classes preserve the custom + message passed to their constructor. This matters because the exception + handlers expose these messages directly to API clients for handled + exceptions. + + Covers: + - positional-message concrete exception classes in `app.core.exceptions` + + Rationale: + The parametrized matrix keeps the shared custom-message contract aligned across all positional-message subclasses. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the custom message. + + Parametrize: + exception_class: Positional-message exception class under test. + Cases: + - — invalid-authentication exception. + - — expired-authentication exception. + - — authorization exception. + - — generic not-found exception. + - — generic conflict exception. + - — service-unavailable exception. + - — license-not-found exception. + - — license-revoked exception. 
+ - — license-expired exception. + - — license-key-generation exception. + """ + custom_msg = faker.sentence() exc = exception_class(custom_msg) assert exc.message == custom_msg, ( f"Expected message '{custom_msg}', got '{exc.message}'" @@ -337,68 +485,36 @@ def test_simple_exception_accepts_custom_message(exception_class): pytest.param(BusinessLogicException, id="business_logic_exception"), ], ) -def test_parameterized_exception_accepts_custom_message(exception_class): - """Keyword-message exceptions must store whatever message is passed.""" - custom_msg = f"Custom message for {exception_class.__name__}" +def test_parameterized_exception_accepts_custom_message(exception_class, faker): + """ + Verifies that keyword-message exception classes preserve the custom message + passed to their constructor. This matters because these subclasses expose + their message through the same API error serialization path as the base + class. + + Covers: + - `app.core.exceptions.ValidationException` + - `app.core.exceptions.BusinessLogicException` + + Rationale: + A small parametrized set keeps the shared keyword-message constructor contract explicit. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the custom message. + + Parametrize: + exception_class: Keyword-message exception class under test. + Cases: + - — validation exception custom message. + - — business-logic exception custom message. 
+ """ + custom_msg = faker.sentence() exc = exception_class(message=custom_msg) assert exc.message == custom_msg, ( f"Expected message '{custom_msg}', got '{exc.message}'" ) -# --------------------------------------------------------------------------- -# Default message sanity -# --------------------------------------------------------------------------- - - -@pytest.mark.unit -@pytest.mark.parametrize( - "exception_class", - [ - pytest.param(ValidationException, id="validation_exception"), - pytest.param(AuthenticationException, id="authentication_exception"), - pytest.param(AuthExpiredException, id="auth_expired_exception"), - pytest.param(AuthorizationException, id="authorization_exception"), - pytest.param(NotFoundException, id="not_found_exception"), - pytest.param(ConflictException, id="conflict_exception"), - pytest.param(BusinessLogicException, id="business_logic_exception"), - pytest.param( - ServiceUnavailableException, id="service_unavailable_exception" - ), - pytest.param( - LicenseNotFoundException, id="license_not_found_exception" - ), - pytest.param(LicenseRevokedException, id="license_revoked_exception"), - pytest.param(LicenseExpiredException, id="license_expired_exception"), - pytest.param( - LicenseKeyGenerationError, id="license_key_generation_error" - ), - ], -) -def test_exception_default_message_has_reasonable_length(exception_class): - """Default messages must be between 5 and 200 characters.""" - message = exception_class().message - assert message, f"{exception_class.__name__} has empty default message" - assert 5 <= len(message) <= 200, ( - f"{exception_class.__name__} message '{message}' is outside " - f"reasonable 5-200 character range ({len(message)} chars)" - ) - - -# --------------------------------------------------------------------------- -# Inheritance chain -# --------------------------------------------------------------------------- - - -@pytest.mark.unit -def test_api_exception_is_subclass_of_exception(): - """APIException 
must inherit from Exception so the general_exception_handler - fallback can catch it if the api_exception_handler is ever misconfigured.""" - assert issubclass(APIException, Exception), ( - "APIException must be a subclass of Exception" - ) - - @pytest.mark.unit @pytest.mark.parametrize( "exception_class", @@ -424,10 +540,35 @@ def test_api_exception_is_subclass_of_exception(): ], ) def test_concrete_exception_is_subclass_of_api_exception(exception_class): - """Every concrete exception must subclass - APIException so api_exception_handler catches it. - If this invariant breaks, those exceptions silently - fall through to general_exception_handler (500s). + """ + Verifies that every concrete exception class subclasses + `app.core.exceptions.APIException`. This matters because the API exception + handler relies on that inheritance chain to catch handled application errors + + Covers: + - concrete exception classes in `app.core.exceptions` + + Rationale: + The inheritance check is expressed directly because the contract is structural rather than behavioral. + + Fixtures: + None. + + Parametrize: + exception_class: Concrete exception class whose inheritance chain is being checked. + Cases: + - — invalid-authentication inheritance. + - — expired-authentication inheritance. + - — authorization inheritance. + - — generic not-found inheritance. + - — generic conflict inheritance. + - — validation inheritance. + - — business-logic inheritance. + - — service-unavailable inheritance. + - — license-not-found inheritance. + - — license-revoked inheritance. + - — license-expired inheritance. + - — license-key-generation inheritance. 
""" assert issubclass(exception_class, APIException), ( f"{exception_class.__name__} is not a subclass of APIException; " diff --git a/backend/tests/core/test_security.py b/backend/tests/core/test_security.py new file mode 100644 index 0000000..4502967 --- /dev/null +++ b/backend/tests/core/test_security.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +import pytest + +from app.core.security import ( + create_access_token, + create_refresh_token, + decode_token, + get_password_hash, + verify_password, +) + + +@pytest.mark.unit +def test_get_password_hash_round_trip_verifies_password(faker) -> None: + """ + Verifies that `app.core.security.get_password_hash` and + `app.core.security.verify_password` work together for the valid-password + round trip. This matters because the application's password wrapper contract + depends on a stored hash being accepted for the original plaintext. + + Covers: + - `app.core.security.get_password_hash` + - `app.core.security.verify_password` + + Rationale: + This test stays at the project boundary by checking the wrapper round trip instead of asserting on hash-library implementation details. Historical third-party-coupled assertions were removed under REM-012. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate a password value. 
+ """ + plain_password = faker.password(length=16, special_chars=True) + hashed_password = get_password_hash(plain_password) + valid, updated_hash = verify_password(plain_password, hashed_password) + + assert hashed_password != plain_password, ( + "Expected password hashing to produce a value distinct from the plaintext" + ) + assert valid is True, ( + "Expected hashed password to verify the original plaintext" + ) + assert updated_hash is None, ( + "Expected a freshly generated password hash to require no upgrade" + ) + + +@pytest.mark.unit +def test_verify_password_rejects_wrong_password(faker) -> None: + """ + Verifies that `app.core.security.verify_password` rejects an incorrect plaintext for a stored hash. + This matters because invalid credentials must fail cleanly at the password-wrapper boundary. + + Covers: + - `app.core.security.get_password_hash` + - `app.core.security.verify_password` + + Rationale: + The test uses a real hash and a different plaintext so it documents the wrapper behavior without testing the hash library itself. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the original and wrong passwords. 
+ """ + original_password = faker.password(length=16, special_chars=True) + wrong_password = faker.password(length=16, special_chars=True) + hashed_password = get_password_hash(original_password) + valid, updated_hash = verify_password(wrong_password, hashed_password) + + assert valid is False, ( + "Expected password verification to fail for the wrong input" + ) + assert updated_hash is None, ( + "Expected failed verification to avoid returning an upgraded hash" + ) + + +@pytest.mark.unit +@pytest.mark.parametrize( + ("token_factory", "expected_type"), + [ + pytest.param(create_access_token, "access", id="access_token"), + pytest.param(create_refresh_token, "refresh", id="refresh_token"), + ], +) +def test_token_round_trip_preserves_vendor_id_and_type( + app_settings, faker, token_factory, expected_type: str +) -> None: + """ + Verifies that tokens issued by the project helpers decode back to the + expected vendor id and token type. This matters because the application's + auth layer depends on a stable round-trip contract for access and refresh + tokens. + + Covers: + - `app.core.security.create_access_token` + - `app.core.security.create_refresh_token` + - `app.core.security.decode_token` + + Rationale: + This test exercises the project's token helpers as a boundary round trip + instead of asserting on JWT-library internals. + + Fixtures: + app_settings: Shared `Settings` object used to sign and decode the test tokens. + faker: Session-scoped `Faker` instance used to generate the vendor id claim. + + Parametrize: + token_factory: Selects which project token helper issues the token. + expected_type: The token type claim expected after decoding. + Cases: + - — issues an access token and expects the `access` claim. + - — issues a refresh token and expects the `refresh` claim. 
+ """ + vendor_id = faker.uuid4() + token = token_factory(vendor_id, app_settings) + payload = decode_token(token, app_settings) + + assert payload["vendor_id"] == vendor_id, ( + f"Expected token payload vendor_id '{vendor_id}', got '{payload['vendor_id']}'" + ) + assert payload["token_type"] == expected_type, ( + f"Expected token_type '{expected_type}', got '{payload['token_type']}'" + ) + assert "exp" in payload, ( + "Expected decoded token payload to contain an expiry claim" + ) diff --git a/backend/tests/crud/test_vendor.py b/backend/tests/crud/test_vendor.py new file mode 100644 index 0000000..1535c78 --- /dev/null +++ b/backend/tests/crud/test_vendor.py @@ -0,0 +1,165 @@ +from __future__ import annotations + +import pytest +from psycopg import Connection + +from app.core.security import get_password_hash +from app.crud.vendor import create_vendor, get_vendor_by_email, get_vendor_by_id + + +def build_vendor_input(faker) -> tuple[str, str]: + """ + Builds a vendor email and hashed password pair for CRUD integration tests. + + Used by: + test_create_vendor_returns_created_row - creates the insert payload for the happy path. + test_get_vendor_by_email_is_case_insensitive - provisions a vendor before lookup. + test_create_vendor_returns_none_for_case_insensitive_duplicate - creates the original record used to trigger the duplicate path. + test_deleted_vendor_is_excluded_from_email_and_id_lookups - provisions the vendor that is later soft-deleted. + + Args: + faker: `Faker` session fixture used to generate a unique email and password. + + Returns: + tuple[str, str]: A unique vendor email and its hashed password string. + """ + return faker.email(), get_password_hash( + faker.password(length=16, special_chars=True) + ) + + +@pytest.mark.integration +def test_create_vendor_returns_created_row(db_conn: Connection, faker) -> None: + """ + Verifies that `app.crud.vendor.create_vendor` inserts a vendor row and + returns the created record. 
This matters because higher-level auth flows + depend on CRUD creation returning the persisted vendor identity and email. + + Covers: + - `app.crud.vendor.create_vendor` + + Rationale: + This is a real database integration test because vendor creation is fundamentally a persistence contract. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + faker: Session-scoped `Faker` instance used to generate vendor credentials. + """ + email, password_hash = build_vendor_input(faker) + with db_conn.cursor() as db_cursor: + vendor = create_vendor(db_cursor, email, password_hash) + + assert vendor is not None, ( + "Expected create_vendor to return the inserted row" + ) + assert vendor["email"] == email, ( + f"Expected created vendor email '{email}', got '{vendor['email']}'" + ) + assert vendor["id"], "Expected create_vendor to return a vendor id" + + +@pytest.mark.integration +def test_get_vendor_by_email_is_case_insensitive( + db_conn: Connection, faker +) -> None: + """ + Verifies that `app.crud.vendor.get_vendor_by_email` performs + case-insensitive email lookup. This matters because the auth layer should + treat vendor emails consistently regardless of request casing. + + Covers: + - `app.crud.vendor.create_vendor` + - `app.crud.vendor.get_vendor_by_email` + + Rationale: + The test inserts a real row and queries with `upper()` casing so the lookup behavior is proven against PostgreSQL rather than mocked normalization logic. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + faker: Session-scoped `Faker` instance used to generate vendor credentials. 
+ """ + email, password_hash = build_vendor_input(faker) + with db_conn.cursor() as db_cursor: + create_vendor(db_cursor, email, password_hash) + found_vendor = get_vendor_by_email(db_cursor, email.upper()) + + assert found_vendor is not None, ( + f"Expected lookup by '{email.upper()}' to find vendor '{email}'" + ) + assert found_vendor["email"] == email, ( + f"Expected case-insensitive lookup to return '{email}', got '{found_vendor['email']}'" + ) + + +@pytest.mark.integration +def test_create_vendor_returns_none_for_case_insensitive_duplicate( + db_conn: Connection, faker +) -> None: + """ + Verifies that `app.crud.vendor.create_vendor` returns `None` when a + case-insensitive duplicate email already exists. This matters because + duplicate vendor emails must be rejected consistently even when the casing + differs. + + Covers: + - `app.crud.vendor.create_vendor` + + Rationale: + This integration test exercises the duplicate path against the real database because the uniqueness behavior is a persistence concern. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + faker: Session-scoped `Faker` instance used to generate vendor credentials. + """ + email, password_hash = build_vendor_input(faker) + with db_conn.cursor() as db_cursor: + create_vendor(db_cursor, email, password_hash) + duplicate_vendor = create_vendor( + db_cursor, + email.swapcase(), + get_password_hash(faker.password(length=16, special_chars=True)), + ) + + assert duplicate_vendor is None, ( + "Expected create_vendor to return None when a case-insensitive duplicate email exists" + ) + + +@pytest.mark.integration +def test_deleted_vendor_is_excluded_from_email_and_id_lookups( + db_conn: Connection, faker +) -> None: + """ + Verifies that `app.crud.vendor.get_vendor_by_email` and + `app.crud.vendor.get_vendor_by_id` ignore soft-deleted vendor rows. This + matters because deleted vendors must not remain visible to auth and + business-logic lookups. 
+ + Covers: + - `app.crud.vendor.create_vendor` + - `app.crud.vendor.get_vendor_by_email` + - `app.crud.vendor.get_vendor_by_id` + + Rationale: + The test marks the persisted row as deleted in SQL and then exercises both lookup paths against the real database state. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + faker: Session-scoped `Faker` instance used to generate vendor credentials. + """ + email, password_hash = build_vendor_input(faker) + with db_conn.cursor() as db_cursor: + vendor = create_vendor(db_cursor, email, password_hash) + db_cursor.execute( + 'UPDATE app."vendors" SET "deleted_at" = NOW() WHERE "id" = %s', + (vendor["id"],), + ) + found_by_email = get_vendor_by_email(db_cursor, email) + found_by_id = get_vendor_by_id(db_cursor, vendor["id"]) + + assert found_by_email is None, ( + "Expected soft-deleted vendor to be excluded from email lookups" + ) + assert found_by_id is None, ( + "Expected soft-deleted vendor to be excluded from id lookups" + ) diff --git a/backend/tests/domain/test_activation.py b/backend/tests/domain/test_activation.py new file mode 100644 index 0000000..3dd32d9 --- /dev/null +++ b/backend/tests/domain/test_activation.py @@ -0,0 +1,144 @@ +import uuid + +import pytest +import uuid6 + +from app.domain.activation import ActivationCode + + +@pytest.mark.unit +def test_activation_code_generate_round_trips_uuid7(): + """ + Verifies that `app.domain.activation.ActivationCode.generate` preserves a + supplied UUIDv7 and emits a grouped 30-symbol activation code. This matters + because activation codes are a public representation of the underlying UUIDv7 identity. + + Covers: + - `app.domain.activation.ActivationCode.generate` + - `app.domain.activation.ActivationCode.uuid` + + Rationale: + The test uses a real UUIDv7 because the contract is the round trip between the UUID and generated activation code. + + Fixtures: + None. 
+ """ + original = uuid6.uuid7() + generated = ActivationCode.generate(original) + + assert generated.uuid == original, ( + "ActivationCode.generate must preserve the original UUID value" + ) + assert generated.code.count("-") == 5, ( + "Generated activation code must contain six 5-char groups" + ) + assert len(generated.code.replace("-", "")) == ActivationCode.LENGTH, ( + "Generated activation code must contain exactly 30 symbols" + ) + + +@pytest.mark.unit +def test_activation_code_generate_without_uuid_creates_uuid7(): + """ + Verifies that `app.domain.activation.ActivationCode.generate` creates a UUIDv7 when no source UUID is supplied. + This matters because callers rely on the helper to create new activation identities as well as encode existing ones. + + Covers: + - `app.domain.activation.ActivationCode.generate` + + Rationale: + The test asserts only the UUID version because that is the externally meaningful contract of the generated identifier. + + Fixtures: + None. + """ + generated = ActivationCode.generate() + assert generated.uuid.version == 7, ( + "ActivationCode.generate() must create a UUIDv7 when no UUID is passed" + ) + + +@pytest.mark.unit +def test_activation_code_rejects_invalid_length(): + """ + Verifies that `app.domain.activation.ActivationCode` rejects code strings that do not contain the required 30 symbols. + This matters because malformed activation codes should fail validation before decode logic runs. + + Covers: + - `app.domain.activation.ActivationCode` + + Rationale: + The failure is a direct model-construction contract, so a single invalid length case is sufficient. + + Fixtures: + None. + """ + with pytest.raises(ValueError, match="30 symbols"): + ActivationCode(code="0" * 29) + + +@pytest.mark.unit +def test_activation_code_normalizes_input_format(): + """ + Verifies that `app.domain.activation.ActivationCode` normalizes lowercase compact input into the canonical grouped format. 
+ This matters because user-supplied activation codes may omit separators or use lowercase letters. + + Covers: + - `app.domain.activation.ActivationCode` + - `app.domain.activation.ActivationCode.uuid` + + Rationale: + The test compares normalized input against a canonical generated code so the normalization contract is explicit. + + Fixtures: + None. + """ + original = uuid6.uuid7() + canonical = ActivationCode.generate(original) + compact = canonical.code.replace("-", "").lower() + + normalized = ActivationCode(code=compact) + assert normalized.code == canonical.code, ( + "ActivationCode must normalize lowercase compact input to grouped format" + ) + assert normalized.uuid == original, ( + "ActivationCode uuid property must decode normalized code correctly" + ) + + +@pytest.mark.unit +def test_activation_code_generate_rejects_non_uuid_input(): + """ + Verifies that `app.domain.activation.ActivationCode.generate` rejects non-UUID input types. + This matters because callers should get a clear error instead of silent coercion when the API is misused. + + Covers: + - `app.domain.activation.ActivationCode.generate` + + Rationale: + This is a direct type-guard test with no fixtures or patches. + + Fixtures: + None. + """ + with pytest.raises(TypeError, match="uuid cannot be of type"): + ActivationCode.generate("not-a-uuid") + + +@pytest.mark.unit +def test_activation_code_generate_rejects_non_v7_uuid(): + """ + Verifies that `app.domain.activation.ActivationCode.generate` rejects UUID values that are not version 7. + This matters because the activation-code encoding contract is defined only for UUIDv7 inputs. + + Covers: + - `app.domain.activation.ActivationCode.generate` + + Rationale: + A UUIDv4 input is enough to prove the version guard on non-v7 identifiers. + + Fixtures: + None. 
+ """ + with pytest.raises(TypeError, match="UUID version 7"): + ActivationCode.generate(uuid.uuid4()) diff --git a/backend/tests/domain/test_fingerprint.py b/backend/tests/domain/test_fingerprint.py new file mode 100644 index 0000000..7a5c0e6 --- /dev/null +++ b/backend/tests/domain/test_fingerprint.py @@ -0,0 +1,109 @@ +import hashlib +import json + +import pytest + +from app.domain.fingerprint import Device + + +def make_device(**overrides) -> Device: + """ + Builds a `Device` model with stable test identifiers that can be selectively overridden. + + Used by: + test_device_fingerprint_matches_sha256_of_sorted_json_payload - creates the baseline device used for canonical hashing. + test_device_fingerprint_is_cached_on_repeated_access - creates the device whose computed fingerprint is read twice. + test_device_fingerprint_changes_when_hardware_identity_changes - creates the two devices that differ by one hardware identifier. + + Args: + overrides: Field replacements applied on top of the default hardware identifier set. + + Returns: + Device: A `Device` instance with deterministic identifiers unless overridden by the caller. + """ + data = { + "cpu_id": "cpu-1", + "motherboard_id": "mb-1", + "motherboard_serial": "mb-serial-1", + "machine_id": "machine-1", + "primary_disk_serial": "disk-1", + } + data.update(overrides) + return Device(**data) + + +@pytest.mark.unit +def test_device_fingerprint_matches_sha256_of_sorted_json_payload(): + """ + Verifies that `app.domain.fingerprint.Device.fingerprint` is the SHA-256 digest of the sorted JSON payload of raw identifiers. + This matters because license node-locking depends on a stable, deterministic device fingerprint contract. + + Covers: + - `app.domain.fingerprint.Device` + - `app.domain.fingerprint.Device.fingerprint` + + Rationale: + The test recomputes the digest from the model dump so the fingerprint contract is explicit and independent of implementation shortcuts. + + Fixtures: + None. 
+ """ + device = make_device() + payload = json.dumps( + device.model_dump(exclude_computed_fields=True), sort_keys=True + ) + expected = f"sha256:{hashlib.sha256(payload.encode()).hexdigest()}" + + assert device.fingerprint == expected, ( + "Device.fingerprint must be sha256 of sorted JSON over raw identifiers" + ) + + +@pytest.mark.unit +def test_device_fingerprint_is_cached_on_repeated_access(): + """ + Verifies that `app.domain.fingerprint.Device.fingerprint` remains stable and is cached across repeated access. + This matters because repeated fingerprint reads should not recompute or drift within the same device instance. + + Covers: + - `app.domain.fingerprint.Device.fingerprint` + + Rationale: + The identity assertion documents the current cached-property behavior that the tests actually rely on. + + Fixtures: + None. + """ + device = make_device() + first = device.fingerprint + second = device.fingerprint + + assert first == second, ( + "Device.fingerprint must remain stable across repeated access" + ) + assert first is second, ( + "Device.fingerprint should be served from cached_property on re-access" + ) + + +@pytest.mark.unit +def test_device_fingerprint_changes_when_hardware_identity_changes(): + """ + Verifies that changing a hardware identifier changes `app.domain.fingerprint.Device.fingerprint`. + This matters because node-locking must distinguish devices when any hardware identity input changes. + + Covers: + - `app.domain.fingerprint.Device.fingerprint` + + Rationale: + The test varies only one identifier so the fingerprint change is attributable to a single hardware field. + + Fixtures: + None. 
+ """ + device_a = make_device(primary_disk_serial="disk-1") + device_b = make_device(primary_disk_serial="disk-2") + + assert device_a.fingerprint != device_b.fingerprint, ( + "Fingerprint must change when any hardware identifier changes" + ) diff --git a/backend/tests/domain/test_license.py b/backend/tests/domain/test_license.py new file mode 100644 index 0000000..8fe79c8 --- /dev/null +++ b/backend/tests/domain/test_license.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import json +from datetime import datetime + +import pytest +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, +) +from pydantic import ValidationError +from uuid6 import uuid7 + +from app.domain.license import BaseLicense, LicenseFile, NodeLockedLicense + + +def build_base_license(faker, **overrides) -> BaseLicense: + """ + Builds a valid `BaseLicense` payload that callers can override for focused test scenarios. + + Used by: + test_base_license_canonical_json_is_sorted - creates the baseline license model for canonical serialization. + test_base_license_rejects_expiry_before_creation - supplies a valid payload before one temporal field is invalidated. + test_node_locked_license_requires_positive_session_limit - seeds the node-locked license builder. + + Args: + faker: `Faker` session fixture used to generate metadata values. + overrides: Field replacements applied on top of the default base-license payload. + + Returns: + BaseLicense: A valid base-license model unless the caller intentionally overrides a field into an invalid state. 
+ """ + now = datetime.now().timestamp() + payload = { + "id": uuid7(), + "vendor_id": uuid7(), + "client_id": uuid7(), + "expires_at": now + 3600, + "max_grace_secs": 0, + "created_at": now, + "meta_data": {faker.word(): faker.word(), faker.word(): faker.word()}, + } + payload.update(overrides) + return BaseLicense(**payload) + + +def build_node_locked_license(faker, **overrides) -> NodeLockedLicense: + """ + Builds a valid `NodeLockedLicense` model that callers can override for targeted validation cases. + + Used by: + test_node_locked_license_requires_positive_session_limit - creates the baseline payload before invalidating the session limit. + test_license_file_rejects_non_ed25519_private_key - provides a valid license payload for signature-construction validation. + + Args: + faker: `Faker` session fixture used to generate metadata and fingerprint values. + overrides: Field replacements applied on top of the default node-locked payload. + + Returns: + NodeLockedLicense: A valid node-locked license model unless the caller intentionally overrides a field into an invalid state. + """ + payload = build_base_license(faker).model_dump() + payload.update({ + "device_fingerprint": faker.sha256(raw_output=False), + "session_limit": 1, + }) + payload.update(overrides) + return NodeLockedLicense(**payload) + + +@pytest.mark.unit +def test_base_license_canonical_json_is_sorted(faker) -> None: + """ + Verifies that `app.domain.license.BaseLicense.canonical_json` sorts keys and matches the model dump serialization. + This matters because signature generation depends on a stable canonical license representation. + + Covers: + - `app.domain.license.BaseLicense` + - `app.domain.license.BaseLicense.canonical_json` + + Rationale: + The expected JSON is recomputed from the model dump so the canonicalization contract is explicit. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate metadata values. 
+ """ + license_model = build_base_license(faker) + canonical_json = license_model.canonical_json() + expected_json = json.dumps( + license_model.model_dump(), sort_keys=True, default=str + ) + + assert canonical_json == expected_json, ( + f"Expected canonical JSON '{expected_json}', got '{canonical_json}'" + ) + + +@pytest.mark.unit +def test_base_license_rejects_expiry_before_creation(faker) -> None: + """ + Verifies that `app.domain.license.BaseLicense` rejects payloads whose expiry precedes creation time. + This matters because licenses with inverted temporal bounds should never be considered valid. + + Covers: + - `app.domain.license.BaseLicense` + + Rationale: + The test overrides only the temporal fields of an otherwise valid payload so the validation failure is isolated. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate a valid baseline payload. + """ + now = datetime.now().timestamp() + with pytest.raises( + ValidationError, match="expiry date cannot be before creation date" + ): + build_base_license(faker, created_at=now, expires_at=now - 1) + + +@pytest.mark.unit +def test_node_locked_license_requires_positive_session_limit(faker) -> None: + """ + Verifies that `app.domain.license.NodeLockedLicense` rejects non-positive session limits. + This matters because node-locked licensing uses the session limit as an enforced capacity constraint. + + Covers: + - `app.domain.license.NodeLockedLicense` + + Rationale: + The test mutates only the session-limit field of an otherwise valid payload so the model validation rule stays isolated. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate a valid baseline payload. 
+ """ + with pytest.raises(ValidationError, match="session_limit"): + build_node_locked_license(faker, session_limit=0) + + +@pytest.mark.unit +def test_license_file_rejects_non_ed25519_private_key(faker) -> None: + """ + Verifies that `app.domain.license.LicenseFile` rejects signature construction with a non-Ed25519 private key. + This matters because license signing must fail fast when callers provide incompatible key material. + + Covers: + - `app.domain.license.LicenseFile` + + Rationale: + A real RSA key is used so the failure documents the public key-type contract rather than a mocked branch. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the valid license payload. + """ + rsa_key = rsa.generate_private_key(public_exponent=65537, key_size=2048) + rsa_private_key_pem = rsa_key.private_bytes( + encoding=Encoding.PEM, + format=PrivateFormat.PKCS8, + encryption_algorithm=NoEncryption(), + ) + + with pytest.raises(TypeError, match="Ed25519 private key"): + LicenseFile( + license_data=build_node_locked_license(faker), + private_key_pem=rsa_private_key_pem, + ) diff --git a/backend/tests/integration/test_auth.py b/backend/tests/integration/test_auth.py deleted file mode 100644 index e2c340b..0000000 --- a/backend/tests/integration/test_auth.py +++ /dev/null @@ -1,339 +0,0 @@ -"""Integration tests for auth endpoints (signup, login, refresh). - -Uses Testcontainers for a real PostgreSQL instance with migrations applied. -Verifies the full HTTP round-trip including RLS context setting. 
-""" - -from __future__ import annotations - -import typing -import uuid -from contextlib import asynccontextmanager -from datetime import timedelta -from pathlib import Path - -import pytest -from fastapi import APIRouter as _APIRouter -from fastapi import FastAPI -from fastapi.exceptions import RequestValidationError -from fastapi.testclient import TestClient -from psycopg import Cursor, connect -from testcontainers.postgres import PostgresContainer - -from app.api.deps import ( - CurrentVendorId, - RLSCursorDep, - get_db, - get_rls_cursor, - get_settings, -) -from app.api.main import api_router -from app.core.config import Settings -from app.core.exception_handlers import ( - api_exception_handler, - general_exception_handler, - validation_exception_handler, -) -from app.core.exceptions import APIException -from app.core.security import create_access_token - - -MIGRATIONS_DIR = str(Path(__file__).parents[3] / "migrations") -API_V1 = "/api/v1" - -_test_router = _APIRouter() - - -@_test_router.get("/protected-test") -def _protected_test(vendor_id: CurrentVendorId, cursor: RLSCursorDep) -> dict: - cursor.execute("SELECT current_setting('app.vendor_id', true)") - row = cursor.fetchone() - db_vendor_id = row[0] if row else None - return {"vendor_id": vendor_id, "db_vendor_id": db_vendor_id} - - -@pytest.fixture(scope="function") -def pg_container() -> typing.Generator[PostgresContainer, None, None]: - with PostgresContainer( - "postgres:18.2-alpine3.23", driver=None - ).with_volume_mapping( - MIGRATIONS_DIR, "/docker-entrypoint-initdb.d" - ) as container: - yield container - - -@pytest.fixture(scope="function") -def test_settings() -> Settings: - return Settings( - SECRET_KEY="integration-test-secret-key-32bytes!", - PROJECT_NAME="test", - POSTGRES_SERVER="localhost", - POSTGRES_USER="test", - POSTGRES_PASSWORD="test", - POSTGRES_DB="test", - ACCESS_TOKEN_EXPIRE_MINUTES=60, - REFRESH_TOKEN_EXPIRE_DAYS=7, - ) - - -@pytest.fixture(scope="function") -def client( - 
pg_container: PostgresContainer, test_settings: Settings -) -> typing.Generator[TestClient, None, None]: - """TestClient with a lightweight test app (no real lifespan).""" - - @asynccontextmanager - async def _noop_lifespan(app: FastAPI): # noqa: RUF029 - yield - - test_app = FastAPI(lifespan=_noop_lifespan) - test_app.include_router(api_router, prefix=API_V1) - test_app.include_router(_test_router, prefix=API_V1) - test_app.add_exception_handler(APIException, api_exception_handler) - test_app.add_exception_handler( - RequestValidationError, validation_exception_handler - ) - test_app.add_exception_handler(Exception, general_exception_handler) - - def _override_get_db() -> typing.Generator[Cursor, None, None]: - with connect(pg_container.get_connection_url()) as conn: - with conn.cursor() as cur: - yield cur - - def _override_get_settings() -> Settings: - return test_settings - - test_app.dependency_overrides[get_db] = _override_get_db - test_app.dependency_overrides[get_settings] = _override_get_settings - - def _override_get_rls_cursor( - vendor_id: CurrentVendorId, - ) -> typing.Generator[Cursor, None, None]: - with connect(pg_container.get_connection_url()) as conn: - with conn.cursor() as cur: - cur.execute("SELECT app.set_app_context(%s)", (vendor_id,)) - yield cur - - test_app.dependency_overrides[get_rls_cursor] = _override_get_rls_cursor - - with TestClient(test_app) as tc: - yield tc - - -def _signup( - client: TestClient, - email: str = "vendor@test.com", - password: str = "SecurePass123!", -) -> dict: - return client.post( - f"{API_V1}/auth/signup", - json={ - "email": email, - "password": password, - "client_id": "integration-test", - }, - ).json() - - -def _login( - client: TestClient, - email: str = "vendor@test.com", - password: str = "SecurePass123!", -) -> dict: - return client.post( - f"{API_V1}/auth/login", - json={ - "email": email, - "password": password, - "client_id": "integration-test", - }, - ).json() - - -@pytest.mark.integration -class 
TestSignup: - def test_signup_creates_vendor_201(self, client: TestClient): - resp = client.post( - f"{API_V1}/auth/signup", - json={ - "email": "signup-test@example.com", - "password": "StrongPass1!", - "client_id": "test-client", - }, - ) - assert resp.status_code == 201 - body = resp.json() - assert "data" in body - assert body["data"]["vendor"]["email"] == "signup-test@example.com" - assert "id" in body["data"]["vendor"] - - def test_signup_duplicate_email_409(self, client: TestClient): - email = "dup@example.com" - # First signup succeeds - resp1 = client.post( - f"{API_V1}/auth/signup", - json={"email": email, "password": "Pass12345!", "client_id": "c1"}, - ) - assert resp1.status_code == 201 - - # Second signup with same email fails - resp2 = client.post( - f"{API_V1}/auth/signup", - json={"email": email, "password": "Pass12345!", "client_id": "c1"}, - ) - assert resp2.status_code == 409 - - def test_signup_weak_password_422(self, client: TestClient): - resp = client.post( - f"{API_V1}/auth/signup", - json={ - "email": "weak@example.com", - "password": "short", - "client_id": "c1", - }, - ) - assert resp.status_code == 422 - - def test_signup_missing_client_id_422(self, client: TestClient): - resp = client.post( - f"{API_V1}/auth/signup", - json={"email": "no-client@example.com", "password": "StrongPass1!"}, - ) - assert resp.status_code == 422 - - -@pytest.mark.integration -class TestLogin: - def test_login_returns_token_pair(self, client: TestClient): - email = "login-test@example.com" - _signup(client, email=email) - - resp = client.post( - f"{API_V1}/auth/login", - json={ - "email": email, - "password": "SecurePass123!", - "client_id": "c1", - }, - ) - assert resp.status_code == 200 - data = resp.json()["data"] - assert "access_token" in data - assert "refresh_token" in data - assert data["token_type"] == "bearer" - - def test_login_wrong_password_401(self, client: TestClient): - email = "login-fail@example.com" - _signup(client, email=email) - - resp = 
client.post( - f"{API_V1}/auth/login", - json={ - "email": email, - "password": "WrongPassword!", - "client_id": "c1", - }, - ) - assert resp.status_code == 401 - - def test_login_nonexistent_email_401(self, client: TestClient): - resp = client.post( - f"{API_V1}/auth/login", - json={ - "email": "nobody@example.com", - "password": "Pass12345!", - "client_id": "c1", - }, - ) - assert resp.status_code == 401 - - -@pytest.mark.integration -class TestRefresh: - def test_refresh_issues_new_tokens(self, client: TestClient): - email = "refresh-test@example.com" - _signup(client, email=email) - login_data = _login(client, email=email)["data"] - - resp = client.post( - f"{API_V1}/auth/refresh", - json={ - "refresh_token": login_data["refresh_token"], - "client_id": "c1", - }, - ) - assert resp.status_code == 200 - data = resp.json()["data"] - assert "access_token" in data - assert "refresh_token" in data - - def test_refresh_with_access_token_fails_401(self, client: TestClient): - email = "refresh-bad@example.com" - _signup(client, email=email) - login_data = _login(client, email=email)["data"] - - resp = client.post( - f"{API_V1}/auth/refresh", - json={ - "refresh_token": login_data["access_token"], # wrong token type - "client_id": "c1", - }, - ) - assert resp.status_code == 401 - - def test_refresh_with_garbage_token_fails_401(self, client: TestClient): - resp = client.post( - f"{API_V1}/auth/refresh", - json={"refresh_token": "not.a.real.token", "client_id": "c1"}, - ) - assert resp.status_code == 401 - - -@pytest.mark.integration -class TestProtectedEndpoints: - """Verify that protected endpoints reject invalid/missing tokens - via the full HTTP layer (Bearer parsing → dependency injection → - exception-to-HTTP translation). 
- """ - - def test_missing_token_401(self, client: TestClient): - resp = client.get(f"{API_V1}/protected-test") - assert resp.status_code == 401 - - def test_expired_token_401( - self, client: TestClient, test_settings: Settings - ): - token = create_access_token( - str(uuid.uuid4()), - test_settings, - expires_delta=timedelta(seconds=-1), - ) - resp = client.get( - f"{API_V1}/protected-test", - headers={"Authorization": f"Bearer {token}"}, - ) - assert resp.status_code == 401 - - def test_valid_token_returns_vendor_id(self, client: TestClient): - email = "protected-test@example.com" - signup_data = _signup(client, email=email) - created_vendor_id = signup_data["data"]["vendor"]["id"] - login_resp = client.post( - f"{API_V1}/auth/login", - json={ - "email": email, - "password": "SecurePass123!", - "client_id": "c1", - }, - ) - assert login_resp.status_code == 200 - token = login_resp.json()["data"]["access_token"] - - resp = client.get( - f"{API_V1}/protected-test", - headers={"Authorization": f"Bearer {token}"}, - ) - assert resp.status_code == 200 - body = resp.json() - assert body["vendor_id"] == created_vendor_id - assert body["db_vendor_id"] == created_vendor_id diff --git a/backend/tests/internal/test_base32_crockford.py b/backend/tests/internal/test_base32_crockford.py new file mode 100644 index 0000000..499d4cc --- /dev/null +++ b/backend/tests/internal/test_base32_crockford.py @@ -0,0 +1,341 @@ +import pytest + +from app.internal import base32_crockford + + +@pytest.mark.unit +@pytest.mark.parametrize( + ("number", "expected"), + [ + pytest.param(0, "0", id="zero"), + pytest.param(1, "1", id="one"), + pytest.param(31, "Z", id="max_single_char"), + pytest.param(32, "10", id="base_rollover"), + pytest.param(1234, "16J", id="large_integer"), + ], +) +def test_encode_basic(number, expected): + """ + Verifies that `app.internal.base32_crockford.encode` returns the expected Crockford representation for basic integer inputs. 
+ This matters because activation and license code generation depend on deterministic base-32 encoding.
+
+ Covers:
+ - `app.internal.base32_crockford.encode`
+
+ Rationale:
+ A small parametrized set is enough here because the contract under test is the explicit mapping for representative integers.
+
+ Fixtures:
+ None.
+
+ Parametrize:
+ number: Integer input to encode.
+ expected: Expected Crockford-encoded string.
+ Cases:
+ - zero — the zero value encodes to the single zero symbol.
+ - one — a single-digit positive integer remains a single symbol.
+ - max_single_char — the maximum one-symbol value maps to `Z`.
+ - base_rollover — the first rollover to two symbols is encoded correctly.
+ - large_integer — a larger multi-symbol number encodes deterministically.
+ """
+ encoded = base32_crockford.encode(number)
+ assert encoded == expected, (
+ f"Expected encode({number}) to return '{expected}', got '{encoded}'"
+ )
+
+
+@pytest.mark.unit
+@pytest.mark.parametrize(
+ ("number", "checksum", "expected"),
+ [
+ pytest.param(0, True, "00", id="zero_with_checksum"),
+ pytest.param(1234, True, "16JD", id="large_integer_with_checksum"),
+ ],
+)
+def test_encode_with_checksum(number, checksum, expected):
+ """
+ Verifies that `app.internal.base32_crockford.encode` appends the expected checksum symbol when checksum mode is enabled.
+ This matters because checksum-bearing activation and license codes rely on deterministic check-symbol generation.
+
+ Covers:
+ - `app.internal.base32_crockford.encode`
+
+ Rationale:
+ The parametrized cases document the public checksum contract directly from representative values.
+
+ Fixtures:
+ None.
+
+ Parametrize:
+ number: Integer input to encode.
+ checksum: Whether checksum mode is enabled.
+ expected: Expected encoded output including the checksum symbol.
+ Cases:
+ - zero_with_checksum — zero includes the expected checksum suffix.
+ - large_integer_with_checksum — a multi-symbol integer includes the expected checksum suffix.
+ """ + encoded = base32_crockford.encode(number, checksum=checksum) + assert encoded == expected, ( + f"Expected encode({number}, checksum={checksum}) to return '{expected}', got '{encoded}'" + ) + + +@pytest.mark.unit +@pytest.mark.parametrize( + ("number", "split_length", "expected"), + [ + pytest.param(1234567, 3, "15N-M7", id="split_length_3"), + pytest.param(1234567, 1, "1-5-N-M-7", id="split_length_1"), + ], +) +def test_encode_with_split_length(number, split_length, expected): + """ + Verifies that `app.internal.base32_crockford.encode` inserts hyphen separators at the requested split length. + This matters because human-facing codes are grouped for readability and must preserve deterministic formatting. + + Covers: + - `app.internal.base32_crockford.encode` + + Rationale: + The grouped outputs are asserted directly because separator placement is part of the public formatting contract. + + Fixtures: + None. + + Parametrize: + number: Integer input to encode. + split_length: Group size to insert between separators. + expected: Expected grouped encoded output. + Cases: + - — groups a multi-symbol output into chunks of three. + - — inserts separators between every symbol. + """ + encoded = base32_crockford.encode(number, split_length=split_length) + assert encoded == expected, ( + f"Expected encode({number}, split_length={split_length}) to return '{expected}', got '{encoded}'" + ) + + +@pytest.mark.unit +def test_encode_negative_raises(): + """ + Verifies that `app.internal.base32_crockford.encode` rejects negative integers. + This matters because the encoding contract is defined only for non-negative values. + + Covers: + - `app.internal.base32_crockford.encode` + + Rationale: + A single negative case is sufficient because the validation rule is not data-dependent beyond sign. + + Fixtures: + None. 
+ """ + with pytest.raises(ValueError, match="is not a positive integer"): + base32_crockford.encode(-1) + + +@pytest.mark.unit +def test_encode_negative_split_raises(): + """ + Verifies that `app.internal.base32_crockford.encode` rejects a negative split length. + This matters because grouping configuration should fail fast when it is not a positive integer. + + Covers: + - `app.internal.base32_crockford.encode` + + Rationale: + The split-length validation is a direct input guard and does not require multiple cases here. + + Fixtures: + None. + """ + with pytest.raises(ValueError, match="is not a positive integer"): + base32_crockford.encode(1, split_length=-1) + + +@pytest.mark.unit +@pytest.mark.parametrize( + ("symbol_string", "expected"), + [ + pytest.param("0", 0, id="zero"), + pytest.param("16J", 1234, id="large_integer"), + pytest.param("1-6-J", 1234, id="with_hyphens"), + ], +) +def test_decode_basic(symbol_string, expected): + """ + Verifies that `app.internal.base32_crockford.decode` parses basic Crockford strings and grouped input correctly. + This matters because human-entered codes may include separators while still needing to decode to the same integer. + + Covers: + - `app.internal.base32_crockford.decode` + + Rationale: + The parametrized cases cover plain, grouped, and multi-symbol inputs that represent the documented decode boundary. + + Fixtures: + None. + + Parametrize: + symbol_string: Input string to decode. + expected: Expected decoded integer. + Cases: + - — decodes the zero symbol. + - — decodes a multi-symbol string. + - — ignores grouping separators during decode. 
+    """
+    decoded = base32_crockford.decode(symbol_string)
+    assert decoded == expected, (
+        f"Expected decode('{symbol_string}') to return {expected}, got {decoded}"
+    )
+
+
+@pytest.mark.unit
+@pytest.mark.parametrize(
+    ("symbol_string", "expected"),
+    [
+        pytest.param("00", 0, id="zero_with_checksum"),
+        pytest.param("16JD", 1234, id="large_integer_with_checksum"),
+    ],
+)
+def test_decode_with_checksum(symbol_string, expected):
+    """
+    Verifies that `app.internal.base32_crockford.decode` accepts valid checksum-bearing strings when checksum mode is enabled.
+    This matters because checksum-validated codes must round-trip from their public representation.
+
+    Covers:
+    - `app.internal.base32_crockford.decode`
+
+    Rationale:
+    The test asserts representative checksum-valid inputs directly because the external contract is the accepted string form.
+
+    Fixtures:
+    None.
+
+    Parametrize:
+        symbol_string: Input string including a checksum symbol.
+        expected: Expected decoded integer.
+        Cases:
+        - `zero_with_checksum` — decodes the minimal checksum-bearing string.
+        - `large_integer_with_checksum` — decodes a multi-symbol checksum-bearing string.
+    """
+    decoded = base32_crockford.decode(symbol_string, checksum=True)
+    assert decoded == expected, (
+        f"Expected decode('{symbol_string}', checksum=True) to return {expected}, got {decoded}"
+    )
+
+
+@pytest.mark.unit
+def test_decode_invalid_checksum_raises():
+    """
+    Verifies that `app.internal.base32_crockford.decode` rejects an input whose checksum symbol does not match the payload.
+    This matters because checksum validation is the integrity check for human-entered codes.
+
+    Covers:
+    - `app.internal.base32_crockford.decode`
+
+    Rationale:
+    One invalid checksum case is sufficient because the contract is simply that mismatched checksums fail.
+
+    Fixtures:
+    None.
+ """ + with pytest.raises(ValueError, match="invalid check symbol"): + base32_crockford.decode("16J6", checksum=True) + + +@pytest.mark.unit +@pytest.mark.parametrize( + ("symbol_string", "expected"), + [ + pytest.param("1234-5678", "12345678", id="remove_hyphens"), + pytest.param("IiLlOo", "111100", id="substitute_i_l_o"), + pytest.param("abc", "ABC", id="to_uppercase"), + ], +) +def test_normalize_basic(symbol_string, expected): + """ + Verifies that `app.internal.base32_crockford.normalize` removes separators, uppercases symbols, and substitutes ambiguous characters. + This matters because human-entered codes must normalize into the canonical alphabet before decode. + + Covers: + - `app.internal.base32_crockford.normalize` + + Rationale: + The cases document the supported normalization behaviors directly from representative inputs. + + Fixtures: + None. + + Parametrize: + symbol_string: Raw input string to normalize. + expected: Expected canonical normalized output. + Cases: + - — strips grouping separators. + - — substitutes ambiguous Crockford characters. + - — uppercases alphabetic input. + """ + normalized = base32_crockford.normalize(symbol_string) + assert normalized == expected, ( + f"Expected normalize('{symbol_string}') to return '{expected}', got '{normalized}'" + ) + + +@pytest.mark.unit +def test_normalize_strict_raises(): + """ + Verifies that `app.internal.base32_crockford.normalize` rejects inputs that would require normalization when strict mode is enabled. + This matters because strict callers may require already-canonical code strings. + + Covers: + - `app.internal.base32_crockford.normalize` + + Rationale: + The chosen input exercises the normalization path that strict mode is meant to forbid. + + Fixtures: + None. 
+ """ + with pytest.raises(ValueError, match="requires normalization"): + base32_crockford.normalize("IiLlOo", strict=True) + + +@pytest.mark.unit +def test_normalize_invalid_chars_raises(): + """ + Verifies that `app.internal.base32_crockford.normalize` rejects strings containing invalid characters. + This matters because invalid symbols should fail before decode and checksum logic consume them. + + Covers: + - `app.internal.base32_crockford.normalize` + + Rationale: + A single invalid-character example is sufficient because the rule being exercised is the input alphabet guard. + + Fixtures: + None. + """ + with pytest.raises(ValueError, match="contains invalid characters"): + base32_crockford.normalize( + "U" + ) # U is check-only or invalid depending on context, in normalize it fails regex + + +@pytest.mark.unit +def test_normalize_non_string_raises(): + """ + Verifies that `app.internal.base32_crockford.normalize` rejects non-string input types. + This matters because the normalizer contract is defined only for string inputs. + + Covers: + - `app.internal.base32_crockford.normalize` + + Rationale: + The non-string guard is a direct type contract with no additional setup needed. + + Fixtures: + None. + """ + with pytest.raises(TypeError, match="string is of invalid type"): + base32_crockford.normalize(123) diff --git a/backend/tests/schemas/test_auth.py b/backend/tests/schemas/test_auth.py new file mode 100644 index 0000000..8c42661 --- /dev/null +++ b/backend/tests/schemas/test_auth.py @@ -0,0 +1,205 @@ +from __future__ import annotations + +import pytest +from pydantic import ValidationError + +from app.schemas.auth import ( + LoginRequest, + RefreshRequest, + SignupRequest, + SignupResponse, + TokenPair, + VendorOut, +) + + +def build_auth_request_payload(faker) -> dict[str, str]: + """ + Builds a valid auth request payload for schema validation tests. 
+
+    Used by:
+        test_auth_request_models_accept_valid_payloads - supplies valid input for both auth request models.
+        test_signup_request_rejects_invalid_payloads - provides the baseline payload before one field is invalidated.
+
+    Args:
+        faker: `Faker` session fixture used to generate realistic auth field values.
+
+    Returns:
+        dict[str, str]: A JSON-style payload containing a valid email, password, and client id.
+    """
+    return {
+        "email": faker.email(),
+        "password": faker.password(length=16, special_chars=True),
+        "client_id": faker.uuid4(),
+    }
+
+
+@pytest.mark.unit
+@pytest.mark.parametrize(
+    "model_class",
+    [
+        pytest.param(SignupRequest, id="signup_request"),
+        pytest.param(LoginRequest, id="login_request"),
+    ],
+)
+def test_auth_request_models_accept_valid_payloads(faker, model_class) -> None:
+    """
+    Verifies that `app.schemas.auth.SignupRequest` and `app.schemas.auth.LoginRequest` accept the same valid auth payload shape.
+    This matters because both request models define the public input contract for the auth API.
+
+    Covers:
+    - `app.schemas.auth.SignupRequest`
+    - `app.schemas.auth.LoginRequest`
+
+    Rationale:
+    One parametrized test keeps the overlapping request-model contract documented in one place.
+
+    Fixtures:
+        faker: Session-scoped `Faker` instance used to generate valid auth field values.
+
+    Parametrize:
+        model_class: The auth request schema being instantiated.
+        Cases:
+        - `signup_request` — validates the signup request schema.
+        - `login_request` — validates the login request schema.
+ """ + payload = build_auth_request_payload(faker) + model = model_class(**payload) + + assert model.email == payload["email"], ( + f"Expected model email '{payload['email']}', got '{model.email}'" + ) + assert model.password == payload["password"], ( + f"Expected model password to round-trip, got '{model.password}'" + ) + assert model.client_id == payload["client_id"], ( + f"Expected model client_id '{payload['client_id']}', got '{model.client_id}'" + ) + + +@pytest.mark.unit +@pytest.mark.parametrize( + ("scenario", "expected_fields"), + [ + pytest.param("invalid_email", {"email"}, id="invalid_email"), + pytest.param("short_password", {"password"}, id="short_password"), + pytest.param( + "missing_client_id", {"client_id"}, id="missing_client_id" + ), + ], +) +def test_signup_request_rejects_invalid_payloads( + faker, scenario: str, expected_fields: set[str] +) -> None: + """ + Verifies that `app.schemas.auth.SignupRequest` rejects invalid email, short-password, and missing-client-id payloads. + This matters because the signup schema is the first validation boundary for auth input. + + Covers: + - `app.schemas.auth.SignupRequest` + + Rationale: + The test mutates one valid payload into several invalid shapes so all failure cases stay anchored to the same baseline request. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the baseline valid payload and invalid field values. + + Parametrize: + scenario: Identifies which invalid signup payload shape is being exercised. + expected_fields: The schema field names expected in the validation error output. + Cases: + - — supplies a non-email string. + - — supplies a password shorter than the schema minimum. + - — omits the required client id field. 
+ """ + payload = build_auth_request_payload(faker) + if scenario == "invalid_email": + payload["email"] = faker.word() + elif scenario == "short_password": + payload["password"] = faker.password(length=7, special_chars=False) + else: + payload.pop("client_id") + + with pytest.raises(ValidationError, match="validation error") as exc_info: + SignupRequest(**payload) + + error_fields = {error["loc"][0] for error in exc_info.value.errors()} + assert expected_fields <= error_fields, ( + f"Expected validation errors for {expected_fields}, got {error_fields}" + ) + + +@pytest.mark.unit +def test_refresh_request_requires_long_enough_token(faker) -> None: + """ + Verifies that `app.schemas.auth.RefreshRequest` rejects a refresh token that is shorter than the schema requires. + This matters because malformed token payloads should fail schema validation before auth service logic runs. + + Covers: + - `app.schemas.auth.RefreshRequest` + + Rationale: + The test uses the shortest failing token value to isolate the schema-length constraint. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the token and client id inputs. + """ + with pytest.raises(ValidationError, match="validation error") as exc_info: + RefreshRequest( + refresh_token=faker.pystr(min_chars=7, max_chars=7), + client_id=faker.uuid4(), + ) + + error_fields = {error["loc"][0] for error in exc_info.value.errors()} + assert {"refresh_token"} <= error_fields, ( + f"Expected refresh_token validation error, got {error_fields}" + ) + + +@pytest.mark.unit +def test_token_pair_defaults_token_type_to_bearer(faker) -> None: + """ + Verifies that `app.schemas.auth.TokenPair` defaults `token_type` to `bearer`. + This matters because callers rely on the response model to emit the expected auth scheme without setting it manually. + + Covers: + - `app.schemas.auth.TokenPair` + + Rationale: + The test omits only the token type so the defaulting behavior is the sole thing being exercised. 
+ + Fixtures: + faker: Session-scoped `Faker` instance used to generate token strings. + """ + token_pair = TokenPair( + access_token=faker.sha256(raw_output=False), + refresh_token=faker.sha256(raw_output=False), + ) + + assert token_pair.token_type == "bearer", ( + f"Expected default token_type 'bearer', got '{token_pair.token_type}'" + ) + + +@pytest.mark.unit +def test_signup_response_wraps_vendor_out(faker) -> None: + """ + Verifies that `app.schemas.auth.SignupResponse` wraps a `VendorOut` instance under the `vendor` field. + This matters because the signup API response contract exposes the created vendor through this envelope. + + Covers: + - `app.schemas.auth.VendorOut` + - `app.schemas.auth.SignupResponse` + + Rationale: + The response schema is exercised directly because the contract under test is pure model composition. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate vendor field values. + """ + vendor = VendorOut(id=faker.uuid4(), email=faker.email()) + response = SignupResponse(vendor=vendor) + + assert response.vendor == vendor, ( + f"Expected SignupResponse to wrap {vendor}, got {response.vendor}" + ) diff --git a/backend/tests/schemas/test_response.py b/backend/tests/schemas/test_response.py index e8da933..b06adc1 100644 --- a/backend/tests/schemas/test_response.py +++ b/backend/tests/schemas/test_response.py @@ -1,10 +1,3 @@ -""" -Unit tests for Pydantic response schemas in app.schemas.response. - -Covers: ErrorBodyResponse field storage, required-field enforcement, -details defaulting, ErrorResponse envelope shape, and ErrorDetail validation. 
-""" - import pytest from fastapi import status from pydantic import ValidationError @@ -23,20 +16,34 @@ @pytest.mark.unit -def test_error_body_response_stores_all_fields(): - """ErrorBodyResponse must accept and round-trip all fields.""" +def test_error_body_response_stores_all_fields(faker): + """ + Verifies that `app.schemas.response.ErrorBodyResponse` stores and round-trips all explicitly provided fields. + This matters because the API error contract depends on the response schema preserving code, message, status, details, and request id exactly. + + Covers: + - `app.schemas.response.ErrorBodyResponse` + + Rationale: + The model is exercised directly because the contract under test is pure schema storage and validation behavior. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate message and request-id values. + """ + msg = faker.sentence() + request_id = faker.uuid4() error_body = ErrorBodyResponse( code=ErrorCode.VALIDATION_FAILED, - message="Test message", + message=msg, http_status=status.HTTP_400_BAD_REQUEST, details=[], - request_id="req-123", + request_id=request_id, ) assert error_body.code == ErrorCode.VALIDATION_FAILED, ( f"Expected code VALIDATION_FAILED, got {error_body.code}" ) - assert error_body.message == "Test message", ( - f"Expected message 'Test message', got '{error_body.message}'" + assert error_body.message == msg, ( + f"Expected message '{msg}', got '{error_body.message}'" ) assert error_body.http_status == status.HTTP_400_BAD_REQUEST, ( f"Expected http_status 400, got {error_body.http_status}" @@ -44,38 +51,53 @@ def test_error_body_response_stores_all_fields(): assert error_body.details == [], ( f"Expected empty details, got {error_body.details}" ) - assert error_body.request_id == "req-123", ( - f"Expected request_id 'req-123', got '{error_body.request_id}'" + assert error_body.request_id == request_id, ( + f"Expected request_id '{request_id}', got '{error_body.request_id}'" ) @pytest.mark.unit 
@pytest.mark.parametrize( - "kwargs", + "scenario", [ - pytest.param( - dict( - code=ErrorCode.VALIDATION_FAILED, - message="Test message", - request_id="req-123", - ), - id="missing_http_status", - ), - pytest.param( - dict( - code=ErrorCode.VALIDATION_FAILED, - message="Test message", - http_status=status.HTTP_400_BAD_REQUEST, - ), - id="missing_request_id", - ), + pytest.param("missing_http_status", id="missing_http_status"), + pytest.param("missing_request_id", id="missing_request_id"), ], ) -def test_error_body_response_required_field_raises_validation_error(kwargs): - """Omitting a required field must raise ValidationError.""" - with pytest.raises(ValidationError) as exc_info: +def test_error_body_response_required_field_raises_validation_error( + faker, scenario: str +): + """ + Verifies that `app.schemas.response.ErrorBodyResponse` rejects payloads missing required fields. + This matters because callers must not be able to construct partial error bodies that break the API response contract. + + Covers: + - `app.schemas.response.ErrorBodyResponse` + + Rationale: + The test starts from a complete payload and removes one required field per case so the validation failure stays isolated. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate message and request-id values. + + Parametrize: + scenario: Identifies which required field is omitted from the error body payload. + Cases: + - — omits the HTTP status field. + - — omits the request id field. 
+ """ + kwargs = { + "code": ErrorCode.VALIDATION_FAILED, + "message": faker.sentence(), + "request_id": faker.uuid4(), + "http_status": status.HTTP_400_BAD_REQUEST, + } + kwargs.pop( + "http_status" if scenario == "missing_http_status" else "request_id" + ) + + with pytest.raises(ValidationError, match="Field required") as exc_info: ErrorBodyResponse(**kwargs) - # Optionally verify the missing field is in the error error_fields = {e["loc"][0] for e in exc_info.value.errors()} expected_missing = {"http_status", "request_id"} - kwargs.keys() assert expected_missing & error_fields, ( @@ -84,13 +106,25 @@ def test_error_body_response_required_field_raises_validation_error(kwargs): @pytest.mark.unit -def test_error_body_response_details_default_to_empty_list(): - """Omitting details when constructing ErrorBodyResponse must yield [].""" +def test_error_body_response_details_default_to_empty_list(faker): + """ + Verifies that `app.schemas.response.ErrorBodyResponse` defaults `details` to an empty list when the field is omitted. + This matters because API error responses should not require callers to pass an explicit empty list for no-detail cases. + + Covers: + - `app.schemas.response.ErrorBodyResponse` + + Rationale: + The test omits only the details field so the defaulting behavior is the sole contract being exercised. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate message and request-id values. 
+ """ body = ErrorBodyResponse( code=ErrorCode.VALIDATION_FAILED, - message="Test", + message=faker.sentence(), http_status=status.HTTP_400_BAD_REQUEST, - request_id="req-1", + request_id=faker.uuid4(), ) assert body.details == [], ( f"Expected empty details list by default, got {body.details}" @@ -103,14 +137,27 @@ def test_error_body_response_details_default_to_empty_list(): @pytest.mark.unit -def test_error_response_envelope_structure(): - """ErrorResponse must wrap an ErrorBodyResponse in the error field.""" +def test_error_response_envelope_structure(faker): + """ + Verifies that `app.schemas.response.ErrorResponse` wraps an `ErrorBodyResponse` under the `error` field. + This matters because the API emits errors through the envelope shape defined by this response model. + + Covers: + - `app.schemas.response.ErrorBodyResponse` + - `app.schemas.response.ErrorResponse` + + Rationale: + The schema composition is exercised directly because the contract under test is model nesting rather than route behavior. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate message and request-id values. + """ error_response = ErrorResponse( error=ErrorBodyResponse( code=ErrorCode.VALIDATION_FAILED, - message="Test", + message=faker.sentence(), http_status=status.HTTP_400_BAD_REQUEST, - request_id="req-123", + request_id=faker.uuid4(), ) ) assert isinstance(error_response.error, ErrorBodyResponse), ( @@ -129,33 +176,72 @@ def test_error_response_envelope_structure(): @pytest.mark.unit -def test_error_detail_requires_message_field(): - """ErrorDetail without message must raise ValidationError.""" - with pytest.raises(ValidationError): - ErrorDetail(field="test_field") # missing required 'message' +def test_error_detail_requires_message_field(faker): + """ + Verifies that `app.schemas.response.ErrorDetail` rejects construction when the required message field is missing. + This matters because every API error detail must provide a human-readable message. 
+ + Covers: + - `app.schemas.response.ErrorDetail` + + Rationale: + The failure is a direct schema validation rule, so a single missing-message case is sufficient. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the field name. + """ + with pytest.raises(ValidationError, match="Field required"): + ErrorDetail(field=faker.word()) @pytest.mark.unit -def test_error_detail_accepts_field_as_none(): - """ErrorDetail.field is optional — None must be stored as-is.""" - detail = ErrorDetail(field=None, message="Test error") +def test_error_detail_accepts_field_as_none(faker): + """ + Verifies that `app.schemas.response.ErrorDetail` accepts `field=None` and preserves it. + This matters because some API errors apply to the request body as a whole rather than a specific field. + + Covers: + - `app.schemas.response.ErrorDetail` + + Rationale: + The test provides only the optional-field scenario so the schema behavior stays focused on that contract. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the detail message. + """ + msg = faker.sentence() + detail = ErrorDetail(field=None, message=msg) assert detail.field is None, ( f"Expected field to be None, got {detail.field}" ) - assert detail.message == "Test error", ( - f"Expected message 'Test error', got '{detail.message}'" + assert detail.message == msg, ( + f"Expected message '{msg}', got '{detail.message}'" ) @pytest.mark.unit -def test_error_detail_with_both_fields_populated(): - """ErrorDetail must store both field and message when both are provided.""" - detail = ErrorDetail(field="email", message="Invalid email format") - assert detail.field == "email", ( - f"Expected field 'email', got '{detail.field}'" +def test_error_detail_with_both_fields_populated(faker): + """ + Verifies that `app.schemas.response.ErrorDetail` stores both `field` and `message` when both are provided. 
+ This matters because field-specific API validation errors need to preserve both pieces of information. + + Covers: + - `app.schemas.response.ErrorDetail` + + Rationale: + This is a direct schema round-trip assertion with no external dependencies or patches. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate the field name and message. + """ + field = faker.word() + msg = faker.sentence() + detail = ErrorDetail(field=field, message=msg) + assert detail.field == field, ( + f"Expected field '{field}', got '{detail.field}'" ) - assert detail.message == "Invalid email format", ( - f"Expected message 'Invalid email format', got '{detail.message}'" + assert detail.message == msg, ( + f"Expected message '{msg}', got '{detail.message}'" ) @@ -165,15 +251,27 @@ def test_error_detail_with_both_fields_populated(): @pytest.mark.unit -def test_error_body_response_rejects_invalid_error_code(): - """ErrorBodyResponse.code is typed as ErrorCode; an arbitrary string that is - not a valid enum member must be rejected with a ValidationError.""" - with pytest.raises(ValidationError): +def test_error_body_response_rejects_invalid_error_code(faker): + """ + Verifies that `app.schemas.response.ErrorBodyResponse` rejects arbitrary strings for the typed `ErrorCode` field. + This matters because API error responses must use one of the defined enum codes rather than an uncontrolled string. + + Covers: + - `app.schemas.response.ErrorBodyResponse` + - `app.schemas.response.ErrorCode` + + Rationale: + A single invalid enum string is sufficient because the contract is that non-members fail schema validation. + + Fixtures: + faker: Session-scoped `Faker` instance used to generate message and request-id values. 
+ """ + with pytest.raises(ValidationError, match="Input should be"): ErrorBodyResponse( code="NOT_A_REAL_CODE", - message="Test", + message=faker.sentence(), http_status=status.HTTP_400_BAD_REQUEST, - request_id="req-1", + request_id=faker.uuid4(), ) @@ -184,9 +282,18 @@ def test_error_body_response_rejects_invalid_error_code(): @pytest.mark.unit def test_error_response_requires_error_field(): - """ErrorResponse.error is required. Omitting it must - raise a ValidationError rather than silently - constructing an empty envelope. """ - with pytest.raises(ValidationError): + Verifies that `app.schemas.response.ErrorResponse` rejects construction when the required `error` field is omitted. + This matters because the API error envelope must always contain an error payload rather than an empty shell. + + Covers: + - `app.schemas.response.ErrorResponse` + + Rationale: + The missing-field case directly expresses the required-envelope contract with no additional setup. + + Fixtures: + None. + """ + with pytest.raises(ValidationError, match="Field required"): ErrorResponse() diff --git a/backend/tests/integration/__init__.py b/backend/tests/services/__init__.py similarity index 100% rename from backend/tests/integration/__init__.py rename to backend/tests/services/__init__.py diff --git a/backend/tests/services/test_auth.py b/backend/tests/services/test_auth.py new file mode 100644 index 0000000..4c97138 --- /dev/null +++ b/backend/tests/services/test_auth.py @@ -0,0 +1,419 @@ +from __future__ import annotations + +from datetime import datetime, timedelta, timezone + +import jwt +import pytest +from psycopg import Connection +from pwdlib.hashers import argon2 + +from app.core.config import Settings +from app.core.exceptions import AuthenticationException, ConflictException +from app.core.security import ( + JWT_ALGORITHM, + create_access_token, + create_refresh_token, + decode_token, + verify_password, +) +from app.services.auth import login, refresh, signup + + +def 
build_signup_args(faker) -> tuple[str, str, str]: + """ + Builds signup credentials and client identity for auth service tests. + + Used by: + test_signup_success - supplies the happy-path signup inputs. + test_signup_duplicate_email_raises_conflict - provisions the original and duplicate signup inputs. + test_login_success_returns_access_and_refresh_tokens - provisions a vendor before login. + test_login_rejects_invalid_credentials - supplies the baseline valid credentials before one field is varied. + test_login_persists_upgraded_hash - creates the legacy-hash vendor credentials. + test_refresh_success_returns_new_token_pair - provisions the vendor and client id used for refresh. + + Args: + faker: `Faker` session fixture used to generate auth field values. + + Returns: + tuple[str, str, str]: A unique email, plaintext password, and client id. + """ + return ( + faker.email(), + faker.password(length=16, special_chars=True), + faker.uuid4(), + ) + + +def build_refresh_payload(faker, **overrides: str) -> dict[str, str | datetime]: + """ + Builds a refresh-token payload for auth service boundary tests. + + Used by: + test_refresh_rejects_invalid_tokens - creates malformed and unknown-vendor refresh payloads. + test_refresh_rejects_missing_vendor_id_claim - creates a signed refresh token with a missing vendor id claim. + + Args: + faker: `Faker` session fixture used to generate a vendor id. + overrides: Payload field replacements applied on top of the default refresh-token claims. + + Returns: + dict[str, str | datetime]: A refresh-token payload containing token type, vendor id, and expiry claims. 
+ """ + payload: dict[str, str | datetime] = { + "token_type": "refresh", + "vendor_id": faker.uuid4(), + "exp": datetime.now(timezone.utc) + timedelta(minutes=5), + } + payload.update(overrides) + return payload + + +@pytest.mark.integration +def test_signup_success( + db_conn: Connection, app_settings: Settings, faker +) -> None: + """ + Verifies that `app.services.auth.signup` creates a vendor and returns the created vendor in the service response. + This matters because the auth service is the business-layer contract behind the public signup route. + + Covers: + - `app.services.auth.signup` + + Rationale: + The test uses a real transactional database cursor because signup correctness depends on persisted vendor state. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + app_settings: Shared `Settings` object used for auth service configuration. + faker: Session-scoped `Faker` instance used to generate signup inputs. + """ + email, password, client_id = build_signup_args(faker) + with db_conn.cursor() as db_cursor: + result = signup(db_cursor, email, password, client_id, app_settings) + + assert result.vendor.email == email, ( + f"Expected created vendor email '{email}', got '{result.vendor.email}'" + ) + assert result.vendor.id, "Expected signup to return a persisted vendor id" + + +@pytest.mark.integration +def test_signup_duplicate_email_raises_conflict( + db_conn: Connection, app_settings: Settings, faker +) -> None: + """ + Verifies that `app.services.auth.signup` raises a conflict when a vendor with the same email already exists. + This matters because duplicate vendor creation must be rejected before callers issue tokens or create parallel accounts. + + Covers: + - `app.services.auth.signup` + + Rationale: + The service is exercised twice against the same transactional cursor so the duplicate path is proven with real database state instead of patched collaborators. 
+ + Fixtures: + db_conn: Transactional database connection rolled back after the test. + app_settings: Shared `Settings` object used for auth service configuration. + faker: Session-scoped `Faker` instance used to generate signup inputs. + """ + email, password, client_id = build_signup_args(faker) + with db_conn.cursor() as db_cursor: + signup(db_cursor, email, password, client_id, app_settings) + + with pytest.raises( + ConflictException, match="A vendor with this email already exists" + ): + signup(db_cursor, email, password, client_id, app_settings) + + +@pytest.mark.integration +def test_login_success_returns_access_and_refresh_tokens( + db_conn: Connection, app_settings: Settings, faker +) -> None: + """ + Verifies that `app.services.auth.login` returns a bearer token pair for valid credentials. + This matters because the auth service is responsible for issuing the tokens consumed by the API layer. + + Covers: + - `app.services.auth.login` + - `app.core.security.decode_token` + + Rationale: + The test signs up a real vendor first and decodes the issued access token to confirm the service minted the expected token type. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + app_settings: Shared `Settings` object used to sign and decode tokens. + faker: Session-scoped `Faker` instance used to generate auth inputs. 
+ """ + email, password, client_id = build_signup_args(faker) + with db_conn.cursor() as db_cursor: + signup(db_cursor, email, password, client_id, app_settings) + result = login(db_cursor, email, password, client_id, app_settings) + + payload = decode_token(result.access_token, app_settings) + assert result.access_token, ( + "Expected login to return a non-empty access token" + ) + assert result.refresh_token, ( + "Expected login to return a non-empty refresh token" + ) + assert result.token_type == "bearer", ( + f"Expected token_type 'bearer', got '{result.token_type}'" + ) + assert payload["token_type"] == "access", ( + f"Expected access token payload type 'access', got '{payload['token_type']}'" + ) + + +@pytest.mark.integration +@pytest.mark.parametrize( + ("use_unknown_email", "use_wrong_password"), + [ + pytest.param(True, False, id="unknown_email"), + pytest.param(False, True, id="wrong_password"), + ], +) +def test_login_rejects_invalid_credentials( + db_conn: Connection, + app_settings: Settings, + faker, + *, + use_unknown_email: bool, + use_wrong_password: bool, +) -> None: + """ + Verifies that `app.services.auth.login` rejects unknown-email and wrong-password attempts with the same authentication error. + This matters because the service must enforce credential validation consistently regardless of which input field is wrong. + + Covers: + - `app.services.auth.login` + + Rationale: + A single parametrized test varies one credential dimension at a time while keeping the persisted vendor state constant. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + app_settings: Shared `Settings` object used by the auth service. + faker: Session-scoped `Faker` instance used to generate valid and alternate credentials. + + Parametrize: + use_unknown_email: Whether the service call swaps in an email that does not exist. + use_wrong_password: Whether the service call swaps in an incorrect password. 
+ Cases: + - — uses an unrecognized email with the correct password. + - — uses the persisted email with an incorrect password. + """ + email, password, client_id = build_signup_args(faker) + alternate_email = faker.email() + alternate_password = faker.password(length=16, special_chars=True) + with db_conn.cursor() as db_cursor: + signup(db_cursor, email, password, client_id, app_settings) + + with pytest.raises( + AuthenticationException, match="Invalid credentials" + ): + login( + db_cursor, + alternate_email if use_unknown_email else email, + alternate_password if use_wrong_password else password, + client_id, + app_settings, + ) + + +@pytest.mark.integration +def test_login_persists_upgraded_hash( + db_conn: Connection, app_settings: Settings, faker +) -> None: + """ + Verifies that `app.services.auth.login` upgrades a legacy password hash in storage after a successful login. + This matters because the service is responsible for migrating old password hashes to the preferred format during authentication. + + Covers: + - `app.services.auth.login` + - `app.core.security.verify_password` + + Rationale: + This test uses a real vendor row with an Argon2 legacy hash so the persistence side effect is verified against the database rather than patched lookup helpers. This shape came from REM-004. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + app_settings: Shared `Settings` object used by the auth service. + faker: Session-scoped `Faker` instance used to generate auth inputs. 
+ """ + email, password, client_id = build_signup_args(faker) + legacy_hash = argon2.Argon2Hasher().hash(password) + with db_conn.cursor() as db_cursor: + db_cursor.execute( + """ + INSERT INTO app."vendors" ("email", "password_hash") + VALUES (%s, %s) + RETURNING "id" + """, + (email, legacy_hash), + ) + vendor_id = str(db_cursor.fetchone()[0]) + + result = login(db_cursor, email, password, client_id, app_settings) + db_cursor.execute( + 'SELECT "password_hash" FROM app."vendors" WHERE "id" = %s', + (vendor_id,), + ) + upgraded_hash = db_cursor.fetchone()[0] + + valid, updated_hash = verify_password(password, upgraded_hash) + assert result.access_token, ( + "Expected login to succeed after upgrading a legacy password hash" + ) + assert upgraded_hash != legacy_hash, ( + "Expected login to replace the legacy hash with the preferred hash format" + ) + assert valid is True, ( + "Expected upgraded password hash to verify successfully" + ) + assert updated_hash is None, ( + "Expected preferred password hashes to require no further upgrade" + ) + + +@pytest.mark.integration +def test_refresh_success_returns_new_token_pair( + db_conn: Connection, app_settings: Settings, faker +) -> None: + """ + Verifies that `app.services.auth.refresh` exchanges a valid refresh token for a replacement token pair. + This matters because the auth service owns the token-refresh contract used by the API route. + + Covers: + - `app.services.auth.login` + - `app.services.auth.refresh` + + Rationale: + The test performs the full signup-login-refresh sequence through the real service functions because refresh correctness depends on issued token state. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + app_settings: Shared `Settings` object used to sign and decode tokens. + faker: Session-scoped `Faker` instance used to generate auth inputs. 
+ """ + email, password, client_id = build_signup_args(faker) + with db_conn.cursor() as db_cursor: + signup(db_cursor, email, password, client_id, app_settings) + tokens = login(db_cursor, email, password, client_id, app_settings) + result = refresh( + tokens.refresh_token, client_id, db_cursor, app_settings + ) + + assert result.access_token, ( + "Expected refresh to return a non-empty replacement access token" + ) + assert result.refresh_token, ( + "Expected refresh to return a non-empty replacement refresh token" + ) + + +@pytest.mark.integration +@pytest.mark.parametrize( + ("token_factory", "expected_message"), + [ + pytest.param( + lambda faker, settings: create_access_token( + faker.uuid4(), settings + ), + "Invalid token type", + id="access_token", + ), + pytest.param( + lambda faker, settings: create_refresh_token( + faker.uuid4(), settings, expires_delta=timedelta(seconds=-1) + ), + "Invalid or expired refresh token", + id="expired_refresh_token", + ), + pytest.param( + lambda faker, settings: jwt.encode( + build_refresh_payload(faker, vendor_id=faker.word()), + settings.SECRET_KEY, + algorithm=JWT_ALGORITHM, + ), + "Invalid token payload", + id="malformed_vendor_id", + ), + pytest.param( + lambda faker, settings: jwt.encode( + build_refresh_payload(faker), + settings.SECRET_KEY, + algorithm=JWT_ALGORITHM, + ), + "Vendor not found", + id="unknown_vendor", + ), + ], +) +def test_refresh_rejects_invalid_tokens( + db_conn: Connection, + app_settings: Settings, + faker, + token_factory, + expected_message: str, +) -> None: + """ + Verifies that `app.services.auth.refresh` rejects invalid refresh tokens, malformed payloads, and unknown-vendor claims. + This matters because the service must not mint new tokens from unusable or untrusted refresh input. + + Covers: + - `app.services.auth.refresh` + + Rationale: + The test signs real token variants instead of patching decode helpers, so it documents the actual service boundary. This shape came from REM-003. 
+ + Fixtures: + db_conn: Transactional database connection rolled back after the test. + app_settings: Shared `Settings` object used to sign token variants. + faker: Session-scoped `Faker` instance used to generate claims and ids. + + Parametrize: + token_factory: Produces the invalid refresh token variant for the scenario. + expected_message: The authentication error expected from the service. + Cases: + - — supplies an access token where a refresh token is required. + - — supplies a refresh token whose expiry is already in the past. + - — supplies a refresh token whose vendor id claim is not a UUID. + - — supplies a refresh token whose vendor id does not exist in the database. + """ + refresh_token = token_factory(faker, app_settings) + with db_conn.cursor() as db_cursor: + with pytest.raises(AuthenticationException, match=expected_message): + refresh(refresh_token, faker.uuid4(), db_cursor, app_settings) + + +@pytest.mark.integration +def test_refresh_rejects_missing_vendor_id_claim( + db_conn: Connection, app_settings: Settings, faker +) -> None: + """ + Verifies that `app.services.auth.refresh` rejects a refresh token whose vendor id claim is missing. + This matters because the refresh contract requires a resolvable vendor identity before new tokens can be issued. + + Covers: + - `app.services.auth.refresh` + + Rationale: + The test signs a real refresh payload with `vendor_id=None` so the payload-validation branch is exercised without internal patching. This shape came from REM-003. + + Fixtures: + db_conn: Transactional database connection rolled back after the test. + app_settings: Shared `Settings` object used to sign the malformed refresh token. + faker: Session-scoped `Faker` instance used to generate claim values and client id. 
+ """ + refresh_token = jwt.encode( + build_refresh_payload(faker, vendor_id=None), + app_settings.SECRET_KEY, + algorithm=JWT_ALGORITHM, + ) + with db_conn.cursor() as db_cursor: + with pytest.raises( + AuthenticationException, match="Invalid token payload" + ): + refresh(refresh_token, faker.uuid4(), db_cursor, app_settings) diff --git a/backend/tests/services/test_license_gen.py b/backend/tests/services/test_license_gen.py new file mode 100644 index 0000000..b8d5863 --- /dev/null +++ b/backend/tests/services/test_license_gen.py @@ -0,0 +1,127 @@ +from __future__ import annotations + +import hashlib +from datetime import datetime + +import pytest +from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, + PublicFormat, +) +from uuid6 import uuid7 + +from app.core.exceptions import LicenseKeyGenerationError +from app.domain.license import LicenseFile, NodeLockedLicense +from app.services.license_gen import verify_license + + +@pytest.fixture +def sample_license(faker) -> NodeLockedLicense: + """ + Provides a valid `NodeLockedLicense` instance for license-generation and verification tests. + + Scope: function — the license payload includes timestamped values and each test should receive its own independent model instance. + + Provides: + A `NodeLockedLicense` model with realistic ids, timestamps, and a synthetic device fingerprint. + + Dependencies: + faker: Supplies the generated fingerprint input. + + Teardown: + None. + + Note: + The fixture provides only the license data; each test still generates its own signing key pair. 
+ """ + now = datetime.now().timestamp() + return NodeLockedLicense( + id=uuid7(), + vendor_id=uuid7(), + client_id=uuid7(), + expires_at=now + 3600, + max_grace_secs=0, + created_at=now, + meta_data=None, + device_fingerprint=( + # TODO: this is fine for now but should be replaced with actual + # device fingerprint using the appropriate class + f"sha256:{hashlib.sha256(faker.binary(length=64)).hexdigest()}" + ), + session_limit=1, + ) + + +@pytest.mark.unit +def test_verify_license_accepts_matching_signature( + sample_license: NodeLockedLicense, +) -> None: + """ + Verifies that `app.services.license_gen.verify_license` accepts the signature produced for the same license payload and key pair. + This matters because license verification is the integrity boundary for signed license files. + + Covers: + - `app.domain.license.LicenseFile` + - `app.services.license_gen.verify_license` + + Rationale: + The test uses a real Ed25519 keypair and a real license signature so it documents the project-level signing and verification round trip. Historical third-party-coupled assertions were removed under REM-012. + + Fixtures: + sample_license: Valid node-locked license model used to construct and verify the signature. + """ + signer = Ed25519PrivateKey.generate() + private_key_pem = signer.private_bytes( + encoding=Encoding.PEM, + format=PrivateFormat.PKCS8, + encryption_algorithm=NoEncryption(), + ) + public_key_pem = signer.public_key().public_bytes( + encoding=Encoding.PEM, format=PublicFormat.SubjectPublicKeyInfo + ) + license_file = LicenseFile( + license_data=sample_license, private_key_pem=private_key_pem + ) + + verify_license(sample_license, license_file.signature, public_key_pem) + + +@pytest.mark.unit +def test_verify_license_rejects_tampered_signature( + sample_license: NodeLockedLicense, +) -> None: + """ + Verifies that `app.services.license_gen.verify_license` rejects a signature that has been tampered with after signing. 
+ This matters because the verification boundary must fail closed when license signatures are altered. + + Covers: + - `app.domain.license.LicenseFile` + - `app.services.license_gen.verify_license` + + Rationale: + The test alters one character of a real signature so the failure is measured at the project verification boundary rather than on library internals. Historical third-party-coupled assertions were removed under REM-012. + + Fixtures: + sample_license: Valid node-locked license model used to construct the original signature. + """ + signer = Ed25519PrivateKey.generate() + private_key_pem = signer.private_bytes( + encoding=Encoding.PEM, + format=PrivateFormat.PKCS8, + encryption_algorithm=NoEncryption(), + ) + public_key_pem = signer.public_key().public_bytes( + encoding=Encoding.PEM, format=PublicFormat.SubjectPublicKeyInfo + ) + signature = LicenseFile( + license_data=sample_license, private_key_pem=private_key_pem + ).signature + tampered_signature = signature[:-1] + ("1" if signature[-1] != "1" else "2") + + with pytest.raises( + LicenseKeyGenerationError, match="License signature verification failed" + ): + verify_license(sample_license, tampered_signature, public_key_pem) diff --git a/backend/tests/test_main.py b/backend/tests/test_main.py new file mode 100644 index 0000000..48d2bb0 --- /dev/null +++ b/backend/tests/test_main.py @@ -0,0 +1,228 @@ +from __future__ import annotations + +from contextlib import contextmanager +from types import SimpleNamespace + +import pytest +from fastapi import FastAPI + +from app import main as app_main + + +def build_fake_connection(error: Exception | None = None) -> SimpleNamespace: + """ + Builds a connection-like object that records executed statements and can + raise a configured error. + + Used by: + - test_lifespan_success_sets_state_and_closes_pool - records the startup + connectivity query. + - test_lifespan_connection_failure_closes_pool_and_wraps_error - forces + the connectivity-check failure path. 
+ + Args: + error: `Exception | None` raised when `execute` is called, if provided. + + Returns: + SimpleNamespace: A connection substitute with `error`, `executed`, and + `execute` attributes. + """ + connection = SimpleNamespace(error=error, executed=[]) + + def execute(statement: str) -> None: + if connection.error is not None: + raise connection.error + connection.executed.append(statement) + + connection.execute = execute + return connection + + +def build_fake_pool(connection: SimpleNamespace) -> SimpleNamespace: + """ + Builds a pool-like object that yields the provided fake connection and + counts close calls. + + Used by: + - test_lifespan_success_sets_state_and_closes_pool - supplies the pool + stored on app state during the successful lifespan path. + - test_lifespan_connection_failure_closes_pool_and_wraps_error - + supplies the pool closed after a startup failure. + + Args: + connection: `SimpleNamespace` connection substitute returned by + `build_fake_connection`. + + Returns: + SimpleNamespace: A pool substitute with `connection_instance`, + `close_calls`, `connection()`, and `close()` attributes. + """ + pool = SimpleNamespace(connection_instance=connection, close_calls=0) + + @contextmanager + def connection_context(): + yield connection + + def close() -> None: + pool.close_calls += 1 + + pool.connection = connection_context + pool.close = close + return pool + + +@pytest.mark.unit +@pytest.mark.parametrize( + ("tags", "name", "expected_route_id"), + [ + pytest.param(["auth"], "signup", "auth-signup", id="tagged_route"), + pytest.param([], "health", "default-health", id="default_tag"), + ], +) +def test_custom_generate_unique_id_uses_expected_prefix( + tags: list[str], name: str, expected_route_id: str +) -> None: + """ + Verifies that `app.main.custom_generate_unique_id` prefixes generated + This matters because route ids feed generated OpenAPI operation names + and should stay stable across tagged and untagged routes. 
+ + Covers: + - `app.main.custom_generate_unique_id` + + Rationale: + The route object is represented with a `SimpleNamespace` because the + function depends only on `tags` and `name`. + + Fixtures: + None. + + Parametrize: + tags: Route tags presented to the unique-id helper. + name: Route name presented to the unique-id helper. + expected_route_id: Expected generated identifier. + Cases: + - — uses the first tag as the route-id prefix. + - — falls back to `default` when tags are absent. + """ + route = SimpleNamespace(tags=tags, name=name) + route_id = app_main.custom_generate_unique_id(route) + + assert route_id == expected_route_id, ( + f"Expected generated route id '{expected_route_id}', got '{route_id}'" + ) + + +@pytest.mark.unit +@pytest.mark.anyio +async def test_lifespan_success_sets_state_and_closes_pool(monkeypatch) -> None: + """ + Verifies that `app.main.lifespan` stores settings and the database pool + on `app.state`, performs a startup connectivity check, and closes the pool + on shutdown. This matters because application startup must both initialize + shared state and clean up resources reliably. + + Covers: + - `app.main.lifespan` + + Rationale: + This test monkeypatches `app.main.Settings` and + `app.main.ConnectionPool` because startup constructors are not + injectable in the current design. The fake pool lets the test assert + observable lifespan behavior without opening a real pool. + + Fixtures: + monkeypatch: Pytest fixture used to replace `app.main.Settings` and + `app.main.ConnectionPool` during the test. 
+ """ + settings = SimpleNamespace( + PROJECT_NAME="unit-project", DATABASE_DSN="postgresql://unit-test" + ) + pool = build_fake_pool(build_fake_connection()) + app = FastAPI() + + def build_pool(dsn: str, *, open: bool) -> SimpleNamespace: # noqa: A002 + assert dsn == str(settings.DATABASE_DSN), ( + f"Expected ConnectionPool DSN '{settings.DATABASE_DSN}', got '{dsn}'" + ) + assert open is True, ( + "Expected lifespan to construct ConnectionPool with open=True" + ) + return pool + + monkeypatch.setattr(app_main, "Settings", lambda: settings) + monkeypatch.setattr(app_main, "ConnectionPool", build_pool) + + async with app_main.lifespan(app): + assert app.state.settings is settings, ( + "Expected lifespan to store settings on app.state.settings" + ) + assert app.state.db_pool is pool, ( + "Expected lifespan to store the database pool on app.state.db_pool" + ) + assert app.title == settings.PROJECT_NAME, ( + f"Expected app title '{settings.PROJECT_NAME}', got '{app.title}'" + ) + + assert pool.connection_instance.executed == ["SELECT 1"], ( + f"Expected startup connectivity query ['SELECT 1'], got " + f"{pool.connection_instance.executed}" + ) + assert pool.close_calls == 1, ( + f"Expected lifespan shutdown to close the pool once, got " + f"{pool.close_calls}" + ) + + +@pytest.mark.unit +@pytest.mark.anyio +async def test_lifespan_connection_failure_closes_pool_and_wraps_error( + monkeypatch, +) -> None: + """ + Verifies that `app.main.lifespan` closes the pool and wraps startup + connection failures in the expected runtime error. This matters because + startup failures must preserve the root cause while still cleaning up + partially initialized resources. + + Covers: + - `app.main.lifespan` + + Rationale: + This test monkeypatches `app.main.Settings` and + `app.main.ConnectionPool` because startup constructors are not + injectable in the current design. The fake pool forces the failure path + without touching real infrastructure. 
+ + Fixtures: + monkeypatch: Pytest fixture used to replace `app.main.Settings` and + `app.main.ConnectionPool` during the test. + + """ + root_error = RuntimeError("cannot connect") + settings = SimpleNamespace( + PROJECT_NAME="unit-project", DATABASE_DSN="postgresql://unit-test" + ) + pool = build_fake_pool(build_fake_connection(error=root_error)) + app = FastAPI() + + monkeypatch.setattr(app_main, "Settings", lambda: settings) + monkeypatch.setattr(app_main, "ConnectionPool", lambda dsn, *, open: pool) # noqa: A006 + + with pytest.raises( + RuntimeError, match="Database connectivity check failed" + ) as exc_info: + async with app_main.lifespan(app): + pass + + assert exc_info.value.__cause__ is root_error, ( + "Expected lifespan to preserve the original connection error as " + "__cause__" + ) + assert getattr(app.state, "db_pool", None) is None, ( + "Expected failed startup to avoid exposing db_pool on application state" + ) + assert pool.close_calls == 1, ( + f"Expected failed startup to close the pool once, got " + f"{pool.close_calls}" + ) diff --git a/backend/tests/test_pre_start.py b/backend/tests/test_pre_start.py new file mode 100644 index 0000000..ff28f85 --- /dev/null +++ b/backend/tests/test_pre_start.py @@ -0,0 +1,219 @@ +from __future__ import annotations + +from contextlib import contextmanager +from types import SimpleNamespace + +import pytest + +from app import pre_start + + +def build_fake_connection(error: Exception | None = None) -> SimpleNamespace: + """ + Builds a connection-like object that records executed SQL and can raise a + configured error. + + Used by: + - test_init_success_executes_connectivity_check_and_closes_pool - + records the bootstrap connectivity query. + - test_init_failure_still_closes_pool - forces the connectivity-check + failure path. + + Args: + error: `Exception | None` raised when `execute` is called, if provided. 
+ + Returns: + SimpleNamespace: A connection substitute with `error`, `executed`, and + `execute` attributes. + """ + connection = SimpleNamespace(error=error, executed=[]) + + def execute(statement: str) -> None: + if connection.error is not None: + raise connection.error + connection.executed.append(statement) + + connection.execute = execute + return connection + + +def build_fake_pool(connection: SimpleNamespace) -> SimpleNamespace: + """ + Builds a pool-like object that yields the provided fake connection and + counts close calls. + + Used by: + - test_init_success_executes_connectivity_check_and_closes_pool - + supplies the temporary pool used during successful bootstrap. + - test_init_failure_still_closes_pool - supplies the temporary pool + closed after the connectivity failure path. + + Args: + - connection: `SimpleNamespace` connection substitute returned by + `build_fake_connection`. + + Returns: + - SimpleNamespace: A pool substitute with `connection_instance`, + `close_calls`, `connection()`, and `close()` attributes. + """ + pool = SimpleNamespace(connection_instance=connection, close_calls=0) + + @contextmanager + def connection_context(): + yield connection + + def close() -> None: + pool.close_calls += 1 + + pool.connection = connection_context + pool.close = close + return pool + + +@pytest.mark.unit +def test_init_success_executes_connectivity_check_and_closes_pool( + monkeypatch, +) -> None: + """ + Verifies that `app.pre_start.init` opens a pool, performs the connectivity + check, and closes the temporary pool on success. This matters because the + pre-start bootstrap path is the application's startup sanity check before + the main process begins serving. + + Covers: + - `app.pre_start.init` + + Rationale: + This test monkeypatches `app.pre_start.Settings` and + `app.pre_start.ConnectionPool` because the bootstrap constructors are + not injectable in the current design. 
The fake pool makes the + connectivity and cleanup behavior observable without opening a real + pool. + + Fixtures: + - monkeypatch: Pytest fixture used to replace `app.pre_start.Settings` + and `app.pre_start.ConnectionPool` during the test. + + """ + settings = type( + "SettingsStub", (), {"DATABASE_DSN": "postgresql://unit-test"} + )() + pool = build_fake_pool(build_fake_connection()) + + monkeypatch.setattr(pre_start, "Settings", lambda: settings) + monkeypatch.setattr(pre_start, "ConnectionPool", lambda dsn, open: pool) # noqa: A006 + + pre_start.init.__wrapped__() + + assert pool.connection_instance.executed == ["SELECT 1"], ( + f"Expected init to execute ['SELECT 1'], got " + f"{pool.connection_instance.executed}" + ) + assert pool.close_calls == 1, ( + f"Expected init to close the temporary pool once, got " + f"{pool.close_calls}" + ) + + +@pytest.mark.unit +def test_init_failure_still_closes_pool(monkeypatch) -> None: + """ + Verifies that `app.pre_start.init` still closes the temporary pool when the + connectivity check fails. This matters because the bootstrap path must not + leak pool resources during startup failures. + + Covers: + - `app.pre_start.init` + + Rationale: + This test monkeypatches `app.pre_start.Settings` and + `app.pre_start.ConnectionPool` because the bootstrap constructors are + not injectable in the current design. The fake pool forces the failure + path while keeping cleanup behavior observable. + + Fixtures: + - monkeypatch: Pytest fixture used to replace `app.pre_start.Settings` + and `app.pre_start.ConnectionPool` during the test. 
+ + """ + root_error = RuntimeError("db not reachable") + settings = type( + "SettingsStub", (), {"DATABASE_DSN": "postgresql://unit-test"} + )() + pool = build_fake_pool(build_fake_connection(error=root_error)) + + monkeypatch.setattr(pre_start, "Settings", lambda: settings) + monkeypatch.setattr(pre_start, "ConnectionPool", lambda dsn, open: pool) # noqa: A006 + + with pytest.raises(RuntimeError, match="db not reachable"): + pre_start.init.__wrapped__() + + assert pool.close_calls == 1, ( + f"Expected init failure path to close the pool once, got " + f"{pool.close_calls}" + ) + + +@pytest.mark.unit +def test_init_propagates_pool_creation_error(monkeypatch) -> None: + """ + Verifies that `app.pre_start.init` propagates pool-construction errors + instead of suppressing them. This matters because startup should fail loudly + when the pool cannot even be created. + + Covers: + - `app.pre_start.init` + + Rationale: + This test monkeypatches `app.pre_start.Settings` and + `app.pre_start.ConnectionPool` because the bootstrap constructors are + not injectable in the current design. The patched constructor raises + immediately to isolate the pool-creation error path. + + Fixtures: + - monkeypatch: Pytest fixture used to replace `app.pre_start.Settings` + and `app.pre_start.ConnectionPool` during the test. + """ + root_error = OSError("constructor failed") + settings = type( + "SettingsStub", (), {"DATABASE_DSN": "postgresql://unit-test"} + )() + + def raise_pool_error(dsn: str, open: bool): # noqa: A002, FBT001 + raise root_error + + monkeypatch.setattr(pre_start, "Settings", lambda: settings) + monkeypatch.setattr(pre_start, "ConnectionPool", raise_pool_error) + + with pytest.raises(OSError, match="constructor failed"): + pre_start.init.__wrapped__() + + +@pytest.mark.unit +def test_main_calls_init(monkeypatch) -> None: + """ + Verifies that `app.pre_start.main` delegates directly to + `app.pre_start.init`. 
This matters because the module entry point should + execute the same bootstrap path as direct calls to `init`. + + Covers: + - `app.pre_start.main` + + Rationale: + This test monkeypatches `app.pre_start.init` because the goal is to + confirm delegation, not to rerun the bootstrap side effects. + + Fixtures: + - monkeypatch: Pytest fixture used to replace `app.pre_start.init` + during the test. + + """ + calls: list[str] = [] + + def fake_init() -> None: + calls.append("called") + + monkeypatch.setattr(pre_start, "init", fake_init) + pre_start.main() + + assert calls == ["called"], f"Expected main to call init once, got {calls}" diff --git a/backend/tests/unit/__init__.py b/backend/tests/unit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/backend/tests/unit/test_auth.py b/backend/tests/unit/test_auth.py deleted file mode 100644 index 8697d20..0000000 --- a/backend/tests/unit/test_auth.py +++ /dev/null @@ -1,307 +0,0 @@ -"""Unit tests for JWT-based vendor authentication. 
- -Tests cover: -- Password hashing (bcrypt) -- Token creation and decoding -- Auth service logic (signup, login, refresh) -- Auth dependency (JWT validation) -""" - -from __future__ import annotations - -import uuid -from datetime import timedelta -from unittest.mock import MagicMock, patch - -import jwt as pyjwt -import pytest - -from app.api.deps import get_current_vendor_id -from app.core.config import Settings -from app.core.exceptions import AuthenticationException, ConflictException -from app.core.security import ( - create_access_token, - create_refresh_token, - decode_token, - get_password_hash, - verify_password, -) -from app.services.auth import login, refresh, signup - - -@pytest.fixture -def settings() -> Settings: - """Minimal settings for security tests.""" - return Settings( - SECRET_KEY="test-secret-key-for-unit-tests", - PROJECT_NAME="test", - POSTGRES_SERVER="localhost", - POSTGRES_USER="test", - POSTGRES_PASSWORD="test", - POSTGRES_DB="test", - ACCESS_TOKEN_EXPIRE_MINUTES=60, - REFRESH_TOKEN_EXPIRE_DAYS=7, - ) - - -@pytest.mark.unit -class TestPasswordHashing: - def test_hash_and_verify(self): - plain = "SuperSecret123!" 
- hashed = get_password_hash(plain) - - assert hashed != plain - assert hashed.startswith("$2") # bcrypt prefix - valid, _ = verify_password(plain, hashed) - assert valid is True - - def test_wrong_password_fails(self): - hashed = get_password_hash("correct-password") - valid, _ = verify_password("wrong-password", hashed) - assert valid is False - - -@pytest.mark.unit -class TestTokens: - def test_access_token_claims(self, settings: Settings): - vendor_id = str(uuid.uuid4()) - token = create_access_token(vendor_id, settings) - - payload = decode_token(token, settings) - assert payload["vendor_id"] == vendor_id - assert payload["token_type"] == "access" - assert "exp" in payload - - def test_refresh_token_claims(self, settings: Settings): - vendor_id = str(uuid.uuid4()) - token = create_refresh_token(vendor_id, settings) - - payload = decode_token(token, settings) - assert payload["vendor_id"] == vendor_id - assert payload["token_type"] == "refresh" - assert "exp" in payload - - def test_expired_token_raises(self, settings: Settings): - vendor_id = str(uuid.uuid4()) - token = create_access_token( - vendor_id, settings, expires_delta=timedelta(seconds=-1) - ) - - with pytest.raises(pyjwt.ExpiredSignatureError): - decode_token(token, settings) - - def test_invalid_signature_raises(self, settings: Settings): - vendor_id = str(uuid.uuid4()) - token = create_access_token(vendor_id, settings) - - bad_settings = Settings( - SECRET_KEY="different-secret", - PROJECT_NAME="test", - POSTGRES_SERVER="localhost", - POSTGRES_USER="test", - POSTGRES_PASSWORD="test", - POSTGRES_DB="test", - ) - with pytest.raises(pyjwt.InvalidSignatureError): - decode_token(token, bad_settings) - - -@pytest.mark.unit -class TestAuthService: - """Test auth service with mocked CRUD layer.""" - - def test_signup_success(self, settings: Settings): - cursor = MagicMock() - vendor_id = str(uuid.uuid4()) - - with ( - patch("app.services.auth.get_vendor_by_email", return_value=None), - patch( - 
"app.services.auth.create_vendor", - return_value={"id": vendor_id, "email": "v@test.com"}, - ), - ): - result = signup( - cursor, "v@test.com", "password123", "client-1", settings - ) - - assert result.vendor.id == vendor_id - assert result.vendor.email == "v@test.com" - - def test_signup_duplicate_email_raises(self, settings: Settings): - cursor = MagicMock() - - with patch( - "app.services.auth.get_vendor_by_email", - return_value={ - "id": "x", - "email": "v@test.com", - "password_hash": "h", - }, - ): - with pytest.raises(ConflictException): - signup( - cursor, "v@test.com", "password123", "client-1", settings - ) - - def test_login_success(self, settings: Settings): - cursor = MagicMock() - vendor_id = str(uuid.uuid4()) - hashed = get_password_hash("password123") - - with patch( - "app.services.auth.get_vendor_by_email", - return_value={ - "id": vendor_id, - "email": "v@test.com", - "password_hash": hashed, - }, - ): - result = login( - cursor, "v@test.com", "password123", "client-1", settings - ) - - assert result.access_token - assert result.refresh_token - assert result.token_type == "bearer" - - payload = decode_token(result.access_token, settings) - assert payload["vendor_id"] == vendor_id - assert payload["token_type"] == "access" - - def test_login_wrong_email_raises(self, settings: Settings): - cursor = MagicMock() - - with patch("app.services.auth.get_vendor_by_email", return_value=None): - with pytest.raises(AuthenticationException): - login( - cursor, "bad@test.com", "password123", "client-1", settings - ) - - def test_login_wrong_password_raises(self, settings: Settings): - cursor = MagicMock() - hashed = get_password_hash("correct-password") - - with patch( - "app.services.auth.get_vendor_by_email", - return_value={ - "id": "x", - "email": "v@test.com", - "password_hash": hashed, - }, - ): - with pytest.raises(AuthenticationException): - login( - cursor, "v@test.com", "wrong-password", "client-1", settings - ) - - def test_refresh_success(self, 
settings: Settings): - cursor = MagicMock() - vendor_id = str(uuid.uuid4()) - rt = create_refresh_token(vendor_id, settings) - - with patch( - "app.services.auth.get_vendor_by_id", - return_value={"id": vendor_id, "email": "v@test.com"}, - ): - result = refresh(rt, "client-1", cursor, settings) - - assert result.access_token - assert result.refresh_token - - def test_refresh_with_access_token_raises(self, settings: Settings): - cursor = MagicMock() - vendor_id = str(uuid.uuid4()) - at = create_access_token(vendor_id, settings) - - with pytest.raises(AuthenticationException, match="Invalid token type"): - refresh(at, "client-1", cursor, settings) - - def test_refresh_expired_raises(self, settings: Settings): - cursor = MagicMock() - vendor_id = str(uuid.uuid4()) - rt = create_refresh_token( - vendor_id, settings, expires_delta=timedelta(seconds=-1) - ) - - with pytest.raises(AuthenticationException): - refresh(rt, "client-1", cursor, settings) - - def test_refresh_deleted_vendor_raises(self, settings: Settings): - cursor = MagicMock() - vendor_id = str(uuid.uuid4()) - rt = create_refresh_token(vendor_id, settings) - - with patch("app.services.auth.get_vendor_by_id", return_value=None): - with pytest.raises( - AuthenticationException, match="Vendor not found" - ): - refresh(rt, "client-1", cursor, settings) - - def test_signup_concurrent_insert_conflict(self, settings: Settings): - """Pre-read shows no existing vendor, but the insert collides - (create_vendor returns None due to ON CONFLICT DO NOTHING). 
- """ - cursor = MagicMock() - - with ( - patch("app.services.auth.get_vendor_by_email", return_value=None), - patch("app.services.auth.create_vendor", return_value=None), - ): - with pytest.raises(ConflictException): - signup( - cursor, "race@test.com", "password123", "client-1", settings - ) - - -@pytest.mark.unit -class TestGetCurrentVendorId: - def test_valid_access_token(self, settings: Settings): - vendor_id = str(uuid.uuid4()) - token = create_access_token(vendor_id, settings) - creds = MagicMock() - creds.credentials = token - - result = get_current_vendor_id(creds, settings) - assert result == vendor_id - - def test_missing_credentials_raises(self, settings: Settings): - with pytest.raises(AuthenticationException, match="Missing"): - get_current_vendor_id(None, settings) - - def test_refresh_token_rejected(self, settings: Settings): - vendor_id = str(uuid.uuid4()) - token = create_refresh_token(vendor_id, settings) - creds = MagicMock() - creds.credentials = token - - with pytest.raises(AuthenticationException, match="Invalid token type"): - get_current_vendor_id(creds, settings) - - def test_expired_token_raises(self, settings: Settings): - vendor_id = str(uuid.uuid4()) - token = create_access_token( - vendor_id, settings, expires_delta=timedelta(seconds=-1) - ) - creds = MagicMock() - creds.credentials = token - - with pytest.raises(AuthenticationException): - get_current_vendor_id(creds, settings) - - def test_garbage_token_raises(self, settings: Settings): - creds = MagicMock() - creds.credentials = "not.a.jwt" - - with pytest.raises(AuthenticationException): - get_current_vendor_id(creds, settings) - - def test_malformed_vendor_id_raises(self, settings: Settings): - token = create_access_token("invalid-uuid", settings) - creds = MagicMock() - creds.credentials = token - - with pytest.raises( - AuthenticationException, match="Invalid token payload" - ): - get_current_vendor_id(creds, settings) diff --git 
a/backend/tests/unit/test_license_key_generator.py b/backend/tests/unit/test_license_key_generator.py deleted file mode 100644 index d2a26ef..0000000 --- a/backend/tests/unit/test_license_key_generator.py +++ /dev/null @@ -1,308 +0,0 @@ -"""Unit tests for the license key generator module. - -Tests cover: -- Single key format validation against the required pattern. -- Cryptographic randomness (keys are non-deterministic). -- Batch generation with intra-batch uniqueness guarantees. -- Collision detection and bounded retry behaviour. -- LicenseKeyGenerationError raised after retry exhaustion. -- BatchMetadata attachment and propagation. -- Edge cases: count validation, empty/None existing-key sets. -""" - -from __future__ import annotations - -from unittest.mock import patch - -import pytest - -from app.core.exceptions import LicenseKeyGenerationError -from app.schemas.response import ErrorCode -from app.services.license_key_generator import ( - ALPHABET, - LICENSE_KEY_PATTERN, - MAX_RETRIES, - BatchMetadata, - GeneratedLicenseKey, - generate_license_key, - generate_license_keys_batch, -) - - -@pytest.mark.unit -class TestLicenseKeyFormat: - """Verify generated keys match the ``XXXX-XXXX-XXXX-XXXX`` pattern.""" - - def test_single_key_matches_pattern(self): - result = generate_license_key() - assert LICENSE_KEY_PATTERN.match(result.key), ( - f"Key {result.key!r} does not match the required pattern" - ) - - def test_key_length_is_19_characters(self): - result = generate_license_key() - assert len(result.key) == 19 - - def test_key_has_four_segments(self): - result = generate_license_key() - segments = result.key.split("-") - assert len(segments) == 4 - - def test_each_segment_has_four_characters(self): - result = generate_license_key() - for segment in result.key.split("-"): - assert len(segment) == 4 - - def test_key_uses_only_uppercase_alphanumeric(self): - result = generate_license_key() - raw = result.key.replace("-", "") - assert all(c in ALPHABET for c in raw) - 
- def test_multiple_keys_all_match_pattern(self): - for _ in range(50): - result = generate_license_key() - assert LICENSE_KEY_PATTERN.match(result.key) - - -@pytest.mark.unit -class TestKeyUniqueness: - """Verify keys are unique across invocations.""" - - def test_generated_keys_are_unique(self): - keys = {generate_license_key().key for _ in range(100)} - assert len(keys) == 100 - - def test_collision_with_existing_keys_triggers_retry(self): - first = generate_license_key() - existing = {first.key} - second = generate_license_key(existing_keys=existing) - assert second.key != first.key - assert second.key not in existing - - -@pytest.mark.unit -class TestCollisionRetryExhaustion: - """Verify LicenseKeyGenerationError is raised when retries are exhausted.""" - - def test_raises_after_max_retries(self): - colliding_key = "AAAA-BBBB-CCCC-DDDD" - existing = {colliding_key} - - with patch( - "app.services.license_key_generator._generate_raw_key", - return_value=colliding_key, - ): - with pytest.raises(LicenseKeyGenerationError) as exc_info: - generate_license_key(existing_keys=existing) - - assert str(MAX_RETRIES) in str(exc_info.value) - - def test_error_has_correct_error_code(self): - colliding_key = "XXXX-YYYY-ZZZZ-1234" - existing = {colliding_key} - - with patch( - "app.services.license_key_generator._generate_raw_key", - return_value=colliding_key, - ): - with pytest.raises(LicenseKeyGenerationError) as exc_info: - generate_license_key(existing_keys=existing) - - assert ( - exc_info.value.error_code - == ErrorCode.LICENSE_KEY_GENERATION_ERROR - ) - - def test_error_has_500_status(self): - colliding_key = "AAAA-BBBB-CCCC-DDDD" - existing = {colliding_key} - - with patch( - "app.services.license_key_generator._generate_raw_key", - return_value=colliding_key, - ): - with pytest.raises(LicenseKeyGenerationError) as exc_info: - generate_license_key(existing_keys=existing) - - assert exc_info.value.http_status == 500 - - def test_succeeds_on_last_retry(self): - 
colliding_key = "AAAA-BBBB-CCCC-DDDD" - unique_key = "ZZZZ-9999-YYYY-8888" - existing = {colliding_key} - - side_effects = [colliding_key] * (MAX_RETRIES - 1) + [unique_key] - - with patch( - "app.services.license_key_generator._generate_raw_key", - side_effect=side_effects, - ): - result = generate_license_key(existing_keys=existing) - assert result.key == unique_key - - def test_invalid_format_triggers_retry(self): - invalid_key = "aaaa-bbbb-cccc-dddd" - valid_key = "AAAA-BBBB-CCCC-DDDD" - - with patch( - "app.services.license_key_generator._generate_raw_key", - side_effect=[invalid_key, valid_key], - ): - result = generate_license_key() - assert result.key == valid_key - - -@pytest.mark.unit -class TestBatchMetadata: - """Verify batch metadata is correctly attached to generated keys.""" - - def test_default_metadata_is_empty(self): - result = generate_license_key() - assert result.metadata.batch_id is None - assert result.metadata.campaign is None - assert result.metadata.issued_by is None - - def test_metadata_attached_to_single_key(self): - meta = BatchMetadata( - batch_id="batch-001", - campaign="summer-sale", - issued_by="admin@example.com", - ) - result = generate_license_key(metadata=meta) - - assert result.metadata.batch_id == "batch-001" - assert result.metadata.campaign == "summer-sale" - assert result.metadata.issued_by == "admin@example.com" - - def test_metadata_propagated_in_batch(self): - meta = BatchMetadata( - batch_id="batch-002", campaign="launch", issued_by="system" - ) - results = generate_license_keys_batch(count=5, metadata=meta) - - for result in results: - assert result.metadata.batch_id == "batch-002" - assert result.metadata.campaign == "launch" - assert result.metadata.issued_by == "system" - - def test_partial_metadata(self): - meta = BatchMetadata(batch_id="batch-003") - result = generate_license_key(metadata=meta) - - assert result.metadata.batch_id == "batch-003" - assert result.metadata.campaign is None - assert 
result.metadata.issued_by is None - - def test_metadata_is_frozen(self): - meta = BatchMetadata(batch_id="batch-004") - with pytest.raises(AttributeError): - meta.batch_id = "modified" # type: ignore[misc] - - -@pytest.mark.unit -class TestBatchGeneration: - """Verify batch generation produces correct counts and unique keys.""" - - def test_batch_returns_correct_count(self): - results = generate_license_keys_batch(count=10) - assert len(results) == 10 - - def test_batch_keys_are_unique(self): - results = generate_license_keys_batch(count=50) - keys = [r.key for r in results] - assert len(set(keys)) == 50 - - def test_batch_keys_all_match_pattern(self): - results = generate_license_keys_batch(count=20) - for result in results: - assert LICENSE_KEY_PATTERN.match(result.key) - - def test_batch_excludes_existing_keys(self): - existing = {"AAAA-BBBB-CCCC-DDDD", "EEEE-FFFF-0000-1111"} - results = generate_license_keys_batch(count=5, existing_keys=existing) - - for result in results: - assert result.key not in existing - - def test_batch_count_zero_raises_value_error(self): - with pytest.raises(ValueError, match="count must be >= 1"): - generate_license_keys_batch(count=0) - - def test_batch_count_negative_raises_value_error(self): - with pytest.raises(ValueError, match="count must be >= 1"): - generate_license_keys_batch(count=-1) - - def test_batch_single_item(self): - results = generate_license_keys_batch(count=1) - assert len(results) == 1 - assert LICENSE_KEY_PATTERN.match(results[0].key) - - -@pytest.mark.unit -class TestExistingKeysHandling: - """Verify edge cases around the existing_keys parameter.""" - - def test_none_existing_keys_accepted(self): - result = generate_license_key(existing_keys=None) - assert LICENSE_KEY_PATTERN.match(result.key) - - def test_empty_set_existing_keys_accepted(self): - result = generate_license_key(existing_keys=set()) - assert LICENSE_KEY_PATTERN.match(result.key) - - def test_batch_with_none_existing_keys(self): - results = 
generate_license_keys_batch(count=3, existing_keys=None) - assert len(results) == 3 - - -@pytest.mark.unit -class TestGeneratedLicenseKeyDataclass: - """Verify the GeneratedLicenseKey container behaves correctly.""" - - def test_key_is_accessible(self): - result = generate_license_key() - assert isinstance(result.key, str) - - def test_metadata_is_accessible(self): - result = generate_license_key() - assert isinstance(result.metadata, BatchMetadata) - - def test_return_type(self): - result = generate_license_key() - assert isinstance(result, GeneratedLicenseKey) - - -@pytest.mark.unit -class TestLicenseKeyPattern: - """Verify the LICENSE_KEY_PATTERN regex itself.""" - - @pytest.mark.parametrize( - "key", - [ - pytest.param("ABCD-EFGH-1234-5678", id="valid_mixed"), - pytest.param("AAAA-BBBB-CCCC-DDDD", id="valid_all_alpha"), - pytest.param("1111-2222-3333-4444", id="valid_all_numeric"), - pytest.param("A1B2-C3D4-E5F6-G7H8", id="valid_alternating"), - ], - ) - def test_valid_keys_match(self, key): - assert LICENSE_KEY_PATTERN.match(key) - - @pytest.mark.parametrize( - "key", - [ - pytest.param("abcd-efgh-1234-5678", id="lowercase_rejected"), - pytest.param("ABCD-EFGH-1234", id="three_segments_rejected"), - pytest.param( - "ABCDE-FGHI-1234-5678", id="five_char_segment_rejected" - ), - pytest.param("ABCD EFGH 1234 5678", id="spaces_rejected"), - pytest.param("ABCD-EFGH-1234-567!", id="special_char_rejected"), - pytest.param("", id="empty_string_rejected"), - pytest.param( - "ABCD-EFGH-1234-5678-9ABC", id="five_segments_rejected" - ), - ], - ) - def test_invalid_keys_rejected(self, key): - assert LICENSE_KEY_PATTERN.match(key) is None diff --git a/migrations/03_app.sql b/migrations/03_app.sql index fac052a..1519667 100644 --- a/migrations/03_app.sql +++ b/migrations/03_app.sql @@ -203,7 +203,7 @@ DO $$ BEGIN "license_id" UUID PRIMARY KEY REFERENCES app."licenses"("id") ON DELETE RESTRICT, - "license_key" TEXT NOT NULL UNIQUE, + "activation_code" TEXT NOT NULL UNIQUE, 
"device_fingerprint_hash" TEXT, "max_sessions" INTEGER NOT NULL DEFAULT 1, CONSTRAINT "node_locked_max_sessions_positive" @@ -215,8 +215,8 @@ END $$; COMMENT ON TABLE "app"."node_locked_license_data" IS 'Extension table for the node-locked license subtype. Presence of a row indicates the parent license is node-locked. Future subtypes each get their own extension table; no type discriminator is needed on app."licenses". Write through app.v_license_node_locked only. Direct writes bypass audit triggers.'; COMMENT ON COLUMN "app"."node_locked_license_data"."license_id" IS 'FK to and PK of the parent license (app."licenses"). Enforces a strict 1:1 relationship. RESTRICT prevents parent deletion while this extension row exists.'; -COMMENT ON COLUMN "app"."node_locked_license_data"."license_key" IS 'Cryptographically random activation key distributed to the customer. Globally unique across all licenses.'; -COMMENT ON COLUMN "app"."node_locked_license_data"."device_fingerprint_hash" IS 'SHA-256 hash of device identifiers (BIOS UUID + CPU serial + disk serial). Computed server-side. NULL until first activation; locked to the value stored in app."sessions"."device_fingerprint_hash" on the first successful heartbeat for this license.'; +COMMENT ON COLUMN "app"."node_locked_license_data"."activation_code" IS 'Unique activation code that is provided on the purchase of license. Generated from the license_id on server-side'; +COMMENT ON COLUMN "app"."node_locked_license_data"."device_fingerprint_hash" IS 'SHA-256 hash of device identifiers. Computed server-side. NULL until first activation; locked to the value stored in app."sessions"."device_fingerprint_hash" on the first successful heartbeat for this license.'; COMMENT ON COLUMN "app"."node_locked_license_data"."max_sessions" IS 'Maximum number of concurrent ACTIVE sessions permitted on this device. Default 1. 
Prevents multi-process execution of a single node-locked license.'; -- ============================================================ @@ -471,7 +471,7 @@ DO $$ BEGIN app."licenses"."updated_at", app."licenses"."deleted_at", -- Node-locked extension columns - app."node_locked_license_data"."license_key", + app."node_locked_license_data"."activation_code", app."node_locked_license_data"."device_fingerprint_hash", app."node_locked_license_data"."max_sessions" FROM "app"."licenses" diff --git a/migrations/04_audit.sql b/migrations/04_audit.sql index a651e90..2d65a9a 100644 --- a/migrations/04_audit.sql +++ b/migrations/04_audit.sql @@ -109,7 +109,7 @@ SET LOCAL ROLE "audit_owner"; -- MODIFIED: -- {"metadata": } -- {"device_fingerprint_hash": ""} --- {"license_key": "rotated"} -- value never recorded +-- {"activation_code": "rotated"} -- value never recorded -- TOKEN_ROTATED: null — fact of rotation is sufficient -- PASSWORD_CHANGED: null — hash values never recorded -- diff --git a/migrations/06_license_key_updates.sql b/migrations/06_license_key_updates.sql deleted file mode 100644 index 6248431..0000000 --- a/migrations/06_license_key_updates.sql +++ /dev/null @@ -1,48 +0,0 @@ --- ============================================================ --- Migration : License Key Updates — Batch Metadata & Key Width --- Platform : LaaS (License as a Service) --- Database : PostgreSQL 18 --- Run order : 06 — after 05_rls.sql --- Depends on: 03_app.sql --- ============================================================ --- --- PURPOSE --- 1. Widens node_locked_license_data.license_key from TEXT to --- VARCHAR(24) to enforce the 19-char format upper bound. --- 2. Adds optional batch metadata columns (batch_id, campaign, --- issued_by) to the licenses table. --- --- IDEMPOTENCY --- Safe to re-run. ALTER COLUMN TYPE is a no-op when already --- the target type; ADD COLUMN uses IF NOT EXISTS. --- --- TRANSACTION --- Wrapped in BEGIN / COMMIT — all-or-nothing. 
--- ============================================================ - -BEGIN; - -SET LOCAL ROLE app_owner; - --- ------------------------------------------------------- --- 1. Widen license_key to VARCHAR(24) --- ------------------------------------------------------- -ALTER TABLE app."node_locked_license_data" - ALTER COLUMN "license_key" TYPE VARCHAR(24); - -COMMENT ON COLUMN app."node_locked_license_data"."license_key" - IS 'Cryptographically random activation key (format XXXX-XXXX-XXXX-XXXX, 19 chars). VARCHAR(24) allows headroom for future format changes.'; - --- ------------------------------------------------------- --- 2. Add batch metadata columns to licenses --- ------------------------------------------------------- -ALTER TABLE app."licenses" - ADD COLUMN IF NOT EXISTS "batch_id" TEXT, - ADD COLUMN IF NOT EXISTS "campaign" TEXT, - ADD COLUMN IF NOT EXISTS "issued_by" TEXT; - -COMMENT ON COLUMN app."licenses"."batch_id" IS 'Optional identifier grouping licenses that belong to the same issuance batch.'; -COMMENT ON COLUMN app."licenses"."campaign" IS 'Optional marketing or distribution campaign associated with the license.'; -COMMENT ON COLUMN app."licenses"."issued_by" IS 'Optional identifier (user-id, email, or service name) of the entity that triggered license creation.'; - -COMMIT; diff --git a/migrations/07_audit_triggers.sql b/migrations/07_audit_triggers.sql index bf356d4..92df524 100644 --- a/migrations/07_audit_triggers.sql +++ b/migrations/07_audit_triggers.sql @@ -55,7 +55,7 @@ -- Previous values only — not before/after pairs. -- The current value is always queryable from the live table. -- Sensitive values (password_hash, session_token_hash, --- license_key value) are never recorded. Presence of the +-- activation_code value) are never recorded. Presence of the -- action code is sufficient to know the change occurred. -- -- VENDOR ACTOR RESOLUTION @@ -257,11 +257,11 @@ BEGIN -- on the base table fires automatically when not supplied. 
INSERT INTO app."node_locked_license_data" ( "license_id", - "license_key", + "activation_code", "device_fingerprint_hash" ) VALUES ( v_license_id, - NEW."license_key", + NEW."activation_code", NEW."device_fingerprint_hash" ); @@ -293,7 +293,7 @@ BEGIN WHERE "id" = v_license_id; UPDATE app."node_locked_license_data" SET - "license_key" = NEW."license_key", + "activation_code" = NEW."activation_code", "device_fingerprint_hash" = NEW."device_fingerprint_hash", "max_sessions" = NEW."max_sessions" WHERE "license_id" = v_license_id; @@ -357,10 +357,10 @@ BEGIN jsonb_build_object('device_fingerprint_hash', OLD."device_fingerprint_hash"); END IF; - IF OLD."license_key" IS DISTINCT FROM NEW."license_key" THEN + IF OLD."activation_code" IS DISTINCT FROM NEW."activation_code" THEN -- Key value is never recorded — fact of rotation is sufficient. v_modified_diff := v_modified_diff || - jsonb_build_object('license_key', 'rotated'); + jsonb_build_object('activation_code', 'rotated'); END IF; IF v_modified_diff != '{}'::JSONB THEN diff --git a/migrations/down/06_license_key_updates_down.sql b/migrations/down/06_license_key_updates_down.sql deleted file mode 100644 index 0a02523..0000000 --- a/migrations/down/06_license_key_updates_down.sql +++ /dev/null @@ -1,31 +0,0 @@ --- ============================================================ --- Downgrade : License Key Updates --- Platform : LaaS (License as a Service) --- Database : PostgreSQL 18 --- Run order : 06 — downgrade counterpart of 06_license_key_updates.sql --- ============================================================ --- --- PURPOSE --- Reverts the changes from 06_license_key_updates.sql: --- 1. Restores license_key back to TEXT. --- 2. Drops batch metadata columns from licenses. --- --- IDEMPOTENCY --- Safe to re-run. DROP COLUMN uses IF EXISTS. 
--- ============================================================ - -BEGIN; - -SET LOCAL ROLE app_owner; - --- Restore license_key to TEXT -ALTER TABLE app."node_locked_license_data" - ALTER COLUMN "license_key" TYPE TEXT; - --- Drop batch metadata columns -ALTER TABLE app."licenses" - DROP COLUMN IF EXISTS "batch_id", - DROP COLUMN IF EXISTS "campaign", - DROP COLUMN IF EXISTS "issued_by"; - -COMMIT; diff --git a/migrations/tests/helpers.py b/migrations/tests/helpers.py index 82f7f5e..1afbb2c 100644 --- a/migrations/tests/helpers.py +++ b/migrations/tests/helpers.py @@ -1,8 +1,8 @@ from __future__ import annotations import hashlib -import shlex import re +import shlex import time import uuid from datetime import datetime, timezone @@ -128,14 +128,14 @@ def insert_license( def insert_node_locked( conn: psycopg.Connection, license_id: uuid.UUID, - license_key: str, + activation_code: str, max_sessions: int = 1, ) -> None: conn.execute( 'INSERT INTO app."node_locked_license_data" ' - '("license_id", "license_key", "max_sessions") ' + '("license_id", "activation_code", "max_sessions") ' "VALUES (%s, %s, %s)", - (license_id, license_key, max_sessions), + (license_id, activation_code, max_sessions), ) diff --git a/migrations/tests/test_constraints.py b/migrations/tests/test_constraints.py index 9992d18..7a19e2e 100644 --- a/migrations/tests/test_constraints.py +++ b/migrations/tests/test_constraints.py @@ -117,7 +117,7 @@ def test_license_key_unique_enforced(superconn): @pytest.mark.parametrize( - "table_name,expected_cols", + ("table_name", "expected_cols"), [ pytest.param( "audit_log_vendor_actors", diff --git a/migrations/tests/test_indexes.py b/migrations/tests/test_indexes.py index 441e28d..1abe2db 100644 --- a/migrations/tests/test_indexes.py +++ b/migrations/tests/test_indexes.py @@ -1,6 +1,5 @@ from __future__ import annotations -import psycopg import psycopg.rows import pytest @@ -8,7 +7,7 @@ @pytest.mark.parametrize( - "schema,table,index_name", + ("schema", 
"table", "index_name"), [ pytest.param( "app", "vendors", "vendors_email_lower_idx", id="vendors_email_lower" diff --git a/migrations/tests/test_partitioning.py b/migrations/tests/test_partitioning.py index 80f262c..2cf1356 100644 --- a/migrations/tests/test_partitioning.py +++ b/migrations/tests/test_partitioning.py @@ -11,7 +11,7 @@ @pytest.mark.parametrize( - "email,heartbeat_at,partition", + ("email", "heartbeat_at", "partition"), [ pytest.param( "part-q1@example.com", diff --git a/migrations/tests/test_privileges.py b/migrations/tests/test_privileges.py index 0b3775d..83edee7 100644 --- a/migrations/tests/test_privileges.py +++ b/migrations/tests/test_privileges.py @@ -17,7 +17,7 @@ @pytest.mark.parametrize( - "role,sql_stmt,params", + ("role", "sql_stmt", "params"), [ pytest.param( "app_reader_rls", @@ -81,7 +81,7 @@ def test_app_deleter_can_delete(superconn): @pytest.mark.parametrize( - "role,sql_stmt,params", + ("role", "sql_stmt", "params"), [ pytest.param( "app_reader_rls", diff --git a/migrations/tests/test_rls_isolation.py b/migrations/tests/test_rls_isolation.py index 4a3160c..dd6a3c5 100644 --- a/migrations/tests/test_rls_isolation.py +++ b/migrations/tests/test_rls_isolation.py @@ -55,7 +55,7 @@ def test_vendor_isolation_insert(superconn): superconn.execute( 'INSERT INTO app."v_license_node_locked" ' - '("vendor_id","license_status_code","max_grace_secs","license_key") ' + '("vendor_id","license_status_code","max_grace_secs","activation_code") ' "VALUES (%s,%s,%s,%s)", (vendor_a_id, "ACTIVE", 60, "key_insert_77_a"), ) @@ -63,7 +63,7 @@ def test_vendor_isolation_insert(superconn): with pytest.raises(psycopg.errors.InsufficientPrivilege): superconn.execute( 'INSERT INTO app."v_license_node_locked" ' - '("vendor_id","license_status_code","max_grace_secs","license_key") ' + '("vendor_id","license_status_code","max_grace_secs","activation_code") ' "VALUES (%s,%s,%s,%s)", (vendor_b_id, "ACTIVE", 60, "key_insert_77_b"), ) diff --git 
a/migrations/tests/test_rls_structure.py b/migrations/tests/test_rls_structure.py index 3f6d443..9b69ea5 100644 --- a/migrations/tests/test_rls_structure.py +++ b/migrations/tests/test_rls_structure.py @@ -27,7 +27,7 @@ def test_rls_enabled_on_tenant_table(superconn, table): @pytest.mark.parametrize( - "table,policy", + ("table", "policy"), [ pytest.param("licenses", "licenses_select_own", id="licenses_select"), pytest.param("licenses", "licenses_insert_own", id="licenses_insert"), @@ -93,7 +93,7 @@ def test_rls_enabled_on_audit_table(superconn, table): @pytest.mark.parametrize( - "table,policy", + ("table", "policy"), [ pytest.param("audit_logs", "audit_logs_insert_writer", id="audit_logs_insert"), pytest.param( @@ -226,7 +226,7 @@ def test_audit_immutability_function_owned_by_audit_owner(superconn): @pytest.mark.parametrize( - "function_name,expected_owner", + ("function_name", "expected_owner"), [ pytest.param( "_insert_log", "audit_owner", id="insert_log_owned_by_audit_owner" diff --git a/migrations/tests/test_seed_data.py b/migrations/tests/test_seed_data.py index 4c1b905..0a4e121 100644 --- a/migrations/tests/test_seed_data.py +++ b/migrations/tests/test_seed_data.py @@ -7,7 +7,7 @@ @pytest.mark.parametrize( - "table_name,expected_codes", + ("table_name", "expected_codes"), [ pytest.param( "license_statuses", @@ -49,7 +49,7 @@ def test_error_codes_seed_count(superconn): @pytest.mark.parametrize( - "table_name,expected_count", + ("table_name", "expected_count"), [ pytest.param("error_codes", 12, id="error_codes"), pytest.param("actions", 15, id="actions"), diff --git a/uv.lock b/uv.lock index 6652a70..85a2be1 100644 --- a/uv.lock +++ b/uv.lock @@ -92,6 +92,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ee/82/82745642d3c46e7cea25e1885b014b033f4693346ce46b7f47483cf5d448/argon2_cffi_bindings-25.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:da0c79c23a63723aa5d782250fbf51b768abca630285262fb5144ba5ae01e520", size = 29187, upload-time = 
"2025-07-30T10:02:03.674Z" }, ] +[[package]] +name = "base58" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7f/45/8ae61209bb9015f516102fa559a2914178da1d5868428bd86a1b4421141d/base58-2.1.1.tar.gz", hash = "sha256:c5d0cb3f5b6e81e8e35da5754388ddcc6d0d14b6c6a132cb93d69ed580a7278c", size = 6528, upload-time = "2021-10-30T22:12:17.858Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/45/ec96b29162a402fc4c1c5512d114d7b3787b9d1c2ec241d9568b4816ee23/base58-2.1.1-py3-none-any.whl", hash = "sha256:11a36f4d3ce51dfc1043f3218591ac4eb1ceb172919cebe05b52a5bcc8d245c2", size = 5621, upload-time = "2021-10-30T22:12:16.658Z" }, +] + [[package]] name = "bcrypt" version = "5.0.0" @@ -476,6 +485,71 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0d/4a/331fe2caf6799d591109bb9c08083080f6de90a823695d412a935622abb2/coverage-7.13.4-py3-none-any.whl", hash = "sha256:1af1641e57cf7ba1bd67d677c9abdbcd6cc2ab7da3bca7fa1e2b7e50e65f2ad0", size = 211242, upload-time = "2026-02-09T12:59:02.032Z" }, ] +[package.optional-dependencies] +toml = [ + { name = "tomli", marker = "python_full_version <= '3.11'" }, +] + +[[package]] +name = "cryptography" +version = "46.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/04/ee2a9e8542e4fa2773b81771ff8349ff19cdd56b7258a0cc442639052edb/cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d", size = 750064, upload-time = "2026-02-10T19:18:38.255Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/81/b0bb27f2ba931a65409c6b8a8b358a7f03c0e46eceacddff55f7c84b1f3b/cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = 
"sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad", size = 7176289, upload-time = "2026-02-10T19:17:08.274Z" }, + { url = "https://files.pythonhosted.org/packages/ff/9e/6b4397a3e3d15123de3b1806ef342522393d50736c13b20ec4c9ea6693a6/cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b", size = 4275637, upload-time = "2026-02-10T19:17:10.53Z" }, + { url = "https://files.pythonhosted.org/packages/63/e7/471ab61099a3920b0c77852ea3f0ea611c9702f651600397ac567848b897/cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b", size = 4424742, upload-time = "2026-02-10T19:17:12.388Z" }, + { url = "https://files.pythonhosted.org/packages/37/53/a18500f270342d66bf7e4d9f091114e31e5ee9e7375a5aba2e85a91e0044/cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263", size = 4277528, upload-time = "2026-02-10T19:17:13.853Z" }, + { url = "https://files.pythonhosted.org/packages/22/29/c2e812ebc38c57b40e7c583895e73c8c5adb4d1e4a0cc4c5a4fdab2b1acc/cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d", size = 4947993, upload-time = "2026-02-10T19:17:15.618Z" }, + { url = "https://files.pythonhosted.org/packages/6b/e7/237155ae19a9023de7e30ec64e5d99a9431a567407ac21170a046d22a5a3/cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed", size = 4456855, upload-time = "2026-02-10T19:17:17.221Z" }, + { url = "https://files.pythonhosted.org/packages/2d/87/fc628a7ad85b81206738abbd213b07702bcbdada1dd43f72236ef3cffbb5/cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = 
"sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2", size = 3984635, upload-time = "2026-02-10T19:17:18.792Z" }, + { url = "https://files.pythonhosted.org/packages/84/29/65b55622bde135aedf4565dc509d99b560ee4095e56989e815f8fd2aa910/cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2", size = 4277038, upload-time = "2026-02-10T19:17:20.256Z" }, + { url = "https://files.pythonhosted.org/packages/bc/36/45e76c68d7311432741faf1fbf7fac8a196a0a735ca21f504c75d37e2558/cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0", size = 4912181, upload-time = "2026-02-10T19:17:21.825Z" }, + { url = "https://files.pythonhosted.org/packages/6d/1a/c1ba8fead184d6e3d5afcf03d569acac5ad063f3ac9fb7258af158f7e378/cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731", size = 4456482, upload-time = "2026-02-10T19:17:25.133Z" }, + { url = "https://files.pythonhosted.org/packages/f9/e5/3fb22e37f66827ced3b902cf895e6a6bc1d095b5b26be26bd13c441fdf19/cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82", size = 4405497, upload-time = "2026-02-10T19:17:26.66Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/9d58bb32b1121a8a2f27383fabae4d63080c7ca60b9b5c88be742be04ee7/cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1", size = 4667819, upload-time = "2026-02-10T19:17:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ed/325d2a490c5e94038cdb0117da9397ece1f11201f425c4e9c57fe5b9f08b/cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48", size = 3028230, 
upload-time = "2026-02-10T19:17:30.518Z" }, + { url = "https://files.pythonhosted.org/packages/e9/5a/ac0f49e48063ab4255d9e3b79f5def51697fce1a95ea1370f03dc9db76f6/cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4", size = 3480909, upload-time = "2026-02-10T19:17:32.083Z" }, + { url = "https://files.pythonhosted.org/packages/00/13/3d278bfa7a15a96b9dc22db5a12ad1e48a9eb3d40e1827ef66a5df75d0d0/cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2", size = 7119287, upload-time = "2026-02-10T19:17:33.801Z" }, + { url = "https://files.pythonhosted.org/packages/67/c8/581a6702e14f0898a0848105cbefd20c058099e2c2d22ef4e476dfec75d7/cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678", size = 4265728, upload-time = "2026-02-10T19:17:35.569Z" }, + { url = "https://files.pythonhosted.org/packages/dd/4a/ba1a65ce8fc65435e5a849558379896c957870dd64fecea97b1ad5f46a37/cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87", size = 4408287, upload-time = "2026-02-10T19:17:36.938Z" }, + { url = "https://files.pythonhosted.org/packages/f8/67/8ffdbf7b65ed1ac224d1c2df3943553766914a8ca718747ee3871da6107e/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee", size = 4270291, upload-time = "2026-02-10T19:17:38.748Z" }, + { url = "https://files.pythonhosted.org/packages/f8/e5/f52377ee93bc2f2bba55a41a886fd208c15276ffbd2569f2ddc89d50e2c5/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981", size = 4927539, upload-time = "2026-02-10T19:17:40.241Z" 
}, + { url = "https://files.pythonhosted.org/packages/3b/02/cfe39181b02419bbbbcf3abdd16c1c5c8541f03ca8bda240debc467d5a12/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9", size = 4442199, upload-time = "2026-02-10T19:17:41.789Z" }, + { url = "https://files.pythonhosted.org/packages/c0/96/2fcaeb4873e536cf71421a388a6c11b5bc846e986b2b069c79363dc1648e/cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648", size = 3960131, upload-time = "2026-02-10T19:17:43.379Z" }, + { url = "https://files.pythonhosted.org/packages/d8/d2/b27631f401ddd644e94c5cf33c9a4069f72011821cf3dc7309546b0642a0/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4", size = 4270072, upload-time = "2026-02-10T19:17:45.481Z" }, + { url = "https://files.pythonhosted.org/packages/f4/a7/60d32b0370dae0b4ebe55ffa10e8599a2a59935b5ece1b9f06edb73abdeb/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0", size = 4892170, upload-time = "2026-02-10T19:17:46.997Z" }, + { url = "https://files.pythonhosted.org/packages/d2/b9/cf73ddf8ef1164330eb0b199a589103c363afa0cf794218c24d524a58eab/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663", size = 4441741, upload-time = "2026-02-10T19:17:48.661Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/eee00b28c84c726fe8fa0158c65afe312d9c3b78d9d01daf700f1f6e37ff/cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826", size = 4396728, upload-time = "2026-02-10T19:17:50.058Z" }, + { url = 
"https://files.pythonhosted.org/packages/65/f4/6bc1a9ed5aef7145045114b75b77c2a8261b4d38717bd8dea111a63c3442/cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d", size = 4652001, upload-time = "2026-02-10T19:17:51.54Z" }, + { url = "https://files.pythonhosted.org/packages/86/ef/5d00ef966ddd71ac2e6951d278884a84a40ffbd88948ef0e294b214ae9e4/cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a", size = 3003637, upload-time = "2026-02-10T19:17:52.997Z" }, + { url = "https://files.pythonhosted.org/packages/b7/57/f3f4160123da6d098db78350fdfd9705057aad21de7388eacb2401dceab9/cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4", size = 3469487, upload-time = "2026-02-10T19:17:54.549Z" }, + { url = "https://files.pythonhosted.org/packages/e2/fa/a66aa722105ad6a458bebd64086ca2b72cdd361fed31763d20390f6f1389/cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31", size = 7170514, upload-time = "2026-02-10T19:17:56.267Z" }, + { url = "https://files.pythonhosted.org/packages/0f/04/c85bdeab78c8bc77b701bf0d9bdcf514c044e18a46dcff330df5448631b0/cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18", size = 4275349, upload-time = "2026-02-10T19:17:58.419Z" }, + { url = "https://files.pythonhosted.org/packages/5c/32/9b87132a2f91ee7f5223b091dc963055503e9b442c98fc0b8a5ca765fab0/cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235", size = 4420667, upload-time = "2026-02-10T19:18:00.619Z" }, + { url = 
"https://files.pythonhosted.org/packages/a1/a6/a7cb7010bec4b7c5692ca6f024150371b295ee1c108bdc1c400e4c44562b/cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a", size = 4276980, upload-time = "2026-02-10T19:18:02.379Z" }, + { url = "https://files.pythonhosted.org/packages/8e/7c/c4f45e0eeff9b91e3f12dbd0e165fcf2a38847288fcfd889deea99fb7b6d/cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76", size = 4939143, upload-time = "2026-02-10T19:18:03.964Z" }, + { url = "https://files.pythonhosted.org/packages/37/19/e1b8f964a834eddb44fa1b9a9976f4e414cbb7aa62809b6760c8803d22d1/cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614", size = 4453674, upload-time = "2026-02-10T19:18:05.588Z" }, + { url = "https://files.pythonhosted.org/packages/db/ed/db15d3956f65264ca204625597c410d420e26530c4e2943e05a0d2f24d51/cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229", size = 3978801, upload-time = "2026-02-10T19:18:07.167Z" }, + { url = "https://files.pythonhosted.org/packages/41/e2/df40a31d82df0a70a0daf69791f91dbb70e47644c58581d654879b382d11/cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1", size = 4276755, upload-time = "2026-02-10T19:18:09.813Z" }, + { url = "https://files.pythonhosted.org/packages/33/45/726809d1176959f4a896b86907b98ff4391a8aa29c0aaaf9450a8a10630e/cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d", size = 4901539, upload-time = "2026-02-10T19:18:11.263Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/0f/a3076874e9c88ecb2ecc31382f6e7c21b428ede6f55aafa1aa272613e3cd/cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c", size = 4452794, upload-time = "2026-02-10T19:18:12.914Z" }, + { url = "https://files.pythonhosted.org/packages/02/ef/ffeb542d3683d24194a38f66ca17c0a4b8bf10631feef44a7ef64e631b1a/cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4", size = 4404160, upload-time = "2026-02-10T19:18:14.375Z" }, + { url = "https://files.pythonhosted.org/packages/96/93/682d2b43c1d5f1406ed048f377c0fc9fc8f7b0447a478d5c65ab3d3a66eb/cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9", size = 4667123, upload-time = "2026-02-10T19:18:15.886Z" }, + { url = "https://files.pythonhosted.org/packages/45/2d/9c5f2926cb5300a8eefc3f4f0b3f3df39db7f7ce40c8365444c49363cbda/cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72", size = 3010220, upload-time = "2026-02-10T19:18:17.361Z" }, + { url = "https://files.pythonhosted.org/packages/48/ef/0c2f4a8e31018a986949d34a01115dd057bf536905dca38897bacd21fac3/cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595", size = 3467050, upload-time = "2026-02-10T19:18:18.899Z" }, + { url = "https://files.pythonhosted.org/packages/eb/dd/2d9fdb07cebdf3d51179730afb7d5e576153c6744c3ff8fded23030c204e/cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c", size = 3476964, upload-time = "2026-02-10T19:18:20.687Z" }, + { url = 
"https://files.pythonhosted.org/packages/e9/6f/6cc6cc9955caa6eaf83660b0da2b077c7fe8ff9950a3c5e45d605038d439/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a", size = 4218321, upload-time = "2026-02-10T19:18:22.349Z" }, + { url = "https://files.pythonhosted.org/packages/3e/5d/c4da701939eeee699566a6c1367427ab91a8b7088cc2328c09dbee940415/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356", size = 4381786, upload-time = "2026-02-10T19:18:24.529Z" }, + { url = "https://files.pythonhosted.org/packages/ac/97/a538654732974a94ff96c1db621fa464f455c02d4bb7d2652f4edc21d600/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da", size = 4217990, upload-time = "2026-02-10T19:18:25.957Z" }, + { url = "https://files.pythonhosted.org/packages/ae/11/7e500d2dd3ba891197b9efd2da5454b74336d64a7cc419aa7327ab74e5f6/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257", size = 4381252, upload-time = "2026-02-10T19:18:27.496Z" }, + { url = "https://files.pythonhosted.org/packages/bc/58/6b3d24e6b9bc474a2dcdee65dfd1f008867015408a271562e4b690561a4d/cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7", size = 3407605, upload-time = "2026-02-10T19:18:29.233Z" }, +] + [[package]] name = "dnspython" version = "2.8.0" @@ -533,6 +607,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708, upload-time = "2025-11-12T09:56:36.333Z" }, ] 
+[[package]] +name = "faker" +version = "40.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tzdata", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/dc/b68e5378e5a7db0ab776efcdd53b6fe374b29d703e156fd5bb4c5437069e/faker-40.11.0.tar.gz", hash = "sha256:7c419299103b13126bd02ec14bd2b47b946edb5a5eedf305e66a193b25f9a734", size = 1957570, upload-time = "2026-03-13T14:36:11.844Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/fa/a86c6ba66f0308c95b9288b1e3eaccd934b545646f63494a86f1ec2f8c8e/faker-40.11.0-py3-none-any.whl", hash = "sha256:0e9816c950528d2a37d74863f3ef389ea9a3a936cbcde0b11b8499942e25bf90", size = 1989457, upload-time = "2026-03-13T14:36:09.792Z" }, +] + [[package]] name = "fastapi" version = "0.129.0" @@ -952,50 +1038,64 @@ name = "permit" version = "0.1.0" source = { editable = "backend" } dependencies = [ + { name = "base58" }, + { name = "cryptography" }, { name = "fastapi", extra = ["standard"] }, { name = "httpx" }, - { name = "psycopg", extra = ["binary"] }, - { name = "psycopg-pool" }, + { name = "psycopg", extra = ["binary", "pool"] }, { name = "pwdlib", extra = ["argon2", "bcrypt"] }, - { name = "pydantic" }, - { name = "pydantic-settings" }, { name = "pyjwt" }, - { name = "python-multipart" }, { name = "tenacity" }, + { name = "uuid6" }, ] [package.dev-dependencies] dev = [ - { name = "coverage" }, + { name = "faker" }, { name = "prek" }, { name = "pytest" }, + { name = "pytest-cov" }, { name = "pytest-xdist" }, { name = "ruff" }, { name = "testcontainers" }, ] +test = [ + { name = "faker" }, + { name = "pytest" }, + { name = "pytest-cov" }, + { name = "pytest-xdist" }, + { name = "testcontainers" }, +] [package.metadata] requires-dist = [ + { name = "base58", specifier = ">=2.1.1" }, + { name = "cryptography", specifier = ">=44.0.0" }, { name = "fastapi", extras = ["standard"], specifier = ">=0.114.2,<1.0.0" }, { name = "httpx", specifier 
= ">=0.25.1,<1.0.0" }, - { name = "psycopg", extras = ["binary"], specifier = ">=3.1.13,<4.0.0" }, - { name = "psycopg-pool", specifier = ">=3.3.0,<4.0.0" }, + { name = "psycopg", extras = ["binary", "pool"], specifier = ">=3.1.13,<4.0.0" }, { name = "pwdlib", extras = ["argon2", "bcrypt"], specifier = ">=0.3.0" }, - { name = "pydantic", specifier = ">2.0" }, - { name = "pydantic-settings", specifier = ">=2.2.1,<3.0.0" }, { name = "pyjwt", specifier = ">=2.11.0,<3.0.0" }, - { name = "python-multipart", specifier = ">=0.0.7,<1.0.0" }, { name = "tenacity", specifier = ">=8.2.3,<9.0.0" }, + { name = "uuid6", specifier = ">=2025.0.1" }, ] [package.metadata.requires-dev] dev = [ - { name = "coverage", specifier = ">=7.4.3,<8.0.0" }, + { name = "faker", specifier = ">=40.11.0" }, { name = "prek", specifier = ">=0.2.24,<1.0.0" }, - { name = "pytest", specifier = ">=7.4.3,<8.0.0" }, - { name = "pytest-xdist", specifier = ">=3.5.0,<4.0.0" }, + { name = "pytest", specifier = ">=7.4.4" }, + { name = "pytest-cov", specifier = ">=7.0.0" }, + { name = "pytest-xdist", specifier = ">=3.8.0" }, { name = "ruff", specifier = ">=0.2.2,<1.0.0" }, - { name = "testcontainers", extras = ["postgres"], specifier = ">=4.0.0" }, + { name = "testcontainers", extras = ["postgres"], specifier = ">=4.14.1" }, +] +test = [ + { name = "faker", specifier = ">=40.11.0" }, + { name = "pytest", specifier = ">=7.4.4" }, + { name = "pytest-cov", specifier = ">=7.0.0" }, + { name = "pytest-xdist", specifier = ">=3.8.0" }, + { name = "testcontainers", extras = ["postgres"], specifier = ">=4.14.1" }, ] [[package]] @@ -1048,6 +1148,9 @@ wheels = [ binary = [ { name = "psycopg-binary", marker = "implementation_name != 'pypy'" }, ] +pool = [ + { name = "psycopg-pool" }, +] [[package]] name = "psycopg-binary" @@ -1349,6 +1452,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/51/ff/f6e8b8f39e08547faece4bd80f89d5a8de68a38b2d179cc1c4490ffa3286/pytest-7.4.4-py3-none-any.whl", hash = 
"sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8", size = 325287, upload-time = "2023-12-31T12:00:13.963Z" }, ] +[[package]] +name = "pytest-cov" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage", extra = ["toml"] }, + { name = "pluggy" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/f7/c933acc76f5208b3b00089573cf6a2bc26dc80a8aece8f52bb7d6b1855ca/pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1", size = 54328, upload-time = "2025-09-09T10:57:02.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, +] + [[package]] name = "pytest-xdist" version = "3.8.0" @@ -1822,6 +1939,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, ] +[[package]] +name = "uuid6" +version = "2025.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/b7/4c0f736ca824b3a25b15e8213d1bcfc15f8ac2ae48d1b445b310892dc4da/uuid6-2025.0.1.tar.gz", hash = "sha256:cd0af94fa428675a44e32c5319ec5a3485225ba2179eefcf4c3f205ae30a81bd", size = 13932, upload-time = "2025-07-04T18:30:35.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/b2/93faaab7962e2aa8d6e174afb6f76be2ca0ce89fde14d3af835acebcaa59/uuid6-2025.0.1-py3-none-any.whl", hash = "sha256:80530ce4d02a93cdf82e7122ca0da3ebbbc269790ec1cb902481fa3e9cc9ff99", size = 6979, upload-time = 
"2025-07-04T18:30:34.001Z" }, +] + [[package]] name = "uvicorn" version = "0.41.0"