diff --git a/pyproject.toml b/pyproject.toml
index b0b3596..ade63c7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -136,6 +136,7 @@ omit = [
"*/__pycache__/*",
"*/api_lib_autogen/*",
"*/conftest.py",
+ "*/main.py",
]
[tool.coverage.report]
diff --git a/tests/__init__.py b/tests/__init__.py
index 79a5ffc..5fd827c 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-"""Test package for th_cli."""
\ No newline at end of file
+"""Test package for th_cli."""
diff --git a/tests/conftest.py b/tests/conftest.py
index da8ea2c..784f6fc 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -26,12 +26,12 @@
import pytest
from click.testing import CliRunner
from faker import Faker
-
from httpx import Headers
from th_cli.api_lib_autogen import models as api_models
from th_cli.api_lib_autogen.api_client import ApiClient, AsyncApis, SyncApis
from th_cli.api_lib_autogen.exceptions import UnexpectedResponse
+from th_cli.colorize import set_colors_enabled
# Initialize faker instance for generating test data
fake = Faker()
@@ -91,17 +91,10 @@ def mock_json_config_file(temp_dir: Path) -> Path:
"setup_code": "20212223",
"discriminator": "3840",
"chip_use_paa_certs": False,
- "trace_log": False
- },
- "network": {
- "wifi": {
- "ssid": "TestNetwork",
- "password": "TestPassword123"
- }
+ "trace_log": False,
},
- "test_parameters": {
- "custom_param": "test_value"
- }
+ "network": {"wifi": {"ssid": "TestNetwork", "password": "TestPassword123"}},
+ "test_parameters": {"custom_param": "test_value"},
}
config_file = temp_dir / "test_config.json"
config_file.write_text(json.dumps(config_data, indent=2))
@@ -148,20 +141,15 @@ def sample_default_config_dict() -> dict:
"""Create a sample default configuration dictionary."""
return {
"network": {
- "wifi": {
- "ssid": "default_wifi",
- "password": "default_password"
- },
- "thread": {
- "operational_dataset_hex": "default_hex"
- }
+ "wifi": {"ssid": "default_wifi", "password": "default_password"},
+ "thread": {"operational_dataset_hex": "default_hex"},
},
"dut_config": {
"pairing_mode": "ble-wifi",
"setup_code": "20202021",
"discriminator": "3840",
- "trace_log": False
- }
+ "trace_log": False,
+ },
}
@@ -211,17 +199,19 @@ def sample_project() -> api_models.Project:
config={
"network": {
"wifi": {"ssid": "TestWiFi", "password": "testpassword"},
- "thread": {"operational_dataset_hex": "0e080000000000010000000300001235060004001fffe0020811111111222222220708fd"}
+ "thread": {
+ "operational_dataset_hex": "0e080000000000010000000300001235060004001fffe0020811111111222222220708fd" # noqa
+ },
},
"dut_config": {
"pairing_mode": "ble-wifi",
"setup_code": "20202021",
"discriminator": "3840",
- "trace_log": False
- }
+ "trace_log": False,
+ },
},
created_at=fake.date_time(),
- updated_at=fake.date_time()
+ updated_at=fake.date_time(),
)
@@ -235,17 +225,17 @@ def sample_projects() -> list[api_models.Project]:
config={
"network": {
"wifi": {"ssid": "test", "password": "test"},
- "thread": {"operational_dataset_hex": "test"}
+ "thread": {"operational_dataset_hex": "test"},
},
"dut_config": {
"pairing_mode": "ble-wifi",
"setup_code": "20202021",
"discriminator": "3840",
- "trace_log": False
- }
+ "trace_log": False,
+ },
},
created_at=fake.date_time(),
- updated_at=fake.date_time()
+ updated_at=fake.date_time(),
)
for i in range(1, 4)
]
@@ -265,7 +255,7 @@ def sample_test_collections() -> api_models.TestCollections:
public_id="FirstChipToolSuite",
version="1.0",
title="First Chip Tool Suite",
- description="Test suite for chip tool testing"
+ description="Test suite for chip tool testing",
),
test_cases={
"TC-ACE-1.1": api_models.TestCase(
@@ -273,7 +263,7 @@ def sample_test_collections() -> api_models.TestCollections:
public_id="TC-ACE-1.1",
version="1.0",
title="Test Case ACE 1.1",
- description="Access Control Entry test"
+ description="Access Control Entry test",
)
),
"TC-ACE-1.2": api_models.TestCase(
@@ -281,7 +271,7 @@ def sample_test_collections() -> api_models.TestCollections:
public_id="TC-ACE-1.2",
version="1.0",
title="Test Case ACE 1.2",
- description="Access Control Entry test 2"
+ description="Access Control Entry test 2",
)
),
"TC-CC-1.1": api_models.TestCase(
@@ -289,12 +279,12 @@ def sample_test_collections() -> api_models.TestCollections:
public_id="TC-CC-1.1",
version="1.0",
title="Test Case CC 1.1",
- description="Color Control test"
+ description="Color Control test",
)
- )
- }
+ ),
+ },
)
- }
+ },
),
"SDK Python Tests": api_models.TestCollection(
name="SDK Python Tests",
@@ -305,7 +295,7 @@ def sample_test_collections() -> api_models.TestCollections:
public_id="Python Testing Suite",
version="1.0",
title="Python Testing Suite",
- description="Python test suite"
+ description="Python test suite",
),
test_cases={
"TC_ACE_1_3": api_models.TestCase(
@@ -313,12 +303,12 @@ def sample_test_collections() -> api_models.TestCollections:
public_id="TC_ACE_1_3",
version="1.0",
title="Test Case ACE 1.3",
- description="Access Control Entry test 3"
+ description="Access Control Entry test 3",
)
)
- }
+ },
)
- }
+ },
),
"Custom SDK Python Tests": api_models.TestCollection(
name="Custom SDK Python Tests",
@@ -329,7 +319,7 @@ def sample_test_collections() -> api_models.TestCollections:
public_id="Python Testing Suite-custom",
version="1.0",
title="Python Testing Suite-custom",
- description="Python test suite custom"
+ description="Python test suite custom",
),
test_cases={
"TC_ACE_1_3-custom": api_models.TestCase(
@@ -337,13 +327,13 @@ def sample_test_collections() -> api_models.TestCollections:
public_id="TC_ACE_1_3-custom",
version="1.0",
title="Test Case ACE 1.3 Custom",
- description="Access Control Entry test 3 custom"
+ description="Access Control Entry test 3 custom",
)
)
- }
+ },
)
- }
- )
+ },
+ ),
}
)
@@ -380,9 +370,9 @@ def sample_test_run_execution() -> api_models.TestRunExecutionWithChildren:
title="Test Case ACE 1.1",
description="Access Control Entry test",
version="1.0",
- source_hash="abc123"
+ source_hash="abc123",
),
- test_step_executions=[]
+ test_step_executions=[],
)
],
test_suite_metadata=api_models.TestSuiteMetadata(
@@ -391,30 +381,23 @@ def sample_test_run_execution() -> api_models.TestRunExecutionWithChildren:
title="First Chip Tool Suite",
description="Test suite for chip tool testing",
version="1.0",
- source_hash="def456"
- )
+ source_hash="def456",
+ ),
)
- ]
+ ],
)
@pytest.fixture
def sample_test_runner_status() -> api_models.TestRunnerStatus:
"""Create a sample test runner status for testing."""
- return api_models.TestRunnerStatus(
- state=api_models.TestRunnerState.idle,
- test_run_execution_id=None
- )
+ return api_models.TestRunnerStatus(state=api_models.TestRunnerState.idle, test_run_execution_id=None)
@pytest.fixture
def mock_unexpected_response() -> UnexpectedResponse:
"""Create a mock UnexpectedResponse exception."""
- return UnexpectedResponse(
- status_code=404,
- content=b"Not Found",
- headers=Headers()
- )
+ return UnexpectedResponse(status_code=404, content=b"Not Found", headers=Headers())
@pytest.fixture
@@ -424,7 +407,7 @@ def mock_versions_info() -> dict[str, Any]:
"backend_version": "1.0.0",
"backend_sha": "abc123def",
"test_harness_version": "2.0.0",
- "test_harness_sha": "def456ghi"
+ "test_harness_sha": "def456ghi",
}
@@ -434,8 +417,6 @@ def disable_colors():
# Set environment variable to disable colors
os.environ["TH_CLI_NO_COLOR"] = "1"
- # Import and set colors disabled programmatically as well
- from th_cli.colorize import set_colors_enabled
set_colors_enabled(False)
yield
@@ -460,7 +441,7 @@ def get(self, key: str, default: Any = None) -> Any:
def dict(self) -> dict[str, Any]:
"""Convert response to dictionary."""
- if hasattr(self.data, 'dict'):
+ if hasattr(self.data, "dict"):
return self.data.dict()
return self.data if isinstance(self.data, dict) else {}
@@ -477,21 +458,16 @@ def generate_test_project_data(**overrides) -> dict[str, Any]:
"name": fake.company(),
"config": {
"network": {
- "wifi": {
- "ssid": fake.name(),
- "password": fake.password()
- },
- "thread": {
- "operational_dataset_hex": "test_hex_value"
- }
+ "wifi": {"ssid": fake.name(), "password": fake.password()},
+ "thread": {"operational_dataset_hex": "test_hex_value"},
},
"dut_config": {
"pairing_mode": "ble-wifi",
"setup_code": "20202021",
"discriminator": str(fake.random_int(min=0, max=4095)),
- "trace_log": False
- }
- }
+ "trace_log": False,
+ },
+ },
}
data.update(overrides)
return data
diff --git a/tests/test_abort_testing.py b/tests/test_abort_testing.py
index 9a8b727..8abf65a 100644
--- a/tests/test_abort_testing.py
+++ b/tests/test_abort_testing.py
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2025 Project CHIP Authors
+# Copyright (c) 2025-2026 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +19,6 @@
import pytest
from click.testing import CliRunner
-from httpx import Headers
from th_cli.api_lib_autogen.exceptions import UnexpectedResponse
from th_cli.commands.abort_testing import abort_testing
@@ -69,8 +68,7 @@ def test_abort_testing_configuration_error(self, cli_runner: CliRunner) -> None:
"""Test abort testing with configuration error."""
# Arrange
with patch(
- "th_cli.commands.abort_testing.get_client",
- side_effect=ConfigurationError("Could not connect to server")
+ "th_cli.commands.abort_testing.get_client", side_effect=ConfigurationError("Could not connect to server")
):
# Act
result = cli_runner.invoke(abort_testing)
@@ -100,10 +98,7 @@ def test_abort_testing_api_error(self, cli_runner: CliRunner, mock_sync_apis: Mo
mock_api_client.close.assert_called_once()
def test_abort_testing_generic_exception(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test abort testing with generic exception."""
# Arrange
@@ -152,13 +147,16 @@ def test_abort_testing_help_message(self, cli_runner: CliRunner) -> None:
assert result.exit_code == 0
assert "Abort the current test run execution" in result.output
- @pytest.mark.parametrize("response_data", [
- {"detail": "Test execution aborted"},
- {"detail": "No active test execution"},
- {"detail": "Test Engine is not active."},
- {"message": "Operation completed"},
- {},
- ])
+ @pytest.mark.parametrize(
+ "response_data",
+ [
+ {"detail": "Test execution aborted"},
+ {"detail": "No active test execution"},
+ {"detail": "Test Engine is not active."},
+ {"message": "Operation completed"},
+ {},
+ ],
+ )
def test_abort_testing_various_response_formats(
self,
cli_runner: CliRunner,
@@ -182,26 +180,25 @@ def test_abort_testing_various_response_formats(
else:
assert "Testing aborted" in result.output
- @pytest.mark.parametrize("status_code,content", [
- (400, "Bad Request"),
- (401, "Unauthorized"),
- (403, "Forbidden"),
- (404, "Not Found"),
- (500, "Internal Server Error"),
- (503, "Service Unavailable")
- ])
+ @pytest.mark.parametrize(
+ "status_code,content",
+ [
+ (400, "Bad Request"),
+ (401, "Unauthorized"),
+ (403, "Forbidden"),
+ (404, "Not Found"),
+ (500, "Internal Server Error"),
+ (503, "Service Unavailable"),
+ ],
+ )
def test_abort_testing_various_api_errors(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- status_code: int,
- content: str
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, status_code: int, content: str
) -> None:
"""Test abort testing with various API error status codes."""
# Arrange
api_exception = UnexpectedResponse(
status_code=status_code,
- content=content.encode('utf-8'),
+ content=content.encode("utf-8"),
)
api = mock_sync_apis.test_run_executions_api.abort_testing_api_v1_test_run_executions_abort_testing_post
diff --git a/tests/test_available_tests.py b/tests/test_available_tests.py
index 4c99ed7..ab41384 100644
--- a/tests/test_available_tests.py
+++ b/tests/test_available_tests.py
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2025 Project CHIP Authors
+# Copyright (c) 2025-2026 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,11 +19,16 @@
import pytest
from click.testing import CliRunner
-from httpx import Headers
from th_cli.api_lib_autogen import models as api_models
from th_cli.api_lib_autogen.exceptions import UnexpectedResponse
-from th_cli.commands.available_tests import available_tests
+from th_cli.commands.available_tests import (
+ _extract_cluster_from_test_id,
+ _extract_test_cases,
+ _generate_compact,
+ _generate_grouped_by_cluster,
+ available_tests,
+)
from th_cli.exceptions import ConfigurationError
@@ -105,8 +110,7 @@ def test_available_tests_configuration_error(self, cli_runner: CliRunner) -> Non
"""Test available tests with configuration error."""
# Arrange
with patch(
- "th_cli.commands.available_tests.get_client",
- side_effect=ConfigurationError("Could not connect to server")
+ "th_cli.commands.available_tests.get_client", side_effect=ConfigurationError("Could not connect to server")
):
# Act
result = cli_runner.invoke(available_tests)
@@ -193,7 +197,7 @@ def test_available_tests_output_formats(
cli_runner: CliRunner,
mock_sync_apis: Mock,
sample_test_collections: api_models.TestCollections,
- json_flag: bool
+ json_flag: bool,
) -> None:
"""Test available tests with both JSON and YAML output formats."""
# Arrange
@@ -231,10 +235,7 @@ def test_available_tests_complex_test_structure(
test_suites={
"Suite1": api_models.TestSuite(
metadata=api_models.TestMetadata(
- public_id="Suite1",
- version="2.0",
- title="Test Suite 1",
- description="First test suite"
+ public_id="Suite1", version="2.0", title="Test Suite 1", description="First test suite"
),
test_cases={
"TC-TEST-1.1": api_models.TestCase(
@@ -242,7 +243,7 @@ def test_available_tests_complex_test_structure(
public_id="TC-TEST-1.1",
version="2.0",
title="Test Case 1.1",
- description="First test case"
+ description="First test case",
)
),
"TC-TEST-1.2": api_models.TestCase(
@@ -250,21 +251,18 @@ def test_available_tests_complex_test_structure(
public_id="TC-TEST-1.2",
version="2.0",
title="Test Case 1.2",
- description="Second test case"
+ description="Second test case",
)
- )
- }
+ ),
+ },
),
"Suite2": api_models.TestSuite(
metadata=api_models.TestMetadata(
- public_id="Suite2",
- version="1.5",
- title="Test Suite 2",
- description="Second test suite"
+ public_id="Suite2", version="1.5", title="Test Suite 2", description="Second test suite"
),
- test_cases={}
- )
- }
+ test_cases={},
+ ),
+ },
)
}
)
@@ -283,26 +281,25 @@ def test_available_tests_complex_test_structure(
assert "TC-TEST-1.1:" in result.output
assert "TC-TEST-1.2:" in result.output
- @pytest.mark.parametrize("status_code,content", [
- (400, "Bad Request"),
- (401, "Unauthorized"),
- (403, "Forbidden"),
- (404, "Not Found"),
- (500, "Internal Server Error"),
- (503, "Service Unavailable")
- ])
+ @pytest.mark.parametrize(
+ "status_code,content",
+ [
+ (400, "Bad Request"),
+ (401, "Unauthorized"),
+ (403, "Forbidden"),
+ (404, "Not Found"),
+ (500, "Internal Server Error"),
+ (503, "Service Unavailable"),
+ ],
+ )
def test_available_tests_various_api_errors(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- status_code: int,
- content: str
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, status_code: int, content: str
) -> None:
"""Test available tests with various API error status codes."""
# Arrange
api_exception = UnexpectedResponse(
status_code=status_code,
- content=content.encode('utf-8'),
+ content=content.encode("utf-8"),
)
api = mock_sync_apis.test_collections_api.read_test_collections_api_v1_test_collections__get
@@ -317,10 +314,7 @@ def test_available_tests_various_api_errors(
assert content in result.output
def test_available_tests_yaml_dump_functionality(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test that YAML output is properly formatted and readable."""
# Arrange
@@ -340,10 +334,7 @@ def test_available_tests_yaml_dump_functionality(
assert '"test_collections":' not in result.output
def test_available_tests_compact(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test --compact flag shows test IDs with titles."""
# Arrange
@@ -364,10 +355,7 @@ def test_available_tests_compact(
assert "test_collections:" not in result.output
def test_available_tests_group_by_cluster(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test --group-by-cluster flag groups tests by cluster."""
# Arrange
@@ -390,10 +378,7 @@ def test_available_tests_group_by_cluster(
assert " TC-CC-1.1" in result.output
def test_available_tests_cluster_filter(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test --cluster filter shows only tests from specified cluster."""
# Arrange
@@ -414,10 +399,7 @@ def test_available_tests_cluster_filter(
assert "TC-CC-1.1" not in result.output
def test_available_tests_cluster_filter_case_insensitive(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test --cluster filter is case insensitive."""
# Arrange
@@ -436,10 +418,7 @@ def test_available_tests_cluster_filter_case_insensitive(
assert "ID: TC-ACE-1.2" in result.output
def test_available_tests_cluster_and_compact_combined(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test combining --cluster and --compact flags."""
# Arrange
@@ -458,9 +437,7 @@ def test_available_tests_cluster_and_compact_combined(
assert "TC-ACE-1.1" not in result.output
def test_extract_cluster_from_test_id(self) -> None:
-        """Test cluster extraction logic for various test ID patterns."""
-        from th_cli.commands.available_tests import _extract_cluster_from_test_id
-
+        """Test cluster extraction logic for various test ID patterns."""
# Test various patterns
assert _extract_cluster_from_test_id("TC-ACE-1.1") == "ACE"
assert _extract_cluster_from_test_id("TC-CADMIN-1.2") == "CADMIN"
@@ -483,14 +459,9 @@ def test_extract_cluster_from_test_id(self) -> None:
assert _extract_cluster_from_test_id("SomeOtherTest") == "UNKNOWN"
def test_generate_compact(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test _generate_compact function with 4 elements per line."""
- from th_cli.commands.available_tests import _extract_test_cases, _generate_compact
-
# Arrange
api = mock_sync_apis.test_collections_api.read_test_collections_api_v1_test_collections__get
api.return_value = sample_test_collections
@@ -513,14 +484,9 @@ def test_generate_compact(
assert any("TC-ACE-1.1" in line for line in result_lines)
def test_generate_grouped_by_cluster(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test _generate_grouped_by_cluster function with 4 elements per line."""
- from th_cli.commands.available_tests import _extract_test_cases, _generate_grouped_by_cluster
-
# Arrange
api = mock_sync_apis.test_collections_api.read_test_collections_api_v1_test_collections__get
api.return_value = sample_test_collections
@@ -533,7 +499,7 @@ def test_generate_grouped_by_cluster(
result_lines = _generate_grouped_by_cluster(test_cases)
# Assert
- result_text = '\n'.join(result_lines)
+ result_text = "\n".join(result_lines)
assert "ACE:" in result_text
assert "CC:" in result_text
# Check that tests within clusters use uniform spacing format
@@ -544,13 +510,10 @@ def test_generate_grouped_by_cluster(
ace_line_found = True
break
# Should find at least one line with multiple ACE tests
- assert ace_line_found or len([tc for tc in test_cases if tc['cluster'] == 'ACE']) < 4
+ assert ace_line_found or len([tc for tc in test_cases if tc["cluster"] == "ACE"]) < 4
def test_available_tests_compact_four_per_line(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test --compact flag shows 4 elements per line."""
# Arrange
@@ -569,15 +532,12 @@ def test_available_tests_compact_four_per_line(
# Check that content contains spacing for multiple elements
call_content = mock_pager.call_args[0][0] # First argument (content)
# Should contain test cases with uniform spacing (using double spaces)
- lines = call_content.split('\n')
+ lines = call_content.split("\n")
uniform_spacing_found = any(" " in line and "TC-" in line for line in lines)
- assert uniform_spacing_found or call_content.count('\n') == 0 # Exception for small datasets
+ assert uniform_spacing_found or call_content.count("\n") == 0 # Exception for small datasets
def test_available_tests_group_by_cluster_four_per_line(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test --group-by-cluster flag shows 4 elements per line within clusters."""
# Arrange
@@ -599,10 +559,7 @@ def test_available_tests_group_by_cluster_four_per_line(
assert "CC:" in call_content
def test_available_tests_with_pagination_mock(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test available_tests command uses echo_via_pager."""
# Arrange
@@ -625,10 +582,7 @@ def test_available_tests_with_pagination_mock(
assert "TC-CC-1.1" in call_content
def test_available_tests_echo_via_pager_behavior(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test that echo_via_pager is called for all custom formatting options."""
# Arrange
@@ -648,10 +602,7 @@ def test_available_tests_echo_via_pager_behavior(
mock_pager.assert_called_once()
def test_available_tests_cluster_detailed_info(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_test_collections: api_models.TestCollections
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_test_collections: api_models.TestCollections
) -> None:
"""Test --cluster flag shows detailed information."""
# Arrange
diff --git a/tests/test_ffmpeg_converter.py b/tests/test_ffmpeg_converter.py
new file mode 100644
index 0000000..3415d20
--- /dev/null
+++ b/tests/test_ffmpeg_converter.py
@@ -0,0 +1,252 @@
+#
+# Copyright (c) 2026 Project CHIP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Unit tests for FFmpegStreamConverter and FFmpegNotInstalledError."""
+
+import subprocess
+from unittest.mock import MagicMock, Mock, patch
+
+import ffmpeg
+import pytest
+
+from th_cli.th_utils.ffmpeg_converter import FFMPEG_NOT_INSTALLED_MSG, FFmpegNotInstalledError, FFmpegStreamConverter
+
+# ---------------------------------------------------------------------------
+# FFmpegNotInstalledError
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestFFmpegNotInstalledError:
+ """Tests for FFmpegNotInstalledError exception class."""
+
+ def test_default_message(self):
+ """Default message contains the installation instructions constant."""
+ error = FFmpegNotInstalledError()
+ assert error.message == FFMPEG_NOT_INSTALLED_MSG
+ assert FFMPEG_NOT_INSTALLED_MSG in str(error)
+
+ def test_custom_message(self):
+ """Custom message is stored in both .message and str representation."""
+ custom = "custom error"
+ error = FFmpegNotInstalledError(custom)
+ assert error.message == custom
+ assert custom in str(error)
+
+ def test_is_runtime_error(self):
+ """FFmpegNotInstalledError is a subclass of RuntimeError."""
+ assert issubclass(FFmpegNotInstalledError, RuntimeError)
+
+
+# ---------------------------------------------------------------------------
+# FFmpegStreamConverter.check_ffmpeg_installed
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestCheckFfmpegInstalled:
+ """Tests for FFmpegStreamConverter.check_ffmpeg_installed static method."""
+
+ def test_returns_false_when_ffmpeg_not_in_path(self):
+ """Returns (False, message) when shutil.which returns None."""
+ with patch("th_cli.th_utils.ffmpeg_converter.shutil.which", return_value=None):
+ installed, msg = FFmpegStreamConverter.check_ffmpeg_installed()
+
+ assert installed is False
+ assert msg == FFMPEG_NOT_INSTALLED_MSG
+
+ def test_returns_true_when_ffmpeg_found_and_runs(self):
+ """Returns (True, '') when ffmpeg is in PATH and runs successfully."""
+ with patch("th_cli.th_utils.ffmpeg_converter.shutil.which", return_value="/usr/bin/ffmpeg"):
+ mock_result = Mock()
+ mock_result.returncode = 0
+ mock_result.stdout = "ffmpeg version 6.0 Copyright...\nmore info"
+
+ with patch("th_cli.th_utils.ffmpeg_converter.subprocess.run", return_value=mock_result):
+ installed, msg = FFmpegStreamConverter.check_ffmpeg_installed()
+
+ assert installed is True
+ assert msg == ""
+
+ def test_returns_false_when_ffmpeg_command_fails(self):
+ """Returns (False, error) when ffmpeg exits with non-zero return code."""
+ with patch("th_cli.th_utils.ffmpeg_converter.shutil.which", return_value="/usr/bin/ffmpeg"):
+ mock_result = Mock()
+ mock_result.returncode = 1
+ mock_result.stdout = ""
+
+ with patch("th_cli.th_utils.ffmpeg_converter.subprocess.run", return_value=mock_result):
+ installed, msg = FFmpegStreamConverter.check_ffmpeg_installed()
+
+ assert installed is False
+ assert "failed to execute" in msg
+
+ def test_returns_false_on_timeout(self):
+ """Returns (False, timeout message) when subprocess times out."""
+ with patch("th_cli.th_utils.ffmpeg_converter.shutil.which", return_value="/usr/bin/ffmpeg"):
+ with patch(
+ "th_cli.th_utils.ffmpeg_converter.subprocess.run",
+ side_effect=subprocess.TimeoutExpired(cmd="ffmpeg", timeout=5),
+ ):
+ installed, msg = FFmpegStreamConverter.check_ffmpeg_installed()
+
+ assert installed is False
+ assert "timed out" in msg
+
+ def test_returns_false_on_unexpected_exception(self):
+ """Returns (False, error description) for any other exception."""
+ with patch("th_cli.th_utils.ffmpeg_converter.shutil.which", return_value="/usr/bin/ffmpeg"):
+ with patch(
+ "th_cli.th_utils.ffmpeg_converter.subprocess.run",
+ side_effect=OSError("Permission denied"),
+ ):
+ installed, msg = FFmpegStreamConverter.check_ffmpeg_installed()
+
+ assert installed is False
+ assert "Error checking FFmpeg" in msg
+
+
+# ---------------------------------------------------------------------------
+# FFmpegStreamConverter.start_conversion
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestStartConversion:
+ """Tests for FFmpegStreamConverter.start_conversion."""
+
+ def test_raises_when_ffmpeg_not_installed(self):
+ """start_conversion raises FFmpegNotInstalledError if ffmpeg is absent."""
+ converter = FFmpegStreamConverter()
+
+ with patch.object(
+ FFmpegStreamConverter,
+ "check_ffmpeg_installed",
+ return_value=(False, FFMPEG_NOT_INSTALLED_MSG),
+ ):
+ with pytest.raises(FFmpegNotInstalledError):
+ converter.start_conversion()
+
+ def test_returns_false_on_ffmpeg_error(self):
+ """start_conversion returns False when ffmpeg-python raises ffmpeg.Error."""
+ converter = FFmpegStreamConverter()
+
+ with patch.object(FFmpegStreamConverter, "check_ffmpeg_installed", return_value=(True, "")):
+ with patch("th_cli.th_utils.ffmpeg_converter.ffmpeg.run_async", side_effect=ffmpeg.Error("err", "", b"")):
+ result = converter.start_conversion()
+
+ assert result is False
+
+ def test_returns_false_on_unexpected_error(self):
+ """start_conversion returns False on any unexpected exception."""
+ converter = FFmpegStreamConverter()
+
+ with patch.object(FFmpegStreamConverter, "check_ffmpeg_installed", return_value=(True, "")):
+ with patch(
+ "th_cli.th_utils.ffmpeg_converter.ffmpeg.run_async",
+ side_effect=RuntimeError("unexpected"),
+ ):
+ result = converter.start_conversion()
+
+ assert result is False
+
+
+# ---------------------------------------------------------------------------
+# FFmpegStreamConverter.feed_data / get_converted_data / stop
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestFeedDataAndGetConvertedData:
+ """Tests for feed_data, get_converted_data, and stop methods."""
+
+ def test_feed_data_writes_to_process_stdin(self):
+ """feed_data writes bytes to ffmpeg_process.stdin."""
+ converter = FFmpegStreamConverter()
+ mock_process = MagicMock()
+ converter.ffmpeg_process = mock_process
+
+ converter.feed_data(b"\x00\x01\x02")
+
+ mock_process.stdin.write.assert_called_once_with(b"\x00\x01\x02")
+ mock_process.stdin.flush.assert_called_once()
+
+ def test_feed_data_does_nothing_when_no_process(self):
+ """feed_data silently skips when ffmpeg_process is None."""
+ converter = FFmpegStreamConverter()
+ converter.ffmpeg_process = None
+
+ # Should not raise
+ converter.feed_data(b"\x00\x01\x02")
+
+ def test_feed_data_handles_write_error_gracefully(self):
+ """feed_data logs error and does not propagate exceptions."""
+ converter = FFmpegStreamConverter()
+ mock_process = MagicMock()
+ mock_process.stdin.write.side_effect = OSError("broken pipe")
+ converter.ffmpeg_process = mock_process
+
+ # Should not raise
+ converter.feed_data(b"\xde\xad\xbe\xef")
+
+ def test_get_converted_data_returns_queued_item(self):
+ """get_converted_data returns item from output_queue."""
+ converter = FFmpegStreamConverter()
+ converter.output_queue.put_nowait(b"mp4data")
+
+ result = converter.get_converted_data(timeout=0.1)
+
+ assert result == b"mp4data"
+
+ def test_get_converted_data_returns_none_on_empty_queue(self):
+ """get_converted_data returns None when queue is empty after timeout."""
+ converter = FFmpegStreamConverter()
+
+ result = converter.get_converted_data(timeout=0.05)
+
+ assert result is None
+
+ def test_stop_terminates_process(self):
+ """stop calls terminate/wait on the ffmpeg process."""
+ converter = FFmpegStreamConverter()
+ mock_process = MagicMock()
+ converter.ffmpeg_process = mock_process
+
+ converter.stop()
+
+ mock_process.stdin.close.assert_called_once()
+ mock_process.terminate.assert_called_once()
+ mock_process.wait.assert_called_once()
+ assert converter.ffmpeg_process is None
+
+ def test_stop_does_nothing_when_no_process(self):
+ """stop silently handles the case where ffmpeg_process is None."""
+ converter = FFmpegStreamConverter()
+ converter.ffmpeg_process = None
+
+ # Should not raise
+ converter.stop()
+
+ def test_stop_handles_exception_gracefully(self):
+ """stop logs error and clears ffmpeg_process even when terminate raises."""
+ converter = FFmpegStreamConverter()
+ mock_process = MagicMock()
+ mock_process.terminate.side_effect = OSError("already dead")
+ converter.ffmpeg_process = mock_process
+
+ # Should not raise
+ converter.stop()
+
+ assert converter.ffmpeg_process is None
diff --git a/tests/test_project_commands.py b/tests/test_project_commands.py
index 5db4f23..f551640 100644
--- a/tests/test_project_commands.py
+++ b/tests/test_project_commands.py
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2025 Project CHIP Authors
+# Copyright (c) 2025-2026 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,7 +20,6 @@
import pytest
from click.testing import CliRunner
-from httpx import Headers
from th_cli.api_lib_autogen import models as api_models
from th_cli.api_lib_autogen.exceptions import UnexpectedResponse
@@ -33,25 +32,21 @@ class TestCreateProjectCommand:
"""Test cases for the create_project command."""
def test_create_project_success_with_default_config(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock,
- sample_project: api_models.Project
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock, sample_project: api_models.Project
) -> None:
"""Test successful project creation with default configuration."""
# Arrange
default_config = {
"network": {
"wifi": {"ssid": "default", "password": "default"},
- "thread": {"operational_dataset_hex": "default"}
+ "thread": {"operational_dataset_hex": "default"},
},
"dut_config": {
"pairing_mode": "ble-wifi",
"setup_code": "20202021",
"discriminator": "3840",
- "trace_log": False
- }
+ "trace_log": False,
+ },
}
mock_sync_apis.projects_api.default_config_api_v1_projects_default_config_get.return_value = default_config
mock_sync_apis.projects_api.create_project_api_v1_projects__post.return_value = sample_project
@@ -69,25 +64,21 @@ def test_create_project_success_with_default_config(
mock_api_client.close.assert_called_once()
def test_create_project_success_with_custom_config(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_project: api_models.Project,
- mock_project_config: Path
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_project: api_models.Project, mock_project_config: Path
) -> None:
"""Test successful project creation with custom configuration file."""
# Arrange
default_config = {
"network": {
"wifi": {"ssid": "default", "password": "default"},
- "thread": {"operational_dataset_hex": "default"}
+ "thread": {"operational_dataset_hex": "default"},
},
"dut_config": {
"pairing_mode": "ble-wifi",
"setup_code": "20202021",
"discriminator": "3840",
- "trace_log": False
- }
+ "trace_log": False,
+ },
}
mock_sync_apis.projects_api.default_config_api_v1_projects_default_config_get.return_value = default_config
mock_sync_apis.projects_api.create_project_api_v1_projects__post.return_value = sample_project
@@ -112,14 +103,14 @@ def test_create_project_config_file_not_found(
default_config = {
"network": {
"wifi": {"ssid": "default", "password": "default"},
- "thread": {"operational_dataset_hex": "default"}
+ "thread": {"operational_dataset_hex": "default"},
},
"dut_config": {
"pairing_mode": "ble-wifi",
"setup_code": "20202021",
"discriminator": "3840",
- "trace_log": False
- }
+ "trace_log": False,
+ },
}
mock_sync_apis.projects_api.default_config_api_v1_projects_default_config_get.return_value = default_config
@@ -132,10 +123,7 @@ def test_create_project_config_file_not_found(
assert "File not found: nonexistent.json" in result.output
def test_create_project_invalid_json_config(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- temp_dir: Path
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, temp_dir: Path
) -> None:
"""Test project creation with invalid JSON in config file."""
# Arrange
@@ -145,14 +133,14 @@ def test_create_project_invalid_json_config(
default_config = {
"network": {
"wifi": {"ssid": "default", "password": "default"},
- "thread": {"operational_dataset_hex": "default"}
+ "thread": {"operational_dataset_hex": "default"},
},
"dut_config": {
"pairing_mode": "ble-wifi",
"setup_code": "20202021",
"discriminator": "3840",
- "trace_log": False
- }
+ "trace_log": False,
+ },
}
mock_sync_apis.projects_api.default_config_api_v1_projects_default_config_get.return_value = default_config
@@ -176,14 +164,14 @@ def test_create_project_api_error(
default_config = {
"network": {
"wifi": {"ssid": "default", "password": "default"},
- "thread": {"operational_dataset_hex": "default"}
+ "thread": {"operational_dataset_hex": "default"},
},
"dut_config": {
"pairing_mode": "ble-wifi",
"setup_code": "20202021",
"discriminator": "3840",
- "trace_log": False
- }
+ "trace_log": False,
+ },
}
mock_sync_apis.projects_api.default_config_api_v1_projects_default_config_get.return_value = default_config
@@ -308,10 +296,7 @@ class TestListProjectsCommand:
"""Test cases for the list_projects command."""
def test_list_projects_success_all_projects(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_projects: list[api_models.Project]
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_projects: list[api_models.Project]
) -> None:
"""Test successful listing of all projects."""
# Arrange
@@ -331,10 +316,7 @@ def test_list_projects_success_all_projects(
assert sample_project.name in result.output
def test_list_projects_success_specific_project(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_project: api_models.Project
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_project: api_models.Project
) -> None:
"""Test successful listing of a specific project by ID."""
# Arrange
@@ -351,10 +333,7 @@ def test_list_projects_success_specific_project(
mock_sync_apis.projects_api.read_project_api_v1_projects__id__get.assert_called_once_with(id=1)
def test_list_projects_json_output(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_projects: list[api_models.Project]
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_projects: list[api_models.Project]
) -> None:
"""Test listing projects with JSON output."""
# Arrange
@@ -371,10 +350,7 @@ def test_list_projects_json_output(
assert '"name":' in result.output
def test_list_projects_with_pagination(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_projects: list[api_models.Project]
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_projects: list[api_models.Project]
) -> None:
"""Test listing projects with pagination parameters."""
# Arrange
@@ -391,10 +367,7 @@ def test_list_projects_with_pagination(
)
def test_list_projects_archived(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_projects: list[api_models.Project]
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_projects: list[api_models.Project]
) -> None:
"""Test listing archived projects."""
# Arrange
@@ -469,11 +442,7 @@ class TestUpdateProjectCommand:
"""Test cases for the update_project command."""
def test_update_project_success(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- sample_project: api_models.Project,
- mock_config: Path
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, sample_project: api_models.Project, mock_config: Path
) -> None:
"""Test successful project update."""
# Arrange
@@ -505,10 +474,7 @@ def test_update_project_config_file_not_found(
assert "File not found: nonexistent.json" in result.output
def test_update_project_invalid_json_config(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- temp_dir: Path
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, temp_dir: Path
) -> None:
"""Test project update with invalid JSON in config file."""
# Arrange
@@ -524,11 +490,7 @@ def test_update_project_invalid_json_config(
assert "Error: Failed to parse JSON parameter" in result.output
def test_update_project_api_error(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_config: Path,
- sample_project: api_models.Project
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_config: Path, sample_project: api_models.Project
) -> None:
"""Test project update with API error."""
# Arrange
diff --git a/tests/test_run/camera/test_camera_http_server.py b/tests/test_run/camera/test_camera_http_server.py
index c539c27..acaab32 100644
--- a/tests/test_run/camera/test_camera_http_server.py
+++ b/tests/test_run/camera/test_camera_http_server.py
@@ -15,148 +15,378 @@
#
"""Unit tests for camera_http_server module."""
+import json
import queue
-from unittest.mock import Mock, patch
+from io import BytesIO
+from unittest.mock import MagicMock, Mock, patch
import pytest
from th_cli.test_run.camera.camera_http_server import CameraHTTPServer, VideoStreamingHandler
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_handler(path="/", method="GET", headers=None, body=b"", server_attrs=None):
+ """Build a VideoStreamingHandler instance without a real socket.
+
+ Bypasses __init__ entirely (which would try to parse an HTTP request)
+ and injects the attributes we care about manually.
+ """
+ handler = VideoStreamingHandler.__new__(VideoStreamingHandler)
+
+ # Minimal mock request / client address
+ handler.path = path
+ handler.command = method
+
+ # Mock headers as a dict-like object
+ mock_headers = MagicMock()
+ mock_headers.__contains__ = lambda self, key: key in (headers or {})
+ mock_headers.__getitem__ = lambda self, key: (headers or {})[key]
+ mock_headers.get = lambda key, default=None: (headers or {}).get(key, default)
+ handler.headers = mock_headers
+
+ # Readable body
+ handler.rfile = BytesIO(body)
+
+ # Writable output buffer
+ handler.wfile = BytesIO()
+
+ # Mock server with configurable attributes
+ mock_server = MagicMock()
+ for attr, value in (server_attrs or {}).items():
+ setattr(mock_server, attr, value)
+ handler.server = mock_server
+
+ # Stub out send_response / send_header / end_headers / send_error
+ # so we can inspect what was sent without a real socket
+ handler._response_code = None
+ handler._headers_sent = {}
+ handler._error_code = None
+
+ def _send_response(code, message=None):
+ handler._response_code = code
+
+ def _send_header(key, value):
+ handler._headers_sent[key] = value
+
+ def _end_headers():
+ pass
+
+ def _send_error(code, message=None):
+ handler._error_code = code
+
+ handler.send_response = _send_response
+ handler.send_header = _send_header
+ handler.end_headers = _end_headers
+ handler.send_error = _send_error
+
+ return handler
+
+
+# ---------------------------------------------------------------------------
+# CameraHTTPServer
+# ---------------------------------------------------------------------------
+
@pytest.mark.unit
-class TestCameraHTTPServer:
- """Tests for CameraHTTPServer class."""
+class TestCameraHTTPServerInit:
+ """Tests for CameraHTTPServer.__init__."""
- def test_init_default_port(self):
- """Test server initialization with default port."""
+ def test_default_port(self):
server = CameraHTTPServer()
assert server.port == 8999
+
+ def test_custom_port(self):
+ server = CameraHTTPServer(port=9001)
+ assert server.port == 9001
+
+ def test_server_and_thread_initially_none(self):
+ server = CameraHTTPServer()
assert server.server is None
assert server.server_thread is None
- def test_init_custom_port(self):
- """Test server initialization with custom port."""
- server = CameraHTTPServer(port=9000)
- assert server.port == 9000
- def test_start_server_success(self):
- """Test successful server start."""
- server = CameraHTTPServer(port=0) # Use port 0 for auto-assignment
- mp4_queue = queue.Queue()
- response_queue = queue.Queue()
- video_handler = Mock()
+@pytest.mark.unit
+class TestCameraHTTPServerStart:
+ """Tests for CameraHTTPServer.start."""
- with patch("th_cli.test_run.camera.camera_http_server.ThreadingHTTPServer") as mock_server_class:
- mock_server_instance = Mock()
- mock_server_class.return_value = mock_server_instance
+ def _start(self, server, **kwargs):
+ defaults = dict(
+ mp4_queue=queue.Queue(),
+ response_queue=queue.Queue(),
+ video_handler=Mock(),
+ prompt_options={"PASS": 1, "FAIL": 2},
+ prompt_text="Test prompt",
+ local_ip="192.168.1.1",
+ )
+ defaults.update(kwargs)
+ with patch("th_cli.test_run.camera.camera_http_server.ThreadingHTTPServer") as mock_cls:
+ mock_srv = Mock()
+ mock_cls.return_value = mock_srv
+ with patch("th_cli.test_run.camera.camera_http_server.threading.Thread") as mock_thread_cls:
+ mock_thread = Mock()
+ mock_thread_cls.return_value = mock_thread
+ server.start(**defaults)
+ return mock_srv, mock_thread
- with patch("threading.Thread") as mock_thread:
- mock_thread_instance = Mock()
- mock_thread.return_value = mock_thread_instance
+ def test_server_attributes_set_correctly(self):
+ server = CameraHTTPServer(port=0)
+ mp4_q = queue.Queue()
+ resp_q = queue.Queue()
+ mock_srv, _ = self._start(
+ server,
+ mp4_queue=mp4_q,
+ response_queue=resp_q,
+ prompt_options={"PASS": 1},
+ prompt_text="Hello",
+ local_ip="10.0.0.1",
+ )
+ assert mock_srv.mp4_queue is mp4_q
+ assert mock_srv.response_queue is resp_q
+ assert mock_srv.prompt_options == {"PASS": 1}
+ assert mock_srv.prompt_text == "Hello"
+ assert mock_srv.local_ip == "10.0.0.1"
+ assert mock_srv.allow_reuse_address is True
- server.start(
- mp4_queue=mp4_queue,
- response_queue=response_queue,
- video_handler=video_handler,
- prompt_options={"PASS": 1, "FAIL": 2},
- prompt_text="Test prompt",
- local_ip="192.168.1.100",
- )
+ def test_none_prompt_options_defaults_to_empty_dict(self):
+ server = CameraHTTPServer(port=0)
+ mock_srv, _ = self._start(server, prompt_options=None)
+ assert mock_srv.prompt_options == {}
- # Verify server creation and configuration
- mock_server_class.assert_called_once_with(("0.0.0.0", 0), VideoStreamingHandler)
- assert mock_server_instance.allow_reuse_address is True
- assert mock_server_instance.mp4_queue == mp4_queue
- assert mock_server_instance.response_queue == response_queue
- assert mock_server_instance.prompt_options == {"PASS": 1, "FAIL": 2}
- assert mock_server_instance.prompt_text == "Test prompt"
- assert mock_server_instance.local_ip == "192.168.1.100"
-
- # Verify thread creation and start
- mock_thread.assert_called_once()
- mock_thread_instance.start.assert_called_once()
-
- # Verify server and thread are stored
- assert server.server == mock_server_instance
- assert server.server_thread == mock_thread_instance
-
- def test_start_server_with_push_av(self):
- """Test server start with Push AV configuration."""
+ def test_none_local_ip_defaults_to_localhost(self):
server = CameraHTTPServer(port=0)
- mp4_queue = queue.Queue()
- response_queue = queue.Queue()
+ mock_srv, _ = self._start(server, local_ip=None)
+ assert mock_srv.local_ip == "localhost"
- with patch("th_cli.test_run.camera.camera_http_server.ThreadingHTTPServer") as mock_server_class:
- mock_server_instance = Mock()
- mock_server_class.return_value = mock_server_instance
+ def test_thread_is_started(self):
+ server = CameraHTTPServer(port=0)
+ _, mock_thread = self._start(server)
+ mock_thread.start.assert_called_once()
+
+ def test_server_and_thread_stored_on_instance(self):
+ server = CameraHTTPServer(port=0)
+ mock_srv, mock_thread = self._start(server)
+ assert server.server is mock_srv
+ assert server.server_thread is mock_thread
+
+ def test_push_av_flags_set(self):
+ server = CameraHTTPServer(port=0)
+ mock_srv, _ = self._start(
+ server,
+ is_push_av_verification=True,
+ push_av_server_url="https://device:1234",
+ )
+ assert mock_srv.is_push_av_verification is True
+ assert mock_srv.push_av_server_url == "https://device:1234"
- with patch("threading.Thread"):
+ def test_start_raises_on_server_creation_failure(self):
+ server = CameraHTTPServer(port=0)
+ with patch(
+ "th_cli.test_run.camera.camera_http_server.ThreadingHTTPServer",
+ side_effect=OSError("address in use"),
+ ):
+ with pytest.raises(OSError):
server.start(
- mp4_queue=mp4_queue,
- response_queue=response_queue,
+ mp4_queue=queue.Queue(),
+ response_queue=queue.Queue(),
video_handler=None,
- prompt_options={"PASS": 1, "FAIL": 2},
- prompt_text="Push AV verification",
- is_push_av_verification=True,
- push_av_server_url="https://localhost:1234",
- local_ip="192.168.1.100",
)
- assert mock_server_instance.is_push_av_verification is True
- assert mock_server_instance.push_av_server_url == "https://localhost:1234"
- def test_stop_server(self):
- """Test server stop."""
- server = CameraHTTPServer()
+@pytest.mark.unit
+class TestCameraHTTPServerStop:
+ """Tests for CameraHTTPServer.stop."""
- # Mock server and thread
- mock_server_instance = Mock()
- mock_thread_instance = Mock()
- server.server = mock_server_instance
- server.server_thread = mock_thread_instance
+ def test_stop_calls_shutdown_and_clears_references(self):
+ server = CameraHTTPServer()
+ mock_srv = Mock()
+ server.server = mock_srv
+ server.server_thread = Mock()
server.stop()
- # Verify shutdown called and cleanup
- mock_server_instance.shutdown.assert_called_once()
+ mock_srv.shutdown.assert_called_once()
assert server.server is None
assert server.server_thread is None
- def test_stop_server_when_not_running(self):
- """Test stopping server when it's not running."""
+ def test_stop_is_noop_when_not_started(self):
server = CameraHTTPServer()
+ server.stop() # must not raise
+ assert server.server is None
- # Should not raise an exception
- server.stop()
+ def test_stop_clears_state_even_when_shutdown_raises(self):
+ server = CameraHTTPServer()
+ mock_srv = Mock()
+ mock_srv.shutdown.side_effect = Exception("already dead")
+ server.server = mock_srv
+ server.server_thread = Mock()
+
+ server.stop() # must not propagate
assert server.server is None
+ assert server.server_thread is None
+
+
+# ---------------------------------------------------------------------------
+# VideoStreamingHandler routing
+# ---------------------------------------------------------------------------
@pytest.mark.unit
-class TestVideoStreamingHandler:
- """Tests for VideoStreamingHandler class."""
-
- def test_handler_class_exists(self):
- """Test that VideoStreamingHandler class exists and can be imported."""
- # Simple test to verify the class exists
- assert VideoStreamingHandler is not None
- assert hasattr(VideoStreamingHandler, "do_GET")
- assert hasattr(VideoStreamingHandler, "do_POST")
- assert hasattr(VideoStreamingHandler, "do_OPTIONS")
-
- def test_handler_has_required_methods(self):
- """Test that handler has all required HTTP methods."""
- required_methods = [
- "do_GET",
- "do_POST",
- "do_OPTIONS",
- "stream_live_video",
- "handle_response",
- "handle_streams_api",
- "handle_stream_proxy",
- "handle_simple_proxy",
- "serve_player",
- ]
-
- for method in required_methods:
- assert hasattr(VideoStreamingHandler, method), f"Missing method: {method}"
+class TestVideoStreamingHandlerRouting:
+ """Tests for do_GET and do_POST routing in VideoStreamingHandler."""
+
+ def test_do_get_root_calls_serve_player(self):
+ handler = _make_handler(path="/")
+ with patch.object(handler, "serve_player") as mock_serve:
+ handler.do_GET()
+ mock_serve.assert_called_once()
+
+ def test_do_get_video_live_calls_stream_live_video(self):
+ handler = _make_handler(path="/video_live.mp4")
+ with patch.object(handler, "stream_live_video") as mock_stream:
+ handler.do_GET()
+ mock_stream.assert_called_once()
+
+ def test_do_get_api_streams_calls_handle_streams_api(self):
+ handler = _make_handler(path="/api/streams")
+ with patch.object(handler, "handle_streams_api") as mock_streams:
+ handler.do_GET()
+ mock_streams.assert_called_once()
+
+ def test_do_get_api_stream_proxy_calls_handle_stream_proxy(self):
+ handler = _make_handler(path="/api/stream_proxy?url=http://x/y")
+ with patch.object(handler, "handle_stream_proxy") as mock_proxy:
+ handler.do_GET()
+ mock_proxy.assert_called_once()
+
+ def test_do_get_proxy_path_calls_handle_simple_proxy(self):
+ handler = _make_handler(path="/proxy/abc123")
+ with patch.object(handler, "handle_simple_proxy") as mock_simple:
+ handler.do_GET()
+ mock_simple.assert_called_once()
+
+ def test_do_get_unknown_path_sends_404(self):
+ handler = _make_handler(path="/not/a/real/path")
+ handler.do_GET()
+ assert handler._error_code == 404
+
+ def test_do_get_strips_query_string_for_routing(self):
+ """Query parameters must not break routing to serve_player."""
+ handler = _make_handler(path="/?nocache=1234")
+ with patch.object(handler, "serve_player") as mock_serve:
+ handler.do_GET()
+ mock_serve.assert_called_once()
+
+ def test_do_post_submit_response_calls_handle_response(self):
+ handler = _make_handler(path="/submit_response", method="POST")
+ with patch.object(handler, "handle_response") as mock_resp:
+ handler.do_POST()
+ mock_resp.assert_called_once()
+
+ def test_do_post_unknown_path_sends_404(self):
+ handler = _make_handler(path="/unknown", method="POST")
+ handler.do_POST()
+ assert handler._error_code == 404
+
+
+# ---------------------------------------------------------------------------
+# VideoStreamingHandler.handle_response
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestHandleResponse:
+ """Tests for VideoStreamingHandler.handle_response — the only method with
+ substantial custom logic that can be unit-tested without a live network."""
+
+    def _make(self, body: bytes, headers: dict | None = None, response_queue=None):
+ content_length = str(len(body))
+ hdrs = {"Content-Length": content_length}
+ if headers:
+ hdrs.update(headers)
+ srv_attrs = {}
+ if response_queue is not None:
+ srv_attrs["response_queue"] = response_queue
+ handler = _make_handler(
+ path="/submit_response",
+ method="POST",
+ headers=hdrs,
+ body=body,
+ server_attrs=srv_attrs,
+ )
+ return handler
+
+ def test_valid_response_queued_and_200_returned(self):
+ resp_q = queue.Queue()
+ body = json.dumps({"response": 1}).encode()
+ handler = self._make(body, response_queue=resp_q)
+
+ handler.handle_response()
+
+ assert handler._response_code == 200
+ assert resp_q.get_nowait() == 1
+
+ def test_missing_content_length_header_returns_400(self):
+ handler = _make_handler(
+ path="/submit_response",
+ method="POST",
+ headers={}, # no Content-Length
+ body=b"",
+ )
+ handler.handle_response()
+ assert handler._error_code == 400
+
+ def test_invalid_json_returns_400(self):
+ body = b"not json at all"
+ handler = self._make(body, response_queue=queue.Queue())
+ handler.handle_response()
+ assert handler._response_code == 400
+ output = json.loads(handler.wfile.getvalue())
+ assert "Invalid JSON" in output["error"]
+
+ def test_missing_response_key_returns_400(self):
+ body = json.dumps({"other_key": 42}).encode()
+ handler = self._make(body, response_queue=queue.Queue())
+ handler.handle_response()
+ assert handler._response_code == 400
+
+ def test_non_integer_response_value_returns_400(self):
+ body = json.dumps({"response": "not-a-number"}).encode()
+ handler = self._make(body, response_queue=queue.Queue())
+ handler.handle_response()
+ assert handler._response_code == 400
+
+ def test_no_response_queue_on_server_returns_500(self):
+ body = json.dumps({"response": 1}).encode()
+        # MagicMock auto-creates attributes on access, so response_queue must be
+ handler = _make_handler(
+ path="/submit_response",
+ method="POST",
+ headers={"Content-Length": str(len(body))},
+ body=body,
+ server_attrs={},
+ )
+ # Make sure getattr(server, "response_queue", None) returns None
+ handler.server.response_queue = None
+ handler.handle_response()
+ assert handler._error_code == 500
+
+ def test_full_response_queue_returns_500(self):
+ full_q = queue.Queue(maxsize=1)
+ full_q.put_nowait(99) # fill it
+ body = json.dumps({"response": 1}).encode()
+ handler = self._make(body, response_queue=full_q)
+ handler.handle_response()
+ assert handler._error_code == 500
+
+ def test_success_response_body_is_valid_json(self):
+ resp_q = queue.Queue()
+ body = json.dumps({"response": 2}).encode()
+ handler = self._make(body, response_queue=resp_q)
+ handler.handle_response()
+ output = json.loads(handler.wfile.getvalue())
+ assert output == {"status": "success"}
diff --git a/tests/test_run/camera/test_camera_stream_handler.py b/tests/test_run/camera/test_camera_stream_handler.py
new file mode 100644
index 0000000..50e70cf
--- /dev/null
+++ b/tests/test_run/camera/test_camera_stream_handler.py
@@ -0,0 +1,133 @@
+#
+# Copyright (c) 2026 Project CHIP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Unit tests for CameraStreamHandler."""
+
+import asyncio
+import queue
+from pathlib import Path
+from unittest.mock import patch
+
+import pytest
+
+from th_cli.test_run.camera.camera_stream_handler import CameraStreamHandler
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_handler(output_dir=None) -> CameraStreamHandler:
+ """Construct a CameraStreamHandler with mocked sub-components."""
+ with patch("th_cli.test_run.camera.camera_stream_handler.VideoWebSocketManager"):
+ with patch("th_cli.test_run.camera.camera_stream_handler.CameraHTTPServer"):
+ if output_dir:
+ return CameraStreamHandler(output_dir=str(output_dir))
+ with patch.object(Path, "mkdir"):
+ return CameraStreamHandler()
+
+
+# ---------------------------------------------------------------------------
+# __init__
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestCameraStreamHandlerInit:
+ def test_response_queue_is_queue(self):
+ h = _make_handler()
+ assert isinstance(h.response_queue, queue.Queue)
+
+ def test_mp4_queue_is_queue(self):
+ h = _make_handler()
+ assert isinstance(h.mp4_queue, queue.Queue)
+
+ def test_prompt_options_initially_empty(self):
+ h = _make_handler()
+ assert h.prompt_options == {}
+
+ def test_prompt_text_initially_empty_string(self):
+ h = _make_handler()
+ assert h.prompt_text == ""
+
+ def test_initialization_error_initially_none(self):
+ h = _make_handler()
+ assert h.initialization_error is None
+
+
+# ---------------------------------------------------------------------------
+# set_prompt_data
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestSetPromptData:
+ def test_stores_prompt_text_and_options(self):
+ h = _make_handler()
+ h.set_prompt_data("Is the video clear?", {"PASS": 1, "FAIL": 2})
+ assert h.prompt_text == "Is the video clear?"
+ assert h.prompt_options == {"PASS": 1, "FAIL": 2}
+
+ def test_overwrites_previous_values(self):
+ h = _make_handler()
+ h.set_prompt_data("First", {"A": 1})
+ h.set_prompt_data("Second", {"B": 2})
+ assert h.prompt_text == "Second"
+ assert h.prompt_options == {"B": 2}
+
+ def test_empty_options_stored(self):
+ h = _make_handler()
+ h.set_prompt_data("No options", {})
+ assert h.prompt_options == {}
+
+
+# ---------------------------------------------------------------------------
+# wait_for_user_response
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestWaitForUserResponse:
+ @pytest.mark.asyncio
+ async def test_returns_pre_queued_response_immediately(self):
+ h = _make_handler()
+ h.response_queue.put_nowait(1)
+ result = await h.wait_for_user_response(timeout=1.0)
+ assert result == 1
+
+ @pytest.mark.asyncio
+ async def test_returns_none_on_timeout(self):
+ h = _make_handler()
+ result = await h.wait_for_user_response(timeout=0.15)
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_returns_response_enqueued_during_wait(self):
+ h = _make_handler()
+
+ async def enqueue_later():
+ await asyncio.sleep(0.05)
+ h.response_queue.put_nowait(2)
+
+ results = await asyncio.gather(enqueue_later(), h.wait_for_user_response(timeout=1.0))
+ assert results[1] == 2
+
+ @pytest.mark.asyncio
+ async def test_returns_first_response_when_multiple_queued(self):
+ h = _make_handler()
+ h.response_queue.put_nowait(10)
+ h.response_queue.put_nowait(20)
+ result = await h.wait_for_user_response(timeout=1.0)
+ assert result == 10
diff --git a/tests/test_run/camera/test_image_handler.py b/tests/test_run/camera/test_image_handler.py
new file mode 100644
index 0000000..df90d5a
--- /dev/null
+++ b/tests/test_run/camera/test_image_handler.py
@@ -0,0 +1,361 @@
+#
+# Copyright (c) 2026 Project CHIP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Unit tests for image_handler module."""
+
+import asyncio
+import json
+import queue
+from io import BytesIO
+from unittest.mock import MagicMock, Mock, patch
+
+import pytest
+
+from th_cli.test_run.camera.image_handler import ImageVerificationHandler, ImageVerificationHTTPHandler
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_http_handler(path="/", method="GET", headers=None, body=b"", server_attrs=None):
+ """Construct an ImageVerificationHTTPHandler without a live socket."""
+ handler = ImageVerificationHTTPHandler.__new__(ImageVerificationHTTPHandler)
+ handler.path = path
+ handler.command = method
+
+ mock_headers = MagicMock()
+ mock_headers.__contains__ = lambda self, key: key in (headers or {})
+ mock_headers.__getitem__ = lambda self, key: (headers or {})[key]
+ mock_headers.get = lambda key, default=None: (headers or {}).get(key, default)
+ handler.headers = mock_headers
+
+ handler.rfile = BytesIO(body)
+ handler.wfile = BytesIO()
+
+ mock_server = MagicMock()
+ for attr, value in (server_attrs or {}).items():
+ setattr(mock_server, attr, value)
+ handler.server = mock_server
+
+ handler._response_code = None
+ handler._error_code = None
+ handler._headers_sent = {}
+
+ def _send_response(code, message=None):
+ handler._response_code = code
+
+ def _send_header(key, value):
+ handler._headers_sent[key] = value
+
+ def _end_headers():
+ pass
+
+ def _send_error(code, message=None):
+ handler._error_code = code
+
+ handler.send_response = _send_response
+ handler.send_header = _send_header
+ handler.end_headers = _end_headers
+ handler.send_error = _send_error
+
+ return handler
+
+
+# ---------------------------------------------------------------------------
+# ImageVerificationHTTPHandler routing
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestImageVerificationHTTPHandlerRouting:
+ """Tests for do_GET and do_POST routing."""
+
+ def test_get_root_calls_serve_page(self):
+ handler = _make_http_handler(path="/")
+ with patch.object(handler, "_serve_page") as mock_serve:
+ handler.do_GET()
+ mock_serve.assert_called_once()
+
+ def test_get_image_calls_serve_image(self):
+ handler = _make_http_handler(path="/image")
+ with patch.object(handler, "_serve_image") as mock_img:
+ handler.do_GET()
+ mock_img.assert_called_once()
+
+ def test_get_unknown_path_sends_404(self):
+ handler = _make_http_handler(path="/unknown")
+ handler.do_GET()
+ assert handler._error_code == 404
+
+ def test_post_submit_response_calls_handle_response(self):
+ handler = _make_http_handler(path="/submit_response", method="POST")
+ with patch.object(handler, "_handle_response") as mock_resp:
+ handler.do_POST()
+ mock_resp.assert_called_once()
+
+ def test_post_unknown_path_sends_404(self):
+ handler = _make_http_handler(path="/other", method="POST")
+ handler.do_POST()
+ assert handler._error_code == 404
+
+
+# ---------------------------------------------------------------------------
+# ImageVerificationHTTPHandler._serve_image
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestServeImage:
+ """Tests for ImageVerificationHTTPHandler._serve_image."""
+
+ def test_serves_image_data_with_200(self):
+ image_bytes = b"\xff\xd8\xff\xe0" + b"\x00" * 100
+ handler = _make_http_handler(server_attrs={"image_data": image_bytes})
+ handler._serve_image()
+ assert handler._response_code == 200
+ assert handler._headers_sent.get("Content-Type") == "image/jpeg"
+ assert handler._headers_sent.get("Content-Length") == str(len(image_bytes))
+ assert handler.wfile.getvalue() == image_bytes
+
+ def test_returns_404_when_no_image_data(self):
+ handler = _make_http_handler(server_attrs={"image_data": None})
+ handler._serve_image()
+ assert handler._error_code == 404
+
+ def test_returns_404_when_image_data_empty(self):
+ handler = _make_http_handler(server_attrs={"image_data": b""})
+ handler._serve_image()
+ assert handler._error_code == 404
+
+
+# ---------------------------------------------------------------------------
+# ImageVerificationHTTPHandler._handle_response
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestHTTPHandlerHandleResponse:
+ """Tests for ImageVerificationHTTPHandler._handle_response."""
+
+ def _make(self, body: bytes, response_queue=None):
+ srv_attrs = {}
+ if response_queue is not None:
+ srv_attrs["response_queue"] = response_queue
+ hdrs = {"Content-Length": str(len(body))}
+ return _make_http_handler(
+ path="/submit_response",
+ method="POST",
+ headers=hdrs,
+ body=body,
+ server_attrs=srv_attrs,
+ )
+
+ def test_valid_response_queued_and_200_returned(self):
+ resp_q = queue.Queue()
+ handler = self._make(json.dumps({"response": 1}).encode(), response_queue=resp_q)
+ handler._handle_response()
+ assert handler._response_code == 200
+ assert resp_q.get_nowait() == 1
+
+ def test_success_response_body_is_valid_json(self):
+ resp_q = queue.Queue()
+ handler = self._make(json.dumps({"response": 2}).encode(), response_queue=resp_q)
+ handler._handle_response()
+ output = json.loads(handler.wfile.getvalue())
+ assert output == {"status": "success"}
+
+ def test_invalid_json_returns_400(self):
+ handler = self._make(b"not-json", response_queue=queue.Queue())
+ handler._handle_response()
+ assert handler._response_code == 400
+
+ def test_missing_response_key_returns_400(self):
+ body = json.dumps({"wrong_key": 1}).encode()
+ handler = self._make(body, response_queue=queue.Queue())
+ handler._handle_response()
+ assert handler._response_code == 400
+
+ def test_non_integer_response_value_returns_400(self):
+ body = json.dumps({"response": "abc"}).encode()
+ handler = self._make(body, response_queue=queue.Queue())
+ handler._handle_response()
+ assert handler._response_code == 400
+
+ def test_no_response_queue_still_returns_200(self):
+ """When response_queue is absent the value is silently dropped, not an error."""
+ handler = _make_http_handler(
+ path="/submit_response",
+ method="POST",
+ headers={"Content-Length": "16"},
+ body=json.dumps({"response": 1}).encode(),
+ )
+ handler.server.response_queue = None
+ handler._handle_response()
+ assert handler._response_code == 200
+
+
+# ---------------------------------------------------------------------------
+# ImageVerificationHandler.__init__ and set_prompt_data
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestImageVerificationHandlerInit:
+ def test_default_port(self):
+ assert ImageVerificationHandler().port == 8999
+
+ def test_custom_port(self):
+ assert ImageVerificationHandler(port=9090).port == 9090
+
+ def test_http_server_and_thread_initially_none(self):
+ h = ImageVerificationHandler()
+ assert h.http_server is None
+ assert h._server_thread is None
+
+ def test_response_queue_is_queue(self):
+ assert isinstance(ImageVerificationHandler()._response_queue, queue.Queue)
+
+
+@pytest.mark.unit
+class TestSetPromptData:
+ def test_stores_all_three_fields(self):
+ h = ImageVerificationHandler()
+ h.set_prompt_data("Verify", {"PASS": 1}, b"\xff\xd8")
+ assert h._prompt_text == "Verify"
+ assert h._options == {"PASS": 1}
+ assert h._image_data == b"\xff\xd8"
+
+
+# ---------------------------------------------------------------------------
+# ImageVerificationHandler.start_image_server
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestStartImageServer:
+
+ @pytest.mark.asyncio
+ async def test_creates_server_with_correct_attributes(self):
+ h = ImageVerificationHandler(port=0)
+ h.set_prompt_data("Prompt", {"PASS": 1}, b"\xff\xd8")
+
+ with patch("th_cli.test_run.camera.image_handler.ThreadingHTTPServer") as mock_cls:
+ mock_srv = MagicMock()
+ mock_cls.return_value = mock_srv
+ with patch("th_cli.test_run.camera.image_handler.threading.Thread") as mock_thread_cls:
+ mock_thread = MagicMock()
+ mock_thread_cls.return_value = mock_thread
+ await h.start_image_server()
+
+ assert mock_srv.allow_reuse_address is True
+ assert mock_srv.image_data == b"\xff\xd8"
+ assert mock_srv.prompt_text == "Prompt"
+ assert mock_srv.prompt_options == {"PASS": 1}
+ assert mock_srv.response_queue is h._response_queue
+ mock_thread.start.assert_called_once()
+ assert h.http_server is mock_srv
+ assert h._server_thread is mock_thread
+
+ @pytest.mark.asyncio
+ async def test_default_prompt_text_when_not_set(self):
+ h = ImageVerificationHandler(port=0)
+ h._image_data = b"\xff\xd8" # skip set_prompt_data
+
+ with patch("th_cli.test_run.camera.image_handler.ThreadingHTTPServer") as mock_cls:
+ mock_srv = MagicMock()
+ mock_cls.return_value = mock_srv
+ with patch("th_cli.test_run.camera.image_handler.threading.Thread"):
+ await h.start_image_server()
+
+ assert mock_srv.prompt_text == "Verify the snapshot image"
+
+ @pytest.mark.asyncio
+ async def test_default_prompt_options_when_not_set(self):
+ h = ImageVerificationHandler(port=0)
+ h._image_data = b"\xff\xd8"
+
+ with patch("th_cli.test_run.camera.image_handler.ThreadingHTTPServer") as mock_cls:
+ mock_srv = MagicMock()
+ mock_cls.return_value = mock_srv
+ with patch("th_cli.test_run.camera.image_handler.threading.Thread"):
+ await h.start_image_server()
+
+ assert mock_srv.prompt_options == {}
+
+
+# ---------------------------------------------------------------------------
+# ImageVerificationHandler.wait_for_user_response
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestWaitForUserResponse:
+
+ @pytest.mark.asyncio
+ async def test_returns_pre_queued_response_immediately(self):
+ h = ImageVerificationHandler()
+ h._response_queue.put_nowait(1)
+ assert await h.wait_for_user_response(timeout=1.0) == 1
+
+ @pytest.mark.asyncio
+ async def test_returns_none_on_timeout(self):
+ h = ImageVerificationHandler()
+ assert await h.wait_for_user_response(timeout=0.15) is None
+
+ @pytest.mark.asyncio
+ async def test_returns_response_enqueued_during_poll(self):
+ h = ImageVerificationHandler()
+
+ async def enqueue_later():
+ await asyncio.sleep(0.05)
+ h._response_queue.put_nowait(2)
+
+ results = await asyncio.gather(enqueue_later(), h.wait_for_user_response(timeout=1.0))
+ assert results[1] == 2
+
+
+# ---------------------------------------------------------------------------
+# ImageVerificationHandler.stop_image_server
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestStopImageServer:
+
+ def test_calls_shutdown_and_clears_state(self):
+ h = ImageVerificationHandler()
+ mock_srv = MagicMock()
+ h.http_server = mock_srv
+ h._server_thread = Mock()
+ h.stop_image_server()
+ mock_srv.shutdown.assert_called_once()
+ assert h.http_server is None
+ assert h._server_thread is None
+
+ def test_noop_when_not_started(self):
+ h = ImageVerificationHandler()
+ h.stop_image_server() # must not raise
+ assert h.http_server is None
+
+ def test_clears_state_even_when_shutdown_raises(self):
+ h = ImageVerificationHandler()
+ mock_srv = MagicMock()
+ mock_srv.shutdown.side_effect = Exception("already dead")
+ h.http_server = mock_srv
+ h._server_thread = Mock()
+ h.stop_image_server()
+ assert h.http_server is None
+ assert h._server_thread is None
diff --git a/tests/test_run/test_prompt_manager.py b/tests/test_run/test_prompt_manager.py
index f6ee172..e3249c5 100644
--- a/tests/test_run/test_prompt_manager.py
+++ b/tests/test_run/test_prompt_manager.py
@@ -15,8 +15,9 @@
#
"""Unit tests for prompt_manager module."""
-import asyncio
import json
+import os
+import tempfile
from unittest.mock import AsyncMock, MagicMock, Mock, patch
import pytest
@@ -26,77 +27,78 @@
from th_cli.test_run.socket_schemas import (
ImageVerificationPromptRequest,
MessagePromptRequest,
- OptionsSelectPromptRequest,
+ PromptRequest,
PushAVStreamVerificationRequest,
- StreamVerificationPromptRequest,
TextInputPromptRequest,
UserResponseStatusEnum,
)
+# ---------------------------------------------------------------------------
+# _get_local_ip
+# ---------------------------------------------------------------------------
+
@pytest.mark.unit
class TestGetLocalIp:
- """Tests for _get_local_ip function."""
-
- def test_get_local_ip_success(self):
- """Test successful local IP retrieval."""
- with patch("socket.socket") as mock_socket:
+ def test_returns_detected_ip(self):
+ with patch("socket.socket") as mock_socket_cls:
mock_sock = MagicMock()
mock_sock.getsockname.return_value = ("192.168.1.100", 12345)
- mock_socket.return_value.__enter__.return_value = mock_sock
+ mock_socket_cls.return_value.__enter__.return_value = mock_sock
result = prompt_manager._get_local_ip()
- assert result == "192.168.1.100"
- mock_sock.connect.assert_called_once_with(("8.8.8.8", 80))
+ assert result == "192.168.1.100"
+ mock_sock.connect.assert_called_once_with(("8.8.8.8", 80))
- def test_get_local_ip_fallback_on_error(self):
- """Test fallback to localhost on connection error."""
- with patch("socket.socket") as mock_socket:
- mock_socket.return_value.__enter__.side_effect = Exception("Network error")
+ def test_falls_back_to_localhost_on_error(self):
+ with patch("socket.socket") as mock_socket_cls:
+ mock_socket_cls.return_value.__enter__.side_effect = Exception("Network error")
result = prompt_manager._get_local_ip()
- assert result == "localhost"
+ assert result == "localhost"
+
+
+# ---------------------------------------------------------------------------
+# _get_video_handler
+# ---------------------------------------------------------------------------
@pytest.mark.unit
class TestGetVideoHandler:
- """Tests for _get_video_handler function."""
-
- def test_get_video_handler_creates_instance(self):
- """Test that video handler instance is created on first call."""
- # Reset global instance
+ def test_creates_instance_on_first_call(self):
prompt_manager._video_handler_instance = None
- with patch("th_cli.test_run.camera.CameraStreamHandler") as mock_handler:
+ with patch("th_cli.test_run.camera.CameraStreamHandler") as mock_cls:
mock_instance = MagicMock()
- mock_handler.return_value = mock_instance
+ mock_cls.return_value = mock_instance
result = prompt_manager._get_video_handler()
- assert result == mock_instance
- mock_handler.assert_called_once()
+ assert result is mock_instance
+ mock_cls.assert_called_once()
- def test_get_video_handler_reuses_instance(self):
- """Test that video handler instance is reused on subsequent calls."""
+ def test_reuses_existing_instance(self):
mock_instance = MagicMock()
prompt_manager._video_handler_instance = mock_instance
- with patch("th_cli.test_run.camera.CameraStreamHandler") as mock_handler:
+ with patch("th_cli.test_run.camera.CameraStreamHandler") as mock_cls:
result = prompt_manager._get_video_handler()
- assert result == mock_instance
- mock_handler.assert_not_called()
+ assert result is mock_instance
+ mock_cls.assert_not_called()
+
+
+# ---------------------------------------------------------------------------
+# _cleanup_video_handler
+# ---------------------------------------------------------------------------
@pytest.mark.unit
class TestCleanupVideoHandler:
- """Tests for _cleanup_video_handler function."""
-
@pytest.mark.asyncio
- async def test_cleanup_video_handler_success(self):
- """Test successful cleanup of video handler."""
+ async def test_calls_stop_on_existing_instance(self):
mock_instance = MagicMock()
mock_instance.stop_video_capture_and_stream = AsyncMock()
prompt_manager._video_handler_instance = mock_instance
@@ -106,171 +108,106 @@ async def test_cleanup_video_handler_success(self):
mock_instance.stop_video_capture_and_stream.assert_called_once()
@pytest.mark.asyncio
- async def test_cleanup_video_handler_no_instance(self):
- """Test cleanup when no instance exists."""
+ async def test_noop_when_no_instance(self):
prompt_manager._video_handler_instance = None
-
- # Should not raise an exception
- await prompt_manager._cleanup_video_handler()
+ await prompt_manager._cleanup_video_handler() # must not raise
@pytest.mark.asyncio
- async def test_cleanup_video_handler_error_ignored(self):
- """Test that cleanup errors are ignored."""
+ async def test_errors_are_silently_ignored(self):
mock_instance = MagicMock()
- mock_instance.stop_video_capture_and_stream = AsyncMock(side_effect=Exception("Cleanup error"))
+ mock_instance.stop_video_capture_and_stream = AsyncMock(side_effect=Exception("fail"))
prompt_manager._video_handler_instance = mock_instance
- # Should not raise an exception
- await prompt_manager._cleanup_video_handler()
+ await prompt_manager._cleanup_video_handler() # must not raise
+
+
+# ---------------------------------------------------------------------------
+# handle_prompt — routing
+# ---------------------------------------------------------------------------
@pytest.mark.unit
-class TestHandlePrompt:
- """Tests for handle_prompt function."""
+class TestHandlePromptRouting:
@pytest.mark.asyncio
- async def test_handle_prompt_image_verification(self):
- """Test routing to image verification handler."""
+ async def test_image_verification_routes_to_image_handler(self):
mock_socket = AsyncMock()
- mock_request = ImageVerificationPromptRequest(
+ request = ImageVerificationPromptRequest(
message_id=1,
prompt="Verify image",
timeout=30,
options={"PASS": 1, "FAIL": 2},
image_hex_str="ffd8ffe0",
)
-
- with patch("th_cli.test_run.prompt_manager._handle_image_verification_prompt") as mock_handler:
- mock_handler.return_value = asyncio.Future()
- mock_handler.return_value.set_result(None)
-
+ with patch(
+ "th_cli.test_run.prompt_manager._handle_image_verification_prompt",
+ new_callable=AsyncMock,
+ ) as mock_handler:
await prompt_manager.handle_prompt(
socket=mock_socket,
- request=mock_request,
+ request=request,
message_type=MessageTypeEnum.IMAGE_VERIFICATION_REQUEST,
)
-
- mock_handler.assert_called_once_with(socket=mock_socket, prompt=mock_request)
-
- @pytest.mark.asyncio
- async def test_handle_prompt_stream_verification(self):
- """Test routing to stream verification handler."""
- mock_socket = AsyncMock()
- mock_request = StreamVerificationPromptRequest(
- message_id=2,
- prompt="Verify stream",
- timeout=120,
- options={"PASS": 1, "FAIL": 2},
- )
-
- with patch("th_cli.test_run.prompt_manager.__handle_stream_verification_prompt") as mock_handler:
- mock_handler.return_value = asyncio.Future()
- mock_handler.return_value.set_result(None)
-
- await prompt_manager.handle_prompt(
- socket=mock_socket,
- request=mock_request,
- message_type=MessageTypeEnum.STREAM_VERIFICATION_REQUEST,
- )
-
- mock_handler.assert_called_once_with(socket=mock_socket, prompt=mock_request)
+ mock_handler.assert_called_once_with(socket=mock_socket, prompt=request)
@pytest.mark.asyncio
- async def test_handle_prompt_push_av_stream(self):
- """Test routing to Push AV stream handler."""
+ async def test_push_av_stream_routes_to_push_av_handler(self):
mock_socket = AsyncMock()
- mock_request = PushAVStreamVerificationRequest(
+ request = PushAVStreamVerificationRequest(
message_id=3,
- prompt="Verify Push AV stream",
+ prompt="Verify Push AV",
timeout=120,
options={"PASS": 1, "FAIL": 2},
)
-
- with patch("th_cli.test_run.prompt_manager._handle_push_av_stream_prompt") as mock_handler:
- mock_handler.return_value = asyncio.Future()
- mock_handler.return_value.set_result(None)
-
+ with patch(
+ "th_cli.test_run.prompt_manager._handle_push_av_stream_prompt",
+ new_callable=AsyncMock,
+ ) as mock_handler:
await prompt_manager.handle_prompt(
socket=mock_socket,
- request=mock_request,
+ request=request,
message_type=MessageTypeEnum.PUSH_AV_STREAM_VERIFICATION_REQUEST,
)
-
- mock_handler.assert_called_once_with(socket=mock_socket, prompt=mock_request)
+ mock_handler.assert_called_once_with(socket=mock_socket, prompt=request)
@pytest.mark.asyncio
- async def test_handle_prompt_message_request(self):
- """Test routing to message handler."""
+ async def test_message_request_sends_ack_response(self):
mock_socket = AsyncMock()
- mock_request = MessagePromptRequest(
- message_id=4,
- prompt="Acknowledge this message",
- timeout=30,
- )
-
- with patch("th_cli.test_run.prompt_manager.__handle_message_prompt") as mock_handler:
- mock_handler.return_value = asyncio.Future()
- mock_handler.return_value.set_result(None)
+ request = MessagePromptRequest(message_id=4, prompt="Acknowledge this", timeout=30)
+ with patch(
+ "th_cli.test_run.prompt_manager._send_prompt_response",
+ new_callable=AsyncMock,
+ ) as mock_send:
await prompt_manager.handle_prompt(
socket=mock_socket,
- request=mock_request,
+ request=request,
message_type=MessageTypeEnum.MESSAGE_REQUEST,
)
- mock_handler.assert_called_once_with(socket=mock_socket, prompt=mock_request)
-
- @pytest.mark.asyncio
- async def test_handle_prompt_options_select(self):
- """Test routing to options prompt handler."""
- mock_socket = AsyncMock()
- mock_request = OptionsSelectPromptRequest(
- message_id=5,
- prompt="Select an option",
- timeout=30,
- options={"Option 1": 1, "Option 2": 2},
- )
-
- with patch("th_cli.test_run.prompt_manager.__handle_options_prompt") as mock_handler:
- mock_handler.return_value = asyncio.Future()
- mock_handler.return_value.set_result(None)
-
- await prompt_manager.handle_prompt(
- socket=mock_socket,
- request=mock_request,
- )
-
- mock_handler.assert_called_once_with(socket=mock_socket, prompt=mock_request)
+ mock_send.assert_called_once()
+ assert mock_send.call_args[1]["response"] == "ACK"
+ assert mock_send.call_args[1]["prompt"] is request
@pytest.mark.asyncio
- async def test_handle_prompt_text_input(self):
- """Test routing to text input handler."""
+ async def test_unknown_request_type_does_not_raise(self):
mock_socket = AsyncMock()
- mock_request = TextInputPromptRequest(
- message_id=6,
- prompt="Enter text",
- timeout=30,
- )
+ request = PromptRequest(message_id=99, prompt="?", timeout=10)
- with patch("th_cli.test_run.prompt_manager.__handle_text_prompt") as mock_handler:
- mock_handler.return_value = asyncio.Future()
- mock_handler.return_value.set_result(None)
+ with patch("click.echo"):
+ await prompt_manager.handle_prompt(socket=mock_socket, request=request)
- await prompt_manager.handle_prompt(
- socket=mock_socket,
- request=mock_request,
- )
- mock_handler.assert_called_once_with(socket=mock_socket, prompt=mock_request)
+# ---------------------------------------------------------------------------
+# _send_prompt_response
+# ---------------------------------------------------------------------------
@pytest.mark.unit
class TestSendPromptResponse:
- """Tests for _send_prompt_response function."""
@pytest.mark.asyncio
- async def test_send_prompt_response_success(self):
- """Test successful prompt response sending."""
+ async def test_sends_json_with_correct_structure(self):
mock_socket = AsyncMock()
mock_prompt = Mock()
mock_prompt.message_id = 123
@@ -278,22 +215,19 @@ async def test_send_prompt_response_success(self):
await prompt_manager._send_prompt_response(
socket=mock_socket,
prompt=mock_prompt,
- response="test response",
+ response="hello",
status_code=UserResponseStatusEnum.OKAY,
)
mock_socket.send.assert_called_once()
- call_args = mock_socket.send.call_args[0][0]
-
- payload = json.loads(call_args)
+ payload = json.loads(mock_socket.send.call_args[0][0])
assert payload["type"] == "prompt_response"
- assert payload["payload"]["response"] == "test response"
+ assert payload["payload"]["response"] == "hello"
assert payload["payload"]["status_code"] == UserResponseStatusEnum.OKAY
assert payload["payload"]["message_id"] == 123
@pytest.mark.asyncio
- async def test_send_prompt_response_cancelled_status(self):
- """Test prompt response with CANCELLED status."""
+ async def test_cancelled_status_code_is_preserved(self):
mock_socket = AsyncMock()
mock_prompt = Mock()
mock_prompt.message_id = 456
@@ -301,42 +235,154 @@ async def test_send_prompt_response_cancelled_status(self):
await prompt_manager._send_prompt_response(
socket=mock_socket,
prompt=mock_prompt,
- response="Stream failed",
+ response="cancelled",
status_code=UserResponseStatusEnum.CANCELLED,
)
- mock_socket.send.assert_called_once()
- call_args = mock_socket.send.call_args[0][0]
-
- payload = json.loads(call_args)
+ payload = json.loads(mock_socket.send.call_args[0][0])
assert payload["payload"]["status_code"] == UserResponseStatusEnum.CANCELLED
+ @pytest.mark.asyncio
+ async def test_integer_response_value_is_sent(self):
+ mock_socket = AsyncMock()
+ mock_prompt = Mock()
+ mock_prompt.message_id = 1
+
+ await prompt_manager._send_prompt_response(
+ socket=mock_socket,
+ prompt=mock_prompt,
+ response=2,
+ )
+
+ payload = json.loads(mock_socket.send.call_args[0][0])
+ assert payload["payload"]["response"] == 2
+
+
+# ---------------------------------------------------------------------------
+# __valid_text_input — tested via handle_prompt → __handle_text_prompt
+# ---------------------------------------------------------------------------
+
@pytest.mark.unit
-class TestHandleMessagePrompt:
- """Tests for __handle_message_prompt function."""
+class TestValidTextInput:
@pytest.mark.asyncio
- async def test_handle_message_prompt_success(self):
- """Test successful message prompt handling."""
- # Test the message handling through the main handler
- mock_socket = AsyncMock()
- mock_prompt = MessagePromptRequest(
+ async def test_accepts_input_when_no_regex_pattern(self):
+ prompt = TextInputPromptRequest(
message_id=1,
- prompt="Test message",
+ prompt="Enter something",
timeout=30,
+ regex_pattern=None,
)
+ with patch("th_cli.test_run.prompt_manager._send_prompt_response", new_callable=AsyncMock) as mock_send:
+ with patch("aioconsole.ainput", new_callable=AsyncMock, return_value="anything"):
+ await prompt_manager.handle_prompt(socket=AsyncMock(), request=prompt)
- with patch("th_cli.test_run.prompt_manager._send_prompt_response") as mock_send:
- mock_send.return_value = asyncio.Future()
- mock_send.return_value.set_result(None)
+ mock_send.assert_called_once()
+ assert mock_send.call_args[1]["response"] == "anything"
- # Test through the main handle_prompt function
- await prompt_manager.handle_prompt(
- socket=mock_socket,
- request=mock_prompt,
- message_type="message_request",
- )
+ @pytest.mark.asyncio
+ async def test_accepts_input_matching_regex(self):
+ prompt = TextInputPromptRequest(
+ message_id=1,
+ prompt="Enter digits",
+ timeout=30,
+ regex_pattern=r"^\d+$",
+ )
+ with patch("th_cli.test_run.prompt_manager._send_prompt_response", new_callable=AsyncMock) as mock_send:
+ with patch("aioconsole.ainput", new_callable=AsyncMock, return_value="12345"):
+ await prompt_manager.handle_prompt(socket=AsyncMock(), request=prompt)
+
+ mock_send.assert_called_once()
+ assert mock_send.call_args[1]["response"] == "12345"
+
+ @pytest.mark.asyncio
+ async def test_retries_until_valid_input(self):
+ prompt = TextInputPromptRequest(
+ message_id=1,
+ prompt="Enter digits",
+ timeout=30,
+ regex_pattern=r"^\d+$",
+ )
+ with patch("th_cli.test_run.prompt_manager._send_prompt_response", new_callable=AsyncMock) as mock_send:
+ with patch("aioconsole.ainput", new_callable=AsyncMock, side_effect=["not-digits", "9999"]):
+ with patch("click.echo"):
+ await prompt_manager.handle_prompt(socket=AsyncMock(), request=prompt)
+
+ mock_send.assert_called_once()
+ assert mock_send.call_args[1]["response"] == "9999"
+
+ @pytest.mark.asyncio
+ async def test_uses_default_value_when_input_is_empty(self):
+ prompt = TextInputPromptRequest(
+ message_id=1,
+ prompt="Enter value",
+ timeout=30,
+ default_value="mydefault",
+ regex_pattern=None,
+ )
+ with patch("th_cli.test_run.prompt_manager._send_prompt_response", new_callable=AsyncMock) as mock_send:
+ with patch("aioconsole.ainput", new_callable=AsyncMock, return_value=""):
+ with patch("click.echo"):
+ await prompt_manager.handle_prompt(socket=AsyncMock(), request=prompt)
+
+ mock_send.assert_called_once()
+ assert mock_send.call_args[1]["response"] == "mydefault"
+
+
+# ---------------------------------------------------------------------------
+# __valid_file_upload — tested via handle_file_upload_request
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestValidFileUpload:
+
+ @pytest.mark.asyncio
+ async def test_accepts_txt_file(self):
+ with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as f:
+ f.write(b"hello")
+ tmp_path = f.name
+
+ try:
+ with patch(
+ "th_cli.test_run.prompt_manager.__upload_file_and_send_response",
+ new_callable=AsyncMock,
+ ) as mock_upload:
+ with patch("aioconsole.ainput", new_callable=AsyncMock, return_value=tmp_path):
+ with patch("click.echo"):
+ await prompt_manager.handle_file_upload_request(
+ socket=AsyncMock(),
+ request=MagicMock(prompt="Upload file", timeout=30),
+ )
+
+ mock_upload.assert_called_once()
+ assert mock_upload.call_args[1]["file_path"] == tmp_path
+ finally:
+ os.unlink(tmp_path)
- # Verify the response was sent (indirectly tests the private function)
- mock_send.assert_called_once()
+ @pytest.mark.asyncio
+ async def test_empty_input_skips_upload(self):
+ with patch("th_cli.test_run.prompt_manager._send_prompt_response", new_callable=AsyncMock) as mock_send:
+ with patch("aioconsole.ainput", new_callable=AsyncMock, return_value=""):
+ with patch("click.echo"):
+ await prompt_manager.handle_file_upload_request(
+ socket=AsyncMock(),
+ request=MagicMock(prompt="Upload", timeout=30, message_id=1),
+ )
+
+ mock_send.assert_called_once()
+ assert mock_send.call_args[1]["response"] == ""
+
+ @pytest.mark.asyncio
+ async def test_invalid_extension_retries_then_skip(self):
+ with patch("th_cli.test_run.prompt_manager._send_prompt_response", new_callable=AsyncMock) as mock_send:
+ with patch("aioconsole.ainput", new_callable=AsyncMock, side_effect=["/some/file.exe", ""]):
+ with patch("click.echo"):
+ await prompt_manager.handle_file_upload_request(
+ socket=AsyncMock(),
+ request=MagicMock(prompt="Upload", timeout=30, message_id=1),
+ )
+
+ mock_send.assert_called_once()
+ assert mock_send.call_args[1]["response"] == ""
diff --git a/tests/test_run/test_websocket_socket.py b/tests/test_run/test_websocket_socket.py
new file mode 100644
index 0000000..734546e
--- /dev/null
+++ b/tests/test_run/test_websocket_socket.py
@@ -0,0 +1,391 @@
+#
+# Copyright (c) 2026 Project CHIP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Unit tests for TestRunSocket in websocket.py."""
+
+from unittest.mock import AsyncMock, patch
+
+import pytest
+
+from th_cli.api_lib_autogen.models import (
+ TestCaseExecution,
+ TestCaseMetadata,
+ TestRunExecutionWithChildren,
+ TestStateEnum,
+ TestStepExecution,
+ TestSuiteExecution,
+ TestSuiteMetadata,
+)
+from th_cli.test_run.socket_schemas import TestCaseUpdate, TestRunUpdate, TestStepUpdate, TestSuiteUpdate, TestUpdate
+from th_cli.test_run.websocket import TestRunSocket
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+_METADATA_DEFAULTS = dict(
+ description="desc",
+ version="1.0",
+ source_hash="abc",
+ mandatory=False,
+ id=1,
+)
+
+
+def _make_step(title="Step 1", state=TestStateEnum.passed, errors=None, idx=0) -> TestStepExecution:
+ return TestStepExecution(
+ state=state,
+ title=title,
+ execution_index=idx,
+ id=idx + 100,
+ test_case_execution_id=1,
+ errors=errors,
+ )
+
+
+def _make_case(
+ public_id="TC_FOO_1_1",
+ title="Case 1",
+ state=TestStateEnum.passed,
+ errors=None,
+ steps=None,
+ idx=0,
+) -> TestCaseExecution:
+ return TestCaseExecution(
+ state=state,
+ public_id=public_id,
+ execution_index=idx,
+ id=idx + 200,
+ test_suite_execution_id=1,
+ test_case_metadata_id=1,
+ errors=errors,
+ test_case_metadata=TestCaseMetadata(
+ public_id=public_id,
+ title=title,
+ **_METADATA_DEFAULTS,
+ ),
+ test_step_executions=steps or [],
+ )
+
+
+def _make_suite(cases=None, title="Suite 1", idx=0) -> TestSuiteExecution:
+ return TestSuiteExecution(
+ state=TestStateEnum.passed,
+ public_id="SUITE_1",
+ collection_id="collection_1",
+ execution_index=idx,
+ id=idx + 300,
+ test_run_execution_id=1,
+ test_suite_metadata_id=1,
+ test_case_executions=cases or [],
+ test_suite_metadata=TestSuiteMetadata(
+ public_id="SUITE_1",
+ title=title,
+ **_METADATA_DEFAULTS,
+ ),
+ )
+
+
+def _make_run(suites=None) -> TestRunExecutionWithChildren:
+ return TestRunExecutionWithChildren(
+ title="Test Run",
+ id=1,
+ state=TestStateEnum.executing,
+ test_suite_executions=suites or [],
+ )
+
+
+def _make_socket(suites=None, project_config=None) -> TestRunSocket:
+ run = _make_run(suites=suites)
+ return TestRunSocket(run=run, project_config_dict=project_config)
+
+
+# ---------------------------------------------------------------------------
+# __init__
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestTestRunSocketInit:
+ def test_run_stored(self):
+ run = _make_run()
+ s = TestRunSocket(run=run)
+ assert s.run is run
+
+ def test_project_config_defaults_to_empty_dict(self):
+ s = TestRunSocket(run=_make_run())
+ assert s.project_config_dict == {}
+
+ def test_project_config_stored_when_provided(self):
+ cfg = {"key": "value"}
+ s = TestRunSocket(run=_make_run(), project_config_dict=cfg)
+ assert s.project_config_dict is cfg
+
+ def test_test_case_step_errors_initially_empty(self):
+ s = TestRunSocket(run=_make_run())
+ assert s.test_case_step_errors == {}
+
+ def test_chip_server_info_not_displayed_initially(self):
+ s = TestRunSocket(run=_make_run())
+ assert s._chip_server_info_displayed is False
+
+
+# ---------------------------------------------------------------------------
+# __log_test_step_update — error accumulation
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestLogTestStepUpdate:
+
+ def _call(self, socket: TestRunSocket, update: TestStepUpdate):
+ socket._TestRunSocket__log_test_step_update(update)
+
+ def _update(self, errors=None, step_idx=0, case_idx=0, suite_idx=0) -> TestStepUpdate:
+ return TestStepUpdate(
+ state="passed",
+ test_step_execution_index=step_idx,
+ test_case_execution_index=case_idx,
+ test_suite_execution_index=suite_idx,
+ errors=errors,
+ )
+
+ def test_errors_accumulated_into_dict(self):
+ step = _make_step(errors=["some error"])
+ case = _make_case(steps=[step])
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ self._call(s, self._update(errors=["some error"]))
+
+ assert s.test_case_step_errors[(0, 0)] == ["some error"]
+
+ def test_multiple_steps_extend_same_case_list(self):
+ step0 = _make_step(errors=["err0"], idx=0)
+ step1 = _make_step(errors=["err1"], idx=1)
+ case = _make_case(steps=[step0, step1])
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ self._call(s, self._update(errors=["err0"], step_idx=0))
+ self._call(s, self._update(errors=["err1"], step_idx=1))
+
+ assert s.test_case_step_errors[(0, 0)] == ["err0", "err1"]
+
+ def test_no_entry_when_update_has_no_errors(self):
+ step = _make_step()
+ case = _make_case(steps=[step])
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ self._call(s, self._update(errors=None))
+
+ assert (0, 0) not in s.test_case_step_errors
+
+ def test_errors_keyed_by_suite_and_case_index(self):
+ step = _make_step(errors=["e"], idx=0)
+ case0 = _make_case(steps=[step], idx=0)
+ case1 = _make_case(steps=[step], idx=1)
+ suite = _make_suite(cases=[case0, case1])
+ s = _make_socket(suites=[suite])
+
+ self._call(s, self._update(errors=["err_case0"], case_idx=0))
+ self._call(s, self._update(errors=["err_case1"], case_idx=1))
+
+ assert s.test_case_step_errors[(0, 0)] == ["err_case0"]
+ assert s.test_case_step_errors[(0, 1)] == ["err_case1"]
+
+
+# ---------------------------------------------------------------------------
+# __log_test_case_update — WebRTC detection and error cleanup
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestLogTestCaseUpdate:
+
+ def _call(self, socket: TestRunSocket, update: TestCaseUpdate):
+ socket._TestRunSocket__log_test_case_update(update)
+
+ def _update(self, case_idx=0, suite_idx=0, errors=None, state="failed") -> TestCaseUpdate:
+ return TestCaseUpdate(
+ state=state,
+ test_case_execution_index=case_idx,
+ test_suite_execution_index=suite_idx,
+ errors=errors,
+ )
+
+ def test_webrtc_warning_shown_for_webrtc_error_text(self):
+ case = _make_case()
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ with patch("click.echo") as mock_echo:
+ self._call(s, self._update(errors=["error: webrtc browser failed"]))
+
+ echoed = " ".join(str(c) for call in mock_echo.call_args_list for c in call[0])
+ assert "TWO-WAY TALK" in echoed
+
+ def test_webrtc_warning_shown_for_known_public_id(self):
+ case = _make_case(public_id="TC_WEBRTC_1_6")
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ with patch("click.echo") as mock_echo:
+ self._call(s, self._update(errors=None))
+
+ echoed = " ".join(str(c) for call in mock_echo.call_args_list for c in call[0])
+ assert "TWO-WAY TALK" in echoed
+
+ def test_webrtc_warning_shown_when_step_errors_contain_indicator(self):
+ case = _make_case()
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+ s.test_case_step_errors[(0, 0)] = ["BrowserPeerConnection refused"]
+
+ with patch("click.echo") as mock_echo:
+ self._call(s, self._update(errors=None))
+
+ echoed = " ".join(str(c) for call in mock_echo.call_args_list for c in call[0])
+ assert "TWO-WAY TALK" in echoed
+
+ def test_no_webrtc_warning_for_non_webrtc_failure(self):
+ case = _make_case(public_id="TC_CLUSTER_1_1")
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ with patch("click.echo") as mock_echo:
+ self._call(s, self._update(errors=["attribute read failed"]))
+
+ echoed = " ".join(str(c) for call in mock_echo.call_args_list for c in call[0])
+ assert "TWO-WAY TALK" not in echoed
+
+ def test_step_errors_cleaned_up_after_case_update(self):
+ case = _make_case()
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+ s.test_case_step_errors[(0, 0)] = ["some error"]
+
+ self._call(s, self._update(errors=None))
+
+ assert (0, 0) not in s.test_case_step_errors
+
+ def test_passing_case_does_not_show_webrtc_warning(self):
+ case = _make_case()
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ with patch("click.echo") as mock_echo:
+ self._call(s, self._update(state="passed"))
+
+ echoed = " ".join(str(c) for call in mock_echo.call_args_list for c in call[0])
+ assert "TWO-WAY TALK" not in echoed
+
+ def test_all_webrtc_indicators_trigger_warning(self):
+ indicators = [
+ "browserpeerconnection failed",
+ "webrtc setup error",
+ "browser peer not available",
+ "ws://backend/api/v1/ws/webrtc timeout",
+ "create_browser_peer called",
+ ]
+ for indicator in indicators:
+ case = _make_case()
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ with patch("click.echo") as mock_echo:
+ self._call(s, self._update(errors=[indicator]))
+
+ echoed = " ".join(str(c) for call in mock_echo.call_args_list for c in call[0])
+ assert "TWO-WAY TALK" in echoed, f"Expected WebRTC warning for: {indicator!r}"
+
+
+# ---------------------------------------------------------------------------
+# __handle_test_update — dispatch
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.unit
+class TestHandleTestUpdate:
+
+ @pytest.mark.asyncio
+ async def test_step_update_routed_correctly(self):
+ step = _make_step()
+ case = _make_case(steps=[step])
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ update = TestUpdate(
+ test_type="test_step",
+ body=TestStepUpdate(
+ state="passed", test_step_execution_index=0, test_case_execution_index=0, test_suite_execution_index=0
+ ),
+ )
+ with patch.object(s, "_TestRunSocket__log_test_step_update") as mock_fn:
+ await s._TestRunSocket__handle_test_update(socket=AsyncMock(), update=update)
+
+ mock_fn.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_case_update_routed_correctly(self):
+ case = _make_case()
+ suite = _make_suite(cases=[case])
+ s = _make_socket(suites=[suite])
+
+ update = TestUpdate(
+ test_type="test_case",
+ body=TestCaseUpdate(state="passed", test_case_execution_index=0, test_suite_execution_index=0),
+ )
+ with patch.object(s, "_TestRunSocket__log_test_case_update") as mock_fn:
+ await s._TestRunSocket__handle_test_update(socket=AsyncMock(), update=update)
+
+ mock_fn.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_suite_update_routed_correctly(self):
+ suite = _make_suite()
+ s = _make_socket(suites=[suite])
+
+ update = TestUpdate(
+ test_type="test_suite",
+ body=TestSuiteUpdate(state="passed", test_suite_execution_index=0),
+ )
+ with patch.object(s, "_TestRunSocket__log_test_suite_update") as mock_fn:
+ await s._TestRunSocket__handle_test_update(socket=AsyncMock(), update=update)
+
+ mock_fn.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_run_update_executing_does_not_close_socket(self):
+ s = _make_socket()
+ mock_socket = AsyncMock()
+
+ update = TestUpdate(test_type="test_run", body=TestRunUpdate(state="executing", test_run_execution_id=1))
+ with patch.object(s, "_TestRunSocket__log_test_run_update", new_callable=AsyncMock):
+ await s._TestRunSocket__handle_test_update(socket=mock_socket, update=update)
+
+ mock_socket.close.assert_not_called()
+
+ @pytest.mark.asyncio
+ async def test_run_update_non_executing_closes_socket(self):
+ s = _make_socket()
+ mock_socket = AsyncMock()
+
+ update = TestUpdate(test_type="test_run", body=TestRunUpdate(state="passed", test_run_execution_id=1))
+ with patch.object(s, "_TestRunSocket__log_test_run_update", new_callable=AsyncMock):
+ await s._TestRunSocket__handle_test_update(socket=mock_socket, update=update)
+
+ mock_socket.close.assert_called_once()
diff --git a/tests/test_run_tests.py b/tests/test_run_tests.py
index 6669e39..bc41357 100644
--- a/tests/test_run_tests.py
+++ b/tests/test_run_tests.py
@@ -20,11 +20,10 @@
import pytest
from click.testing import CliRunner
-from httpx import Headers
from th_cli.api_lib_autogen import models as api_models
from th_cli.api_lib_autogen.exceptions import UnexpectedResponse
-from th_cli.commands.run_tests import run_tests, _parse_extra_args
+from th_cli.commands.run_tests import _parse_extra_args, run_tests
from th_cli.exceptions import ConfigurationError
@@ -54,24 +53,21 @@ def test_run_tests_success_minimal_args(
test_collection_api.return_value = sample_test_collections
cli_api.return_value = sample_test_run_execution
id_start.return_value = sample_test_run_execution
- with patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client), \
- patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis), \
+ with (
+ patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client),
+ patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis),
patch(
- "th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"), \
- patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class, \
- patch(
- "th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ),
+ patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class,
+ patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict),
):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1,TC-ACE-1.2"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1,TC-ACE-1.2"])
# Assert
assert result.exit_code == 0
@@ -87,7 +83,7 @@ def test_run_tests_success_with_custom_config(
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
sample_default_config_dict: dict,
- mock_json_config_file: Path
+ mock_json_config_file: Path,
) -> None:
"""Test successful test run with custom JSON configuration file."""
# Arrange
@@ -103,24 +99,28 @@ def test_run_tests_success_with_custom_config(
id_start_api.return_value = sample_test_run_execution
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
with patch(
- "th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
):
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
with patch(
- "th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--config", str(mock_json_config_file),
- "--title", "Custom Test Run"
- ])
+ result = cli_runner.invoke(
+ run_tests,
+ [
+ "--tests-list",
+ "TC-ACE-1.1",
+ "--config",
+ str(mock_json_config_file),
+ "--title",
+ "Custom Test Run",
+ ],
+ )
# Assert
assert result.exit_code == 0
@@ -133,7 +133,7 @@ def test_run_tests_success_with_pics_config(
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
sample_default_config_dict: dict,
- mock_pics_dir: Path
+ mock_pics_dir: Path,
) -> None:
"""Test successful test run with PICS configuration."""
# Arrange
@@ -149,23 +149,20 @@ def test_run_tests_success_with_pics_config(
start_api.return_value = sample_test_run_execution
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
with patch(
- "th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
):
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
with patch(
- "th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--pics-config-folder", str(mock_pics_dir)
- ])
+ result = cli_runner.invoke(
+ run_tests, ["--tests-list", "TC-ACE-1.1", "--pics-config-folder", str(mock_pics_dir)]
+ )
# Assert
assert result.exit_code == 0
@@ -180,7 +177,7 @@ def test_run_tests_success_with_project_id(
mock_async_apis: Mock,
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
- sample_default_config_dict: dict
+ sample_default_config_dict: dict,
) -> None:
"""Test successful test run with project ID."""
# Arrange
@@ -195,20 +192,19 @@ def test_run_tests_success_with_project_id(
cli_api.return_value = sample_test_run_execution
start_api.return_value = sample_test_run_execution
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
- with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"):
+ with patch(
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ):
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--project-id", "42"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1", "--project-id", "42"])
# Assert
assert result.exit_code == 0
@@ -219,7 +215,7 @@ def test_run_tests_success_with_no_color(
mock_async_apis: Mock,
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
- sample_default_config_dict: dict
+ sample_default_config_dict: dict,
) -> None:
"""Test successful test run with colors disabled."""
# Arrange
@@ -234,21 +230,20 @@ def test_run_tests_success_with_no_color(
cli_api.return_value = sample_test_run_execution
start_api.return_value = sample_test_run_execution
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
- with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"):
+ with patch(
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ):
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
with patch("th_cli.commands.run_tests.set_colors_enabled") as mock_set_colors:
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--no-color"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1", "--no-color"])
# Assert
assert result.exit_code == 0
@@ -257,25 +252,24 @@ def test_run_tests_success_with_no_color(
def test_run_tests_invalid_test_ids(self, cli_runner: CliRunner) -> None:
"""Test run tests with invalid test IDs format."""
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "invalid-test-id,another-invalid"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "invalid-test-id,another-invalid"])
# Assert
assert result.exit_code == 1
assert "Error: Invalid test ID format" in result.output
- @pytest.mark.parametrize("empty_test_id", [
- "",
- " ",
- " ",
- ])
+ @pytest.mark.parametrize(
+ "empty_test_id",
+ [
+ "",
+ " ",
+ " ",
+ ],
+ )
def test_run_tests_empty_test_list(self, cli_runner: CliRunner, empty_test_id: str) -> None:
"""Test run tests with empty test list."""
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", empty_test_id
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", empty_test_id])
# Assert
assert result.exit_code == 1
@@ -284,10 +278,7 @@ def test_run_tests_empty_test_list(self, cli_runner: CliRunner, empty_test_id: s
def test_run_tests_config_file_not_found(self, cli_runner: CliRunner) -> None:
"""Test run tests with non-existent config file."""
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--config", "nonexistent.json"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1", "--config", "nonexistent.json"])
# Assert
assert result.exit_code == 1
@@ -296,10 +287,9 @@ def test_run_tests_config_file_not_found(self, cli_runner: CliRunner) -> None:
def test_run_tests_pics_directory_not_found(self, cli_runner: CliRunner) -> None:
"""Test run tests with non-existent PICS directory."""
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--pics-config-folder", "nonexistent_pics_dir"
- ])
+ result = cli_runner.invoke(
+ run_tests, ["--tests-list", "TC-ACE-1.1", "--pics-config-folder", "nonexistent_pics_dir"]
+ )
# Assert
assert result.exit_code == 1
@@ -309,23 +299,17 @@ def test_run_tests_configuration_error(self, cli_runner: CliRunner) -> None:
"""Test run tests with configuration error."""
# Arrange
with patch(
- "th_cli.commands.run_tests.get_client",
- side_effect=ConfigurationError("Could not connect to server")
+ "th_cli.commands.run_tests.get_client", side_effect=ConfigurationError("Could not connect to server")
):
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1"])
# Assert
assert result.exit_code == 1
assert "Error: Could not connect to server" in result.output
def test_run_tests_api_error_getting_default_config(
- self,
- cli_runner: CliRunner,
- mock_async_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_async_apis: Mock, mock_api_client: Mock
) -> None:
"""Test run tests with API error when getting default config."""
# Arrange
@@ -335,9 +319,7 @@ def test_run_tests_api_error_getting_default_config(
with patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client):
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1"])
# Assert
assert result.exit_code == 1
@@ -345,10 +327,7 @@ def test_run_tests_api_error_getting_default_config(
mock_api_client.aclose.assert_called_once()
def test_run_tests_api_error_getting_test_collections(
- self,
- cli_runner: CliRunner,
- mock_async_apis: Mock,
- sample_default_config_dict: dict
+ self, cli_runner: CliRunner, mock_async_apis: Mock, sample_default_config_dict: dict
) -> None:
"""Test run tests with API error when getting test collections."""
# Arrange
@@ -358,14 +337,12 @@ def test_run_tests_api_error_getting_test_collections(
test_collections_api.side_effect = Exception("Collections API error")
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
- with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"):
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ):
+ with patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict):
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1"])
# Assert
assert result.exit_code == 1
@@ -376,7 +353,7 @@ def test_run_tests_api_error_creating_test_run(
cli_runner: CliRunner,
mock_async_apis: Mock,
sample_test_collections: api_models.TestCollections,
- sample_default_config_dict: dict
+ sample_default_config_dict: dict,
) -> None:
"""Test run tests with API error when creating test run."""
# Arrange
@@ -394,14 +371,12 @@ def test_run_tests_api_error_creating_test_run(
projects_api.return_value = sample_default_config_dict
cli_api.side_effect = api_exception
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
- with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"):
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ):
+ with patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict):
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1"])
# Assert
assert result.exit_code == 1
@@ -413,7 +388,7 @@ def test_run_tests_api_error_starting_test_run(
mock_async_apis: Mock,
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
- sample_default_config_dict: dict
+ sample_default_config_dict: dict,
) -> None:
"""Test run tests with API error when starting test run."""
# Arrange
@@ -433,19 +408,19 @@ def test_run_tests_api_error_starting_test_run(
cli_api.return_value = sample_test_run_execution
start_api.side_effect = api_exception
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
- with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"):
+ with patch(
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ):
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1"])
# Assert
assert result.exit_code == 1
@@ -475,15 +450,19 @@ def test_run_tests_required_tests_list_parameter(self, cli_runner: CliRunner) ->
assert result.exit_code != 0
assert "required" in result.output
- @pytest.mark.parametrize("test_list", [
- "TC-ACE-1.1",
- "TC-ACE-1.1,TC-ACE-1.2",
- "TC_ACE_1_1,TC_ACE_1_2,TC_ACE_1_3",
- "TC_ACE_1_1,TC_ACE_1_2,TC_ACE_1_3,TC_ACE_1_3-custom",
- "TC-ACE-1.1, TC-ACE-1.2, TC-ACE-1.3", # with spaces
- "TC-MCORE_FS-1.1, TC-MCORE_FS-1_2, TC_MCORE_FS-1.2",
- "TC_CADMIN_1_3_4", "TC_CADMIN_1_3_102"
- ])
+ @pytest.mark.parametrize(
+ "test_list",
+ [
+ "TC-ACE-1.1",
+ "TC-ACE-1.1,TC-ACE-1.2",
+ "TC_ACE_1_1,TC_ACE_1_2,TC_ACE_1_3",
+ "TC_ACE_1_1,TC_ACE_1_2,TC_ACE_1_3,TC_ACE_1_3-custom",
+ "TC-ACE-1.1, TC-ACE-1.2, TC-ACE-1.3", # with spaces
+ "TC-MCORE_FS-1.1, TC-MCORE_FS-1_2, TC_MCORE_FS-1.2",
+ "TC_CADMIN_1_3_4",
+ "TC_CADMIN_1_3_102",
+ ],
+ )
def test_run_tests_various_test_lists(
self,
cli_runner: CliRunner,
@@ -491,7 +470,7 @@ def test_run_tests_various_test_lists(
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
sample_default_config_dict: dict,
- test_list: str
+ test_list: str,
) -> None:
"""Test run tests with various test list formats."""
# Arrange
@@ -506,19 +485,19 @@ def test_run_tests_various_test_lists(
cli_api.return_value = sample_test_run_execution
start_api.return_value = sample_test_run_execution
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
- with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"):
+ with patch(
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ):
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", test_list
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", test_list])
# Assert
assert result.exit_code == 0
@@ -529,7 +508,7 @@ def test_run_tests_test_selection_building(
mock_async_apis: Mock,
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
- sample_default_config_dict: dict
+ sample_default_config_dict: dict,
) -> None:
"""Test that test selection is properly built from test collections."""
# Arrange
@@ -545,21 +524,21 @@ def test_run_tests_test_selection_building(
id_start.return_value = sample_test_run_execution
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
- with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"):
+ with patch(
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ):
with patch("th_cli.commands.run_tests.build_test_selection") as mock_build_test_selection:
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
+ ):
mock_build_test_selection.return_value = {"mock_collection": {"mock_suite": {"mock": 1}}}
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1,TC-ACE-1.2"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1,TC-ACE-1.2"])
# Assert
assert result.exit_code == 0
@@ -573,7 +552,7 @@ def test_run_tests_logger_configuration(
mock_async_apis: Mock,
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
- sample_default_config_dict: dict
+ sample_default_config_dict: dict,
) -> None:
"""Test that logger is properly configured for the test run."""
# Arrange
@@ -590,18 +569,18 @@ def test_run_tests_logger_configuration(
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run") as mock_configure_logger:
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
+ ):
mock_configure_logger.return_value = "/path/to/test_logs/custom_run.log"
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--title", "Custom Logger Test"
- ])
+ result = cli_runner.invoke(
+ run_tests, ["--tests-list", "TC-ACE-1.1", "--title", "Custom Logger Test"]
+ )
# Assert
assert result.exit_code == 0
@@ -614,7 +593,7 @@ def test_run_tests_default_title_generation(
mock_async_apis: Mock,
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
- sample_default_config_dict: dict
+ sample_default_config_dict: dict,
) -> None:
"""Test that default title is generated when not provided."""
# Arrange
@@ -629,26 +608,26 @@ def test_run_tests_default_title_generation(
cli_api.return_value = sample_test_run_execution
id_start.return_value = sample_test_run_execution
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
- with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"):
+ with patch(
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ):
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1"])
# Assert
assert result.exit_code == 0
# Should contain a timestamp-based title
assert "Creating new test run with title" in result.output
# The title should be a timestamp format like "2025-01-01-10:00:00"
- output_lines = result.output.split('\n')
+ output_lines = result.output.split("\n")
title_line = next((line for line in output_lines if "Creating new test run with title" in line), None)
assert title_line is not None
# Extract the title part and verify it looks like a timestamp
@@ -663,7 +642,7 @@ def test_run_tests_config_data_processing(
sample_test_collections: api_models.TestCollections,
sample_test_run_execution: api_models.TestRunExecutionWithChildren,
sample_default_config_dict: dict,
- mock_json_config_file: Path
+ mock_json_config_file: Path,
) -> None:
"""Test that JSON configuration data is properly processed and displayed."""
# Arrange
@@ -678,20 +657,21 @@ def test_run_tests_config_data_processing(
cli_api.return_value = sample_test_run_execution
id_start.return_value = sample_test_run_execution
with patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis):
- with patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run",
- return_value="./test_logs/test.log"):
+ with patch(
+ "th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test_logs/test.log"
+ ):
with patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class:
- with patch("th_cli.commands.run_tests.convert_nested_to_dict",
- return_value=sample_default_config_dict):
+ with patch(
+ "th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--config", str(mock_json_config_file)
- ])
+ result = cli_runner.invoke(
+ run_tests, ["--tests-list", "TC-ACE-1.1", "--config", str(mock_json_config_file)]
+ )
# Assert
assert result.exit_code == 0
@@ -701,23 +681,20 @@ def test_run_tests_config_data_processing(
assert "dut_config" in result.output
assert "network" in result.output
- @pytest.mark.parametrize("invalid_test_id", [
- "invalid-format",
- "TC-INVALID",
- "TCACE11",
- "TC-ACE-1.1.1.1",
- "TC-ACE-1.1-custom-extra",
- ])
- def test_run_tests_invalid_test_id_formats(
- self,
- cli_runner: CliRunner,
- invalid_test_id: str
- ) -> None:
+ @pytest.mark.parametrize(
+ "invalid_test_id",
+ [
+ "invalid-format",
+ "TC-INVALID",
+ "TCACE11",
+ "TC-ACE-1.1.1.1",
+ "TC-ACE-1.1-custom-extra",
+ ],
+ )
+ def test_run_tests_invalid_test_id_formats(self, cli_runner: CliRunner, invalid_test_id: str) -> None:
"""Test run tests with various invalid test ID formats."""
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", invalid_test_id
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", invalid_test_id])
# Assert
assert result.exit_code == 1
@@ -729,9 +706,7 @@ def test_run_tests_client_cleanup_on_exception(self, cli_runner: CliRunner, mock
with patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client):
with patch("th_cli.commands.run_tests.AsyncApis", side_effect=Exception("API creation failed")):
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1"])
# Assert
assert result.exit_code == 1
@@ -814,9 +789,12 @@ def test_parse_extra_args_complex_values(self) -> None:
"""Test parsing arguments with complex values."""
# Arrange
args = [
- "--string-arg", "PICS_SC_2_2:false",
- "--json-arg", '{"key":"value"}',
- "--numeric-arg", "nodeId:305414945",
+ "--string-arg",
+ "PICS_SC_2_2:false",
+ "--json-arg",
+ '{"key":"value"}',
+ "--numeric-arg",
+ "nodeId:305414945",
]
# Act
@@ -833,9 +811,12 @@ def test_parse_extra_args_colons_in_values(self) -> None:
"""Test parsing SDK test parameter format with colons."""
# Arrange
args = [
- "--int-arg", "endpoint:2",
- "--string-arg", "discriminator:1234",
- "--bool-arg", "someBoolFlag:true",
+ "--int-arg",
+ "endpoint:2",
+ "--string-arg",
+ "discriminator:1234",
+ "--bool-arg",
+ "someBoolFlag:true",
]
# Act
@@ -876,20 +857,19 @@ def test_run_tests_with_extra_args_basic(
cli_api.return_value = sample_test_run_execution
id_start.return_value = sample_test_run_execution
- with patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client), \
- patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis), \
- patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"), \
- patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class, \
- patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict):
+ with (
+ patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client),
+ patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis),
+ patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"),
+ patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class,
+ patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict),
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--", "--int-arg", "endpoint:2"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1", "--", "--int-arg", "endpoint:2"])
# Assert
assert result.exit_code == 0
@@ -918,23 +898,32 @@ def test_run_tests_with_multiple_extra_args(
cli_api.return_value = sample_test_run_execution
id_start.return_value = sample_test_run_execution
- with patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client), \
- patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis), \
- patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"), \
- patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class, \
- patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict):
+ with (
+ patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client),
+ patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis),
+ patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"),
+ patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class,
+ patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict),
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--",
- "--int-arg", "endpoint:2",
- "--bool-arg", "flag:true",
- "--string-arg", "discriminator:1234"
- ])
+ result = cli_runner.invoke(
+ run_tests,
+ [
+ "--tests-list",
+ "TC-ACE-1.1",
+ "--",
+ "--int-arg",
+ "endpoint:2",
+ "--bool-arg",
+ "flag:true",
+ "--string-arg",
+ "discriminator:1234",
+ ],
+ )
# Assert
assert result.exit_code == 0
@@ -964,19 +953,19 @@ def test_run_tests_without_extra_args(
cli_api.return_value = sample_test_run_execution
id_start.return_value = sample_test_run_execution
- with patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client), \
- patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis), \
- patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"), \
- patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class, \
- patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict):
+ with (
+ patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client),
+ patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis),
+ patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"),
+ patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class,
+ patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict),
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1"])
# Assert
assert result.exit_code == 0
@@ -1006,21 +995,22 @@ def test_run_tests_extra_args_with_config_file(
cli_api.return_value = sample_test_run_execution
id_start.return_value = sample_test_run_execution
- with patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client), \
- patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis), \
- patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"), \
- patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class, \
- patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict):
+ with (
+ patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client),
+ patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis),
+ patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"),
+ patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class,
+ patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict),
+ ):
mock_socket = Mock()
mock_socket.connect_websocket = AsyncMock()
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--config", str(mock_json_config_file),
- "--", "--int-arg", "endpoint:2"
- ])
+ result = cli_runner.invoke(
+ run_tests,
+ ["--tests-list", "TC-ACE-1.1", "--config", str(mock_json_config_file), "--", "--int-arg", "endpoint:2"],
+ )
# Assert
assert result.exit_code == 0
@@ -1049,12 +1039,14 @@ def test_run_tests_verify_deep_copy_isolation(
cli_api.return_value = sample_test_run_execution
id_start.return_value = sample_test_run_execution
- with patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client), \
- patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis), \
- patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"), \
- patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class, \
- patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict), \
- patch("th_cli.commands.run_tests.copy.deepcopy") as mock_deepcopy:
+ with (
+ patch("th_cli.commands.run_tests.get_client", return_value=mock_api_client),
+ patch("th_cli.commands.run_tests.AsyncApis", return_value=mock_async_apis),
+ patch("th_cli.commands.run_tests.test_logging.configure_logger_for_run", return_value="./test.log"),
+ patch("th_cli.commands.run_tests.TestRunSocket") as mock_socket_class,
+ patch("th_cli.commands.run_tests.convert_nested_to_dict", return_value=sample_default_config_dict),
+ patch("th_cli.commands.run_tests.copy.deepcopy") as mock_deepcopy,
+ ):
# Configure deepcopy to return a new dict
mock_deepcopy.return_value = dict(sample_default_config_dict)
@@ -1064,10 +1056,7 @@ def test_run_tests_verify_deep_copy_isolation(
mock_socket_class.return_value = mock_socket
# Act
- result = cli_runner.invoke(run_tests, [
- "--tests-list", "TC-ACE-1.1",
- "--", "--int-arg", "endpoint:2"
- ])
+ result = cli_runner.invoke(run_tests, ["--tests-list", "TC-ACE-1.1", "--", "--int-arg", "endpoint:2"])
# Assert
assert result.exit_code == 0
diff --git a/tests/test_test_run_execution.py b/tests/test_test_run_execution.py
index 3f1abbd..f66c251 100644
--- a/tests/test_test_run_execution.py
+++ b/tests/test_test_run_execution.py
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2025 Project CHIP Authors
+# Copyright (c) 2025-2026 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +19,6 @@
import pytest
from click.testing import CliRunner
-from httpx import Headers
from th_cli.api_lib_autogen import models as api_models
from th_cli.api_lib_autogen.exceptions import UnexpectedResponse
@@ -33,26 +32,18 @@ class TestTestRunExecutionCommand:
"""Test cases for the test_run_execution command."""
def test_test_run_execution_success_all(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test successful test run execution history retrieval (all executions)."""
# Arrange
test_executions = [
- api_models.TestRunExecution(
- id=1,
- title="Test Run 1",
- state=api_models.TestStateEnum.passed,
- project_id=1
- ),
+ api_models.TestRunExecution(id=1, title="Test Run 1", state=api_models.TestStateEnum.passed, project_id=1),
api_models.TestRunExecution(
id=2,
title="Test Run 2",
state=api_models.TestStateEnum.failed,
project_id=1,
- )
+ ),
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
@@ -81,10 +72,7 @@ def test_test_run_execution_success_specific_id(
"""Test successful test run execution history retrieval for specific ID."""
# Arrange
test_execution = api_models.TestRunExecution(
- id=1,
- title="Specific Test Run",
- state=api_models.TestStateEnum.executing,
- project_id=1
+ id=1, title="Specific Test Run", state=api_models.TestStateEnum.executing, project_id=1
)
api = mock_sync_apis.test_run_executions_api.read_test_run_execution_api_v1_test_run_executions__id__get
@@ -107,12 +95,7 @@ def test_test_run_execution_success_with_pagination(
"""Test successful test run execution history retrieval with pagination."""
# Arrange
test_executions = [
- api_models.TestRunExecution(
- id=3,
- title="Test Run 3",
- state=api_models.TestStateEnum.pending,
- project_id=1
- )
+ api_models.TestRunExecution(id=3, title="Test Run 3", state=api_models.TestStateEnum.pending, project_id=1)
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
@@ -134,10 +117,7 @@ def test_test_run_execution_success_json_output(
"""Test successful test run execution history retrieval with JSON output."""
# Arrange
test_execution = api_models.TestRunExecution(
- id=1,
- title="JSON Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=1
+ id=1, title="JSON Test Run", state=api_models.TestStateEnum.passed, project_id=1
)
api = mock_sync_apis.test_run_executions_api.read_test_run_execution_api_v1_test_run_executions__id__get
@@ -158,8 +138,10 @@ def test_test_run_execution_success_json_output(
def test_test_run_execution_configuration_error(self, cli_runner: CliRunner) -> None:
"""Test test run execution history with configuration error."""
# Arrange
- with patch("th_cli.commands.test_run_execution.get_client",
- side_effect=ConfigurationError("Could not connect to server")):
+ with patch(
+ "th_cli.commands.test_run_execution.get_client",
+ side_effect=ConfigurationError("Could not connect to server"),
+ ):
# Act
result = cli_runner.invoke(test_run_execution)
@@ -168,10 +150,7 @@ def test_test_run_execution_configuration_error(self, cli_runner: CliRunner) ->
assert "Error: Could not connect to server" in result.output
def test_test_run_execution_api_error_by_id(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test run execution history with API error when fetching by ID."""
# Arrange
@@ -214,16 +193,11 @@ def test_test_run_execution_api_error_batch(
assert result.exit_code == 1
assert "Error: Failed to get test run executions (Status: 500) - Internal Server Error" in result.output
- def test_test_run_execution_client_cleanup_on_exception(
- self,
- cli_runner: CliRunner,
- mock_api_client: Mock
- ) -> None:
+ def test_test_run_execution_client_cleanup_on_exception(self, cli_runner: CliRunner, mock_api_client: Mock) -> None:
"""Test that client is properly cleaned up even when an exception occurs."""
# Arrange
with patch("th_cli.commands.test_run_execution.get_client", return_value=mock_api_client):
- with patch("th_cli.commands.test_run_execution.SyncApis",
- side_effect=Exception("API creation failed")):
+ with patch("th_cli.commands.test_run_execution.SyncApis", side_effect=Exception("API creation failed")):
# Act
result = cli_runner.invoke(test_run_execution)
@@ -245,30 +219,24 @@ def test_test_run_execution_help_message(self, cli_runner: CliRunner) -> None:
assert "--project-id" in result.output
assert "--json" in result.output
- @pytest.mark.parametrize("state,expected_display", [
- (api_models.TestStateEnum.pending, "PENDING"),
- (api_models.TestStateEnum.executing, "EXECUTING"),
- (api_models.TestStateEnum.passed, "PASSED"),
- (api_models.TestStateEnum.failed, "FAILED"),
- (api_models.TestStateEnum.error, "ERROR"),
- (api_models.TestStateEnum.cancelled, "CANCELLED"),
- (api_models.TestStateEnum.not_applicable, "NOT_APPLICABLE"),
- ])
+ @pytest.mark.parametrize(
+ "state,expected_display",
+ [
+ (api_models.TestStateEnum.pending, "PENDING"),
+ (api_models.TestStateEnum.executing, "EXECUTING"),
+ (api_models.TestStateEnum.passed, "PASSED"),
+ (api_models.TestStateEnum.failed, "FAILED"),
+ (api_models.TestStateEnum.error, "ERROR"),
+ (api_models.TestStateEnum.cancelled, "CANCELLED"),
+ (api_models.TestStateEnum.not_applicable, "NOT_APPLICABLE"),
+ ],
+ )
def test_test_run_execution_various_states(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- state: api_models.TestStateEnum,
- expected_display: str
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, state: api_models.TestStateEnum, expected_display: str
) -> None:
"""Test test run execution history with various execution states."""
# Arrange
- test_execution = api_models.TestRunExecution(
- id=1,
- title="State Test Run",
- state=state,
- project_id=1
- )
+ test_execution = api_models.TestRunExecution(id=1, title="State Test Run", state=state, project_id=1)
api = mock_sync_apis.test_run_executions_api.read_test_run_execution_api_v1_test_run_executions__id__get
api.return_value = test_execution
@@ -292,14 +260,9 @@ def test_test_run_execution_table_output_format(
id=1,
title="Long Test Run Title That Should Be Formatted Properly",
state=api_models.TestStateEnum.passed,
- project_id=1
+ project_id=1,
),
- api_models.TestRunExecution(
- id=2,
- title="Short Title",
- state=api_models.TestStateEnum.passed,
- project_id=1
- )
+ api_models.TestRunExecution(id=2, title="Short Title", state=api_models.TestStateEnum.passed, project_id=1),
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
@@ -311,7 +274,7 @@ def test_test_run_execution_table_output_format(
# Assert
assert result.exit_code == 0
# Check for proper table formatting
- lines = result.output.strip().split('\n')
+ lines = result.output.strip().split("\n")
# Should have header line and at least two data lines
assert len(lines) >= 3
# Header should be present
@@ -320,27 +283,23 @@ def test_test_run_execution_table_output_format(
assert any("Long Test Run Title" in line for line in lines)
assert any("Short Title" in line for line in lines)
- @pytest.mark.parametrize("skip,limit", [
- (None, None),
- (0, 10),
- (5, 20),
- (100, 1),
- ])
+ @pytest.mark.parametrize(
+ "skip,limit",
+ [
+ (None, None),
+ (0, 10),
+ (5, 20),
+ (100, 1),
+ ],
+ )
def test_test_run_execution_pagination_parameters(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- skip: int,
- limit: int
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, skip: int, limit: int
) -> None:
"""Test test run execution history with various pagination parameters."""
# Arrange
test_executions = [
api_models.TestRunExecution(
- id=1,
- title="Paginated Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=1
+ id=1, title="Paginated Test Run", state=api_models.TestStateEnum.passed, project_id=1
)
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
@@ -367,10 +326,7 @@ def test_test_run_execution_error_display(
"""Test that error state is properly displayed in the State column."""
# Arrange
test_execution = api_models.TestRunExecution(
- id=1,
- title="Failed Test Run",
- state=api_models.TestStateEnum.error,
- project_id=1
+ id=1, title="Failed Test Run", state=api_models.TestStateEnum.error, project_id=1
)
api = mock_sync_apis.test_run_executions_api.read_test_run_execution_api_v1_test_run_executions__id__get
@@ -391,10 +347,7 @@ def test_test_run_execution_passed_state_display(
"""Test that PASSED state is properly displayed."""
# Arrange
test_execution = api_models.TestRunExecution(
- id=1,
- title="Successful Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=1
+ id=1, title="Successful Test Run", state=api_models.TestStateEnum.passed, project_id=1
)
api = mock_sync_apis.test_run_executions_api.read_test_run_execution_api_v1_test_run_executions__id__get
@@ -409,19 +362,13 @@ def test_test_run_execution_passed_state_display(
@pytest.mark.parametrize("json_flag", [True, False])
def test_test_run_execution_output_modes(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- json_flag: bool
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, json_flag: bool
) -> None:
"""Test test run execution history with both table and JSON output modes."""
# Arrange
test_executions = [
api_models.TestRunExecution(
- id=1,
- title="Output Mode Test",
- state=api_models.TestStateEnum.passed,
- project_id=1
+ id=1, title="Output Mode Test", state=api_models.TestStateEnum.passed, project_id=1
)
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
@@ -512,8 +459,10 @@ def test_test_run_execution_log_success_empty_content(
def test_test_run_execution_log_configuration_error(self, cli_runner: CliRunner) -> None:
"""Test test run execution log with configuration error."""
# Arrange
- with patch("th_cli.commands.test_run_execution.get_client",
- side_effect=ConfigurationError("Could not connect to server")):
+ with patch(
+ "th_cli.commands.test_run_execution.get_client",
+ side_effect=ConfigurationError("Could not connect to server"),
+ ):
# Act
result = cli_runner.invoke(test_run_execution, ["--id", "123", "--log"])
@@ -635,14 +584,11 @@ def test_test_run_execution_log_large_content(
assert "Log line 1:" in result.output
assert "Log line 999:" in result.output
# Verify that we can handle large content without truncation
- assert len(result.output.split('\n')) >= 1000
+ assert len(result.output.split("\n")) >= 1000
@pytest.mark.parametrize("test_id", ["1", "123", "999", "12345"])
def test_test_run_execution_log_various_ids(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- test_id: str
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, test_id: str
) -> None:
"""Test test run execution log with various ID values."""
# Arrange
@@ -657,30 +603,27 @@ def test_test_run_execution_log_various_ids(
# Assert
assert result.exit_code == 0
assert f"Log for test run execution ID: {test_id}" in result.output
- api.assert_called_once_with(
- id=int(test_id), json_entries=False, download=False
- )
-
- @pytest.mark.parametrize("status_code,content", [
- (400, "Bad Request"),
- (401, "Unauthorized"),
- (403, "Forbidden"),
- (404, "Test run execution not found"),
- (500, "Internal Server Error"),
- (503, "Service Unavailable")
- ])
+ api.assert_called_once_with(id=int(test_id), json_entries=False, download=False)
+
+ @pytest.mark.parametrize(
+ "status_code,content",
+ [
+ (400, "Bad Request"),
+ (401, "Unauthorized"),
+ (403, "Forbidden"),
+ (404, "Test run execution not found"),
+ (500, "Internal Server Error"),
+ (503, "Service Unavailable"),
+ ],
+ )
def test_test_run_execution_log_various_api_errors(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- status_code: int,
- content: str
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, status_code: int, content: str
) -> None:
"""Test test run execution log with various API error status codes."""
# Arrange
api_exception = UnexpectedResponse(
status_code=status_code,
- content=content.encode('utf-8'),
+ content=content.encode("utf-8"),
)
api = mock_sync_apis.test_run_executions_api.download_log_api_v1_test_run_executions__id__log_get
@@ -695,10 +638,7 @@ def test_test_run_execution_log_various_api_errors(
assert content in result.output
def test_test_run_execution_log_client_context_manager(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test that client is properly managed using context manager."""
# Arrange
@@ -734,9 +674,7 @@ def test_test_run_execution_log_api_parameters(
# Assert
assert result.exit_code == 0
# Verify the API is called with correct parameters
- api.assert_called_once_with(
- id=42, json_entries=False, download=False
- )
+ api.assert_called_once_with(id=42, json_entries=False, download=False)
def test_test_run_execution_log_whitespace_content(
self,
@@ -777,26 +715,17 @@ def test_test_run_execution_log_generic_exception(
assert "Network timeout" in str(result.exception)
def test_test_run_execution_sort_parameter_asc(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test run execution with sort parameter set to asc."""
# Arrange
test_executions = [
api_models.TestRunExecution(
- id=1,
- title="Old Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=1
+ id=1, title="Old Test Run", state=api_models.TestStateEnum.passed, project_id=1
),
api_models.TestRunExecution(
- id=2,
- title="New Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=1
- )
+ id=2, title="New Test Run", state=api_models.TestStateEnum.passed, project_id=1
+ ),
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
api.return_value = test_executions
@@ -813,26 +742,17 @@ def test_test_run_execution_sort_parameter_asc(
mock_api_client.close.assert_called_once()
def test_test_run_execution_sort_parameter_desc_default(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test run execution with sort parameter default (desc)."""
# Arrange
test_executions = [
api_models.TestRunExecution(
- id=2,
- title="New Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=1
+ id=2, title="New Test Run", state=api_models.TestStateEnum.passed, project_id=1
),
api_models.TestRunExecution(
- id=1,
- title="Old Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=1
- )
+ id=1, title="Old Test Run", state=api_models.TestStateEnum.passed, project_id=1
+ ),
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
api.return_value = test_executions
@@ -850,26 +770,17 @@ def test_test_run_execution_sort_parameter_desc_default(
mock_api_client.close.assert_called_once()
def test_test_run_execution_sort_parameter_explicit_desc(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test run execution with sort parameter explicitly set to desc."""
# Arrange
test_executions = [
api_models.TestRunExecution(
- id=2,
- title="New Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=1
+ id=2, title="New Test Run", state=api_models.TestStateEnum.passed, project_id=1
),
api_models.TestRunExecution(
- id=1,
- title="Old Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=1
- )
+ id=1, title="Old Test Run", state=api_models.TestStateEnum.passed, project_id=1
+ ),
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
api.return_value = test_executions
@@ -887,26 +798,18 @@ def test_test_run_execution_sort_parameter_explicit_desc(
mock_api_client.close.assert_called_once()
def test_test_run_execution_all_flag(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test run execution with --all flag."""
# Arrange
test_executions = [
- api_models.TestRunExecution(
- id=1,
- title="Test Run 1",
- state=api_models.TestStateEnum.passed,
- project_id=1
- ),
+ api_models.TestRunExecution(id=1, title="Test Run 1", state=api_models.TestStateEnum.passed, project_id=1),
api_models.TestRunExecution(
id=2,
title="Test Run 2",
state=api_models.TestStateEnum.failed,
project_id=1,
- )
+ ),
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
api.return_value = test_executions
@@ -958,19 +861,13 @@ def test_test_run_execution_help_shows_all_option(self, cli_runner: CliRunner) -
assert "(cannot be used with --limit)" in result.output
def test_test_run_execution_with_project_id(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test run execution history filtered by project ID."""
# Arrange
test_executions = [
api_models.TestRunExecution(
- id=1,
- title="Project 5 Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=5
+ id=1, title="Project 5 Test Run", state=api_models.TestStateEnum.passed, project_id=5
)
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
@@ -988,19 +885,13 @@ def test_test_run_execution_with_project_id(
mock_api_client.close.assert_called_once()
def test_test_run_execution_with_project_id_short_form(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test run execution history filtered by project ID using short form."""
# Arrange
test_executions = [
api_models.TestRunExecution(
- id=1,
- title="Project 10 Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=10
+ id=1, title="Project 10 Test Run", state=api_models.TestStateEnum.passed, project_id=10
)
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
@@ -1018,19 +909,13 @@ def test_test_run_execution_with_project_id_short_form(
mock_api_client.close.assert_called_once()
def test_test_run_execution_with_project_id_and_pagination(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test run execution with project ID combined with pagination."""
# Arrange
test_executions = [
api_models.TestRunExecution(
- id=3,
- title="Filtered Paginated Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=7
+ id=3, title="Filtered Paginated Test Run", state=api_models.TestStateEnum.passed, project_id=7
)
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
@@ -1048,20 +933,12 @@ def test_test_run_execution_with_project_id_and_pagination(
mock_api_client.close.assert_called_once()
def test_test_run_execution_with_project_id_and_sort(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test run execution with project ID combined with sort order."""
# Arrange
test_executions = [
- api_models.TestRunExecution(
- id=1,
- title="Old Test Run",
- state=api_models.TestStateEnum.passed,
- project_id=3
- )
+ api_models.TestRunExecution(id=1, title="Old Test Run", state=api_models.TestStateEnum.passed, project_id=3)
]
api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions__get
api.return_value = test_executions
diff --git a/tests/test_test_runner_status.py b/tests/test_test_runner_status.py
index 36b60d3..0258ba9 100644
--- a/tests/test_test_runner_status.py
+++ b/tests/test_test_runner_status.py
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2025 Project CHIP Authors
+# Copyright (c) 2025-2026 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,17 +31,11 @@ class TestTestRunnerStatusCommand:
"""Test cases for the test_runner_status command."""
def test_test_runner_status_success_idle(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test successful test runner status retrieval when idle."""
# Arrange
- status = api_models.TestRunnerStatus(
- state=api_models.TestRunnerState.idle,
- test_run_execution_id=None
- )
+ status = api_models.TestRunnerStatus(state=api_models.TestRunnerState.idle, test_run_execution_id=None)
api = mock_sync_apis.test_run_executions_api.get_test_runner_status_api_v1_test_run_executions_status_get
api.return_value = status
@@ -65,10 +59,7 @@ def test_test_runner_status_success_running(
) -> None:
"""Test successful test runner status retrieval when running."""
# Arrange
- status = api_models.TestRunnerStatus(
- state=api_models.TestRunnerState.running,
- test_run_execution_id=123
- )
+ status = api_models.TestRunnerStatus(state=api_models.TestRunnerState.running, test_run_execution_id=123)
api = mock_sync_apis.test_run_executions_api.get_test_runner_status_api_v1_test_run_executions_status_get
api.return_value = status
@@ -90,10 +81,7 @@ def test_test_runner_status_success_json_output(
) -> None:
"""Test successful test runner status retrieval with JSON output."""
# Arrange
- status = api_models.TestRunnerStatus(
- state=api_models.TestRunnerState.ready,
- test_run_execution_id=None
- )
+ status = api_models.TestRunnerStatus(state=api_models.TestRunnerState.ready, test_run_execution_id=None)
api = mock_sync_apis.test_run_executions_api.get_test_runner_status_api_v1_test_run_executions_status_get
api.return_value = status
@@ -114,7 +102,7 @@ def test_test_runner_status_configuration_error(self, cli_runner: CliRunner) ->
# Arrange
with patch(
"th_cli.commands.test_runner_status.get_client",
- side_effect=ConfigurationError("Could not connect to server")
+ side_effect=ConfigurationError("Could not connect to server"),
):
# Act
result = cli_runner.invoke(test_runner_status)
@@ -124,10 +112,7 @@ def test_test_runner_status_configuration_error(self, cli_runner: CliRunner) ->
assert "Error: Could not connect to server" in result.output
def test_test_runner_status_generic_exception(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- mock_api_client: Mock
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, mock_api_client: Mock
) -> None:
"""Test test runner status with generic exception."""
# Arrange
@@ -167,26 +152,26 @@ def test_test_runner_status_help_message(self, cli_runner: CliRunner) -> None:
assert "--json" in result.output
assert "Print JSON response for more details" in result.output
- @pytest.mark.parametrize("state,execution_id,expected_state", [
- (api_models.TestRunnerState.idle, None, "IDLE"),
- (api_models.TestRunnerState.loading, None, "LOADING"),
- (api_models.TestRunnerState.ready, None, "READY"),
- (api_models.TestRunnerState.running, 456, "RUNNING"),
- ])
+ @pytest.mark.parametrize(
+ "state,execution_id,expected_state",
+ [
+ (api_models.TestRunnerState.idle, None, "IDLE"),
+ (api_models.TestRunnerState.loading, None, "LOADING"),
+ (api_models.TestRunnerState.ready, None, "READY"),
+ (api_models.TestRunnerState.running, 456, "RUNNING"),
+ ],
+ )
def test_test_runner_status_various_states(
self,
cli_runner: CliRunner,
mock_sync_apis: Mock,
state: api_models.TestRunnerState,
execution_id: int,
- expected_state: str
+ expected_state: str,
) -> None:
"""Test test runner status with various states."""
# Arrange
- status = api_models.TestRunnerStatus(
- state=state,
- test_run_execution_id=execution_id
- )
+ status = api_models.TestRunnerStatus(state=state, test_run_execution_id=execution_id)
api = mock_sync_apis.test_run_executions_api.get_test_runner_status_api_v1_test_run_executions_status_get
api.return_value = status
@@ -209,10 +194,7 @@ def test_test_runner_status_output_format_consistency(
) -> None:
"""Test that output format is consistent and well-formatted."""
# Arrange
- status = api_models.TestRunnerStatus(
- state=api_models.TestRunnerState.running,
- test_run_execution_id=789
- )
+ status = api_models.TestRunnerStatus(state=api_models.TestRunnerState.running, test_run_execution_id=789)
api = mock_sync_apis.test_run_executions_api.get_test_runner_status_api_v1_test_run_executions_status_get
api.return_value = status
@@ -223,7 +205,7 @@ def test_test_runner_status_output_format_consistency(
# Assert
assert result.exit_code == 0
# Check for proper formatting structure
- lines = result.output.strip().split('\n')
+ lines = result.output.strip().split("\n")
# Should have empty line, header, state line, and execution id line
assert len(lines) >= 3
assert any("Matter Test Runner Status" in line for line in lines)
@@ -232,17 +214,11 @@ def test_test_runner_status_output_format_consistency(
@pytest.mark.parametrize("json_flag", [True, False])
def test_test_runner_status_output_modes(
- self,
- cli_runner: CliRunner,
- mock_sync_apis: Mock,
- json_flag: bool
+ self, cli_runner: CliRunner, mock_sync_apis: Mock, json_flag: bool
) -> None:
"""Test test runner status with both table and JSON output modes."""
# Arrange
- status = api_models.TestRunnerStatus(
- state=api_models.TestRunnerState.idle,
- test_run_execution_id=None
- )
+ status = api_models.TestRunnerStatus(state=api_models.TestRunnerState.idle, test_run_execution_id=None)
api = mock_sync_apis.test_run_executions_api.get_test_runner_status_api_v1_test_run_executions_status_get
api.return_value = status
@@ -271,10 +247,7 @@ def test_test_runner_status_state_display_formatting(
) -> None:
"""Test that state is properly formatted with colorization."""
# Arrange
- status = api_models.TestRunnerStatus(
- state=api_models.TestRunnerState.running,
- test_run_execution_id=999
- )
+ status = api_models.TestRunnerStatus(state=api_models.TestRunnerState.running, test_run_execution_id=999)
api = mock_sync_apis.test_run_executions_api.get_test_runner_status_api_v1_test_run_executions_status_get
api.return_value = status
@@ -295,10 +268,7 @@ def test_test_runner_status_no_active_test_run_message(
) -> None:
"""Test the 'No active test run' message is displayed correctly."""
# Arrange
- status = api_models.TestRunnerStatus(
- state=api_models.TestRunnerState.ready,
- test_run_execution_id=None
- )
+ status = api_models.TestRunnerStatus(state=api_models.TestRunnerState.ready, test_run_execution_id=None)
api = mock_sync_apis.test_run_executions_api.get_test_runner_status_api_v1_test_run_executions_status_get
api.return_value = status
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 2ca4540..eab112b 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -15,14 +15,13 @@
#
"""Tests for utility functions in th_cli.utils module."""
+import json
from pathlib import Path
import pytest
from th_cli.api_lib_autogen import models as api_models
from th_cli.exceptions import CLIError
-import json
-
from th_cli.utils import (
build_test_selection,
convert_nested_to_dict,
@@ -91,8 +90,7 @@ def test_build_test_selection_empty_list(self, sample_test_collections: api_mode
assert len(result) == 0
def test_build_test_selection_whitespace_handling(
- self,
- sample_test_collections: api_models.TestCollections
+ self, sample_test_collections: api_models.TestCollections
) -> None:
"""Test test selection building handles whitespace in test IDs."""
# Arrange
@@ -111,6 +109,7 @@ class TestConvertNestedToDict:
def test_convert_nested_to_dict_simple_object(self) -> None:
"""Test converting simple object to dictionary."""
+
# Arrange
class SimpleObject:
def __init__(self):
@@ -129,6 +128,7 @@ def __init__(self):
def test_convert_nested_to_dict_nested_objects(self) -> None:
"""Test converting nested objects to dictionary."""
+
# Arrange
class InnerObject:
def __init__(self):
@@ -161,11 +161,7 @@ def test_convert_nested_to_dict_primitive_types(self) -> None:
def test_convert_nested_to_dict_collections(self) -> None:
"""Test converting collections (lists, dicts)."""
# Arrange
- data = {
- "list": [1, 2, 3],
- "dict": {"key": "value"},
- "tuple": (1, 2, 3)
- }
+ data = {"list": [1, 2, 3], "dict": {"key": "value"}, "tuple": (1, 2, 3)}
# Act
print(type(data))
@@ -180,6 +176,7 @@ def test_convert_nested_to_dict_collections(self) -> None:
def test_convert_nested_to_dict_circular_reference(self) -> None:
"""Test handling circular references."""
+
# Arrange
class CircularObject:
def __init__(self):
@@ -205,7 +202,7 @@ class TestParsePicsXml:
def test_parse_pics_xml_success(self) -> None:
"""Test successful PICS XML parsing."""
# Arrange
- xml_content = '''
+ xml_content = """
TestCluster
@@ -228,7 +225,7 @@ def test_parse_pics_xml_success(self) -> None:
-'''
+"""
# Act
result = parse_pics_xml(xml_content)
@@ -259,11 +256,11 @@ def test_parse_pics_xml_invalid_xml(self) -> None:
def test_parse_pics_xml_missing_elements(self) -> None:
"""Test PICS XML parsing with missing required elements."""
# Arrange
- incomplete_xml = '''
+ incomplete_xml = """
-'''
+"""
# Act & Assert
with pytest.raises(CLIError) as exc_info:
@@ -325,7 +322,7 @@ def test_read_pics_config_directory_with_non_xml_files(self, temp_dir: Path) ->
(pics_dir / "config.json").write_text('{"key": "value"}')
# Create valid XML file
- xml_content = '''
+ xml_content = """
ValidCluster
@@ -334,7 +331,7 @@ def test_read_pics_config_directory_with_non_xml_files(self, temp_dir: Path) ->
true
-'''
+"""
(pics_dir / "valid_cluster.xml").write_text(xml_content)
# Act
@@ -422,15 +419,7 @@ def test_load_json_config_empty_file(self, temp_dir: Path) -> None:
def test_load_json_config_nested_structure(self, temp_dir: Path) -> None:
"""Test JSON config loading with deeply nested structure."""
# Arrange
- config_data = {
- "level1": {
- "level2": {
- "level3": {
- "value": "deep"
- }
- }
- }
- }
+ config_data = {"level1": {"level2": {"level3": {"value": "deep"}}}}
config_file = temp_dir / "nested_config.json"
config_file.write_text(json.dumps(config_data))
@@ -449,7 +438,7 @@ def test_load_json_config_various_types(self, temp_dir: Path) -> None:
"boolean": True,
"null": None,
"array": [1, 2, 3],
- "object": {"key": "value"}
+ "object": {"key": "value"},
}
config_file = temp_dir / "types_config.json"
config_file.write_text(json.dumps(config_data))
@@ -471,10 +460,7 @@ def test_load_json_config_full_project_format(self, temp_dir: Path) -> None:
# Arrange
project_data = {
"name": "My Test Project",
- "config": {
- "network": {"wifi": {"ssid": "test_network"}},
- "dut_config": {"pairing_mode": "ble-wifi"}
- }
+ "config": {"network": {"wifi": {"ssid": "test_network"}}, "dut_config": {"pairing_mode": "ble-wifi"}},
}
config_file = temp_dir / "project_config.json"
config_file.write_text(json.dumps(project_data))
@@ -491,10 +477,7 @@ def test_load_json_config_full_project_format(self, temp_dir: Path) -> None:
def test_load_json_config_config_only_format(self, temp_dir: Path) -> None:
"""Test JSON config loading with config-only format (uses as-is)."""
# Arrange
- config_data = {
- "network": {"wifi": {"ssid": "test_network"}},
- "dut_config": {"pairing_mode": "ble-wifi"}
- }
+ config_data = {"network": {"wifi": {"ssid": "test_network"}}, "dut_config": {"pairing_mode": "ble-wifi"}}
config_file = temp_dir / "config_only.json"
config_file.write_text(json.dumps(config_data))
@@ -509,10 +492,7 @@ def test_load_json_config_config_only_format(self, temp_dir: Path) -> None:
def test_load_json_config_invalid_config_key_type(self, temp_dir: Path) -> None:
"""Test JSON config loading with invalid config key type."""
# Arrange
- invalid_data = {
- "name": "Project",
- "config": "not_a_dict" # config should be a dict, not a string
- }
+ invalid_data = {"name": "Project", "config": "not_a_dict"} # config should be a dict, not a string
config_file = temp_dir / "invalid_config_type.json"
config_file.write_text(json.dumps(invalid_data))
@@ -527,7 +507,7 @@ def test_load_json_config_non_dict_root(self, temp_dir: Path) -> None:
"""Test JSON config loading with non-dictionary root."""
# Arrange
config_file = temp_dir / "array_root.json"
- config_file.write_text('[1, 2, 3]') # Array instead of object
+ config_file.write_text("[1, 2, 3]") # Array instead of object
# Act & Assert
with pytest.raises(CLIError) as exc_info:
@@ -539,10 +519,7 @@ def test_load_json_config_non_dict_root(self, temp_dir: Path) -> None:
def test_load_json_config_format_compatibility(self, temp_dir: Path) -> None:
"""Test that both formats work for the same logical config."""
# Arrange
- config_content = {
- "network": {"wifi": {"ssid": "same_network"}},
- "dut_config": {"pairing_mode": "onnetwork"}
- }
+ config_content = {"network": {"wifi": {"ssid": "same_network"}}, "dut_config": {"pairing_mode": "onnetwork"}}
# Create config-only format file
config_only_file = temp_dir / "config_only.json"
@@ -582,17 +559,10 @@ def test_merge_configs_nested(self) -> None:
"""Test nested configuration merging."""
# Arrange
base = {
- "network": {
- "wifi": {"ssid": "default", "password": "default"},
- "thread": {"channel": 15}
- },
- "dut_config": {"pairing_mode": "onnetwork"}
- }
- override = {
- "network": {
- "wifi": {"ssid": "custom"}
- }
+ "network": {"wifi": {"ssid": "default", "password": "default"}, "thread": {"channel": 15}},
+ "dut_config": {"pairing_mode": "onnetwork"},
}
+ override = {"network": {"wifi": {"ssid": "custom"}}}
# Act
result = merge_configs(base, override)
@@ -706,38 +676,21 @@ def test_merge_configs_complex_scenario(self) -> None:
base = {
"network": {
"fabric_id": 0,
- "thread": {
- "channel": 15,
- "panid": "0x1234",
- "networkkey": "00112233445566778899aabbccddeeff"
- },
- "wifi": {
- "ssid": "default_network",
- "password": "default_pass"
- }
+ "thread": {"channel": 15, "panid": "0x1234", "networkkey": "00112233445566778899aabbccddeeff"},
+ "wifi": {"ssid": "default_network", "password": "default_pass"},
},
"dut_config": {
"pairing_mode": "onnetwork",
"setup_code": "20202021",
"discriminator": "3840",
- "trace_log": True
+ "trace_log": True,
},
- "test_parameters": {}
+ "test_parameters": {},
}
override = {
- "network": {
- "wifi": {
- "ssid": "my_network",
- "password": "my_pass"
- }
- },
- "dut_config": {
- "discriminator": "3402",
- "trace_log": False
- },
- "test_parameters": {
- "custom_param": "custom_value"
- }
+ "network": {"wifi": {"ssid": "my_network", "password": "my_pass"}},
+ "dut_config": {"discriminator": "3402", "trace_log": False},
+ "test_parameters": {"custom_param": "custom_value"},
}
# Act
@@ -778,6 +731,7 @@ def test_build_test_selection_case_insensitive(self, sample_test_collections: ap
def test_convert_nested_to_dict_special_attributes(self) -> None:
"""Test that special attributes are properly filtered."""
+
# Arrange
class ObjectWithSpecialAttrs:
def __init__(self):
@@ -799,7 +753,7 @@ def __init__(self):
def test_parse_pics_xml_empty_sections(self) -> None:
"""Test PICS XML parsing with empty sections."""
# Arrange
- xml_content = '''
+ xml_content = """
EmptyCluster
@@ -810,7 +764,7 @@ def test_parse_pics_xml_empty_sections(self) -> None:
-'''
+"""
# Act
result = parse_pics_xml(xml_content)
@@ -822,3 +776,84 @@ def test_parse_pics_xml_empty_sections(self) -> None:
assert "items" in result["clusters"]["EmptyCluster"]
# Should handle empty sections gracefully
assert isinstance(result["clusters"]["EmptyCluster"]["items"], dict)
+
+
+@pytest.mark.unit
+class TestBuildTestSelectionCaseInsensitive:
+ """Tests for the case-insensitive comparison introduced in fix/908 (#69).
+
+ Both the input IDs and the collection IDs are now normalised with
+ .upper() before comparison, so any combination of upper/lower/mixed
+ case must resolve to the correct test case.
+ """
+
+ def test_lowercase_input_matches_collection_entry(
+ self, sample_test_collections: api_models.TestCollections
+ ) -> None:
+ """All-lowercase input 'tc-ace-1.1' matches the collection entry 'TC-ACE-1.1'."""
+ result = build_test_selection(sample_test_collections, ["tc-ace-1.1"])
+
+ assert "SDK YAML Tests" in result
+ assert "FirstChipToolSuite" in result["SDK YAML Tests"]
+ assert "TC-ACE-1.1" in result["SDK YAML Tests"]["FirstChipToolSuite"]
+ assert result["SDK YAML Tests"]["FirstChipToolSuite"]["TC-ACE-1.1"] == 1
+
+ def test_mixed_case_input_matches_collection_entry(
+ self, sample_test_collections: api_models.TestCollections
+ ) -> None:
+ """Mixed-case input 'Tc-Ace-1.1' matches the collection entry 'TC-ACE-1.1'."""
+ result = build_test_selection(sample_test_collections, ["Tc-Ace-1.1"])
+
+ suite = result.get("SDK YAML Tests", {}).get("FirstChipToolSuite", {})
+ assert "TC-ACE-1.1" in suite
+ assert result["SDK YAML Tests"]["FirstChipToolSuite"]["TC-ACE-1.1"] == 1
+
+ def test_lowercase_underscore_format_matches_python_test(
+ self, sample_test_collections: api_models.TestCollections
+ ) -> None:
+ """Lowercase 'tc_ace_1_3' matches the Python collection entry 'TC_ACE_1_3'."""
+ result = build_test_selection(sample_test_collections, ["tc_ace_1_3"])
+
+ assert "SDK Python Tests" in result
+ assert "Python Testing Suite" in result["SDK Python Tests"]
+ assert "TC_ACE_1_3" in result["SDK Python Tests"]["Python Testing Suite"]
+ assert result["SDK Python Tests"]["Python Testing Suite"]["TC_ACE_1_3"] == 1
+
+ def test_uppercase_input_still_matches(self, sample_test_collections: api_models.TestCollections) -> None:
+ """Existing all-uppercase input continues to work after the change."""
+ result = build_test_selection(sample_test_collections, ["TC-ACE-1.2"])
+
+ suite = result.get("SDK YAML Tests", {}).get("FirstChipToolSuite", {})
+ assert "TC-ACE-1.2" in suite
+ assert suite["TC-ACE-1.2"] == 1
+
+ def test_original_collection_key_preserved_in_output(
+ self, sample_test_collections: api_models.TestCollections
+ ) -> None:
+ """Output uses the original collection key, not the normalised form."""
+ result = build_test_selection(sample_test_collections, ["tc-ace-1.1"])
+
+ suite = result.get("SDK YAML Tests", {}).get("FirstChipToolSuite", {})
+ assert "TC-ACE-1.1" in suite # original key preserved
+ assert "tc-ace-1.1" not in suite # normalised input not used as key
+ assert "TC_ACE_1_1" not in suite # separator-normalised form not used as key
+
+ def test_multiple_mixed_case_ids_all_resolved(self, sample_test_collections: api_models.TestCollections) -> None:
+ """Multiple IDs in varying cases are all matched in a single call."""
+ result = build_test_selection(
+ sample_test_collections,
+ ["tc-ace-1.1", "TC-ACE-1.2", "Tc-Cc-1.1"],
+ )
+
+ suite = result.get("SDK YAML Tests", {}).get("FirstChipToolSuite", {})
+ assert "TC-ACE-1.1" in suite
+ assert "TC-ACE-1.2" in suite
+ assert "TC-CC-1.1" in suite
+
+ def test_no_false_positives_for_unrelated_ids(self, sample_test_collections: api_models.TestCollections) -> None:
+ """Selecting one ID by lowercase does not accidentally select other IDs."""
+ result = build_test_selection(sample_test_collections, ["tc-ace-1.1"])
+
+ suite = result.get("SDK YAML Tests", {}).get("FirstChipToolSuite", {})
+ assert "TC-ACE-1.2" not in suite
+ assert "TC-CC-1.1" not in suite
diff --git a/tests/test_validation.py b/tests/test_validation.py
index 2bf859e..ccef4f0 100644
--- a/tests/test_validation.py
+++ b/tests/test_validation.py
@@ -225,6 +225,15 @@ def test_valid_subdomain(self):
"""Valid subdomain is accepted."""
assert validate_hostname("api.example.com") == "api.example.com"
+ def test_plain_hostname_without_port_is_accepted(self):
+ """A plain hostname with no port suffix passes domain-name validation."""
+ # Counterpart to test_hostname_with_port_is_rejected below: the same
+ # domain is accepted once no ':port' suffix is present. (The previous
+ # wording wrongly implied 'host:port' strings pass validation; they
+ # are rejected, as the next test asserts.)
+ result = validate_hostname("example.com")
+ assert result == "example.com"
+
def test_hostname_with_port_is_rejected(self):
"""A hostname with a port is not a valid hostname and should be rejected."""
with pytest.raises(CLIError, match="Invalid hostname format"):
diff --git a/th_cli/api_lib_autogen/api/__init__.py b/th_cli/api_lib_autogen/api/__init__.py
index a9a086a..dcfc013 100644
--- a/th_cli/api_lib_autogen/api/__init__.py
+++ b/th_cli/api_lib_autogen/api/__init__.py
@@ -15,14 +15,14 @@
#
"""API endpoint classes."""
-from th_cli.api_lib_autogen.api.test_collections_api import AsyncTestCollectionsApi, SyncTestCollectionsApi
-from th_cli.api_lib_autogen.api.projects_api import AsyncProjectsApi, SyncProjectsApi
+from th_cli.api_lib_autogen.api.devices_api import AsyncDevicesApi, SyncDevicesApi
from th_cli.api_lib_autogen.api.operators_api import AsyncOperatorsApi, SyncOperatorsApi
-from th_cli.api_lib_autogen.api.test_run_executions_api import AsyncTestRunExecutionsApi, SyncTestRunExecutionsApi
+from th_cli.api_lib_autogen.api.projects_api import AsyncProjectsApi, SyncProjectsApi
+from th_cli.api_lib_autogen.api.test_collections_api import AsyncTestCollectionsApi, SyncTestCollectionsApi
from th_cli.api_lib_autogen.api.test_run_configs_api import AsyncTestRunConfigsApi, SyncTestRunConfigsApi
-from th_cli.api_lib_autogen.api.version_api import AsyncVersionApi, SyncVersionApi
+from th_cli.api_lib_autogen.api.test_run_executions_api import AsyncTestRunExecutionsApi, SyncTestRunExecutionsApi
from th_cli.api_lib_autogen.api.utils_api import AsyncUtilsApi, SyncUtilsApi
-from th_cli.api_lib_autogen.api.devices_api import AsyncDevicesApi, SyncDevicesApi
+from th_cli.api_lib_autogen.api.version_api import AsyncVersionApi, SyncVersionApi
__all__ = [
"TestCollectionsApi",
diff --git a/th_cli/api_lib_autogen/api/devices_api.py b/th_cli/api_lib_autogen/api/devices_api.py
index fac88fc..f3e67d4 100644
--- a/th_cli/api_lib_autogen/api/devices_api.py
+++ b/th_cli/api_lib_autogen/api/devices_api.py
@@ -15,7 +15,7 @@
#
# flake8: noqa E501
from asyncio import get_event_loop
-from typing import Coroutine, IO, TYPE_CHECKING, Any
+from typing import IO, TYPE_CHECKING, Any, Coroutine
from th_cli.api_lib_autogen import models as m
diff --git a/th_cli/api_lib_autogen/api/operators_api.py b/th_cli/api_lib_autogen/api/operators_api.py
index 5ed9bee..9626d1a 100644
--- a/th_cli/api_lib_autogen/api/operators_api.py
+++ b/th_cli/api_lib_autogen/api/operators_api.py
@@ -15,7 +15,7 @@
#
# flake8: noqa E501
from asyncio import get_event_loop
-from typing import Coroutine, IO, TYPE_CHECKING, Any
+from typing import IO, TYPE_CHECKING, Any, Coroutine
from th_cli.api_lib_autogen import models as m
diff --git a/th_cli/api_lib_autogen/api/projects_api.py b/th_cli/api_lib_autogen/api/projects_api.py
index 9c1f38c..3633a95 100644
--- a/th_cli/api_lib_autogen/api/projects_api.py
+++ b/th_cli/api_lib_autogen/api/projects_api.py
@@ -15,7 +15,7 @@
#
# flake8: noqa E501
from asyncio import get_event_loop
-from typing import Coroutine, IO, TYPE_CHECKING, Any
+from typing import IO, TYPE_CHECKING, Any, Coroutine
from th_cli.api_lib_autogen import models as m
diff --git a/th_cli/api_lib_autogen/api/test_collections_api.py b/th_cli/api_lib_autogen/api/test_collections_api.py
index fc0c4ac..f3c414b 100644
--- a/th_cli/api_lib_autogen/api/test_collections_api.py
+++ b/th_cli/api_lib_autogen/api/test_collections_api.py
@@ -15,7 +15,7 @@
#
# flake8: noqa E501
from asyncio import get_event_loop
-from typing import Coroutine, IO, TYPE_CHECKING, Any
+from typing import IO, TYPE_CHECKING, Any, Coroutine
from th_cli.api_lib_autogen import models as m
diff --git a/th_cli/api_lib_autogen/api/test_run_configs_api.py b/th_cli/api_lib_autogen/api/test_run_configs_api.py
index fe597be..135df60 100644
--- a/th_cli/api_lib_autogen/api/test_run_configs_api.py
+++ b/th_cli/api_lib_autogen/api/test_run_configs_api.py
@@ -15,7 +15,7 @@
#
# flake8: noqa E501
from asyncio import get_event_loop
-from typing import Coroutine, IO, TYPE_CHECKING, Any
+from typing import IO, TYPE_CHECKING, Any, Coroutine
from th_cli.api_lib_autogen import models as m
diff --git a/th_cli/api_lib_autogen/api/test_run_executions_api.py b/th_cli/api_lib_autogen/api/test_run_executions_api.py
index 0f1ec2b..3d8086f 100644
--- a/th_cli/api_lib_autogen/api/test_run_executions_api.py
+++ b/th_cli/api_lib_autogen/api/test_run_executions_api.py
@@ -15,7 +15,7 @@
#
# flake8: noqa E501
from asyncio import get_event_loop
-from typing import Coroutine, IO, TYPE_CHECKING, Any
+from typing import IO, TYPE_CHECKING, Any, Coroutine
from th_cli.api_lib_autogen import models as m
diff --git a/th_cli/api_lib_autogen/api/utils_api.py b/th_cli/api_lib_autogen/api/utils_api.py
index c794553..7bb9170 100644
--- a/th_cli/api_lib_autogen/api/utils_api.py
+++ b/th_cli/api_lib_autogen/api/utils_api.py
@@ -15,7 +15,7 @@
#
# flake8: noqa E501
from asyncio import get_event_loop
-from typing import Coroutine, IO, TYPE_CHECKING, Any
+from typing import IO, TYPE_CHECKING, Any, Coroutine
from th_cli.api_lib_autogen import models as m
diff --git a/th_cli/api_lib_autogen/api/version_api.py b/th_cli/api_lib_autogen/api/version_api.py
index 599ba19..2937dc2 100644
--- a/th_cli/api_lib_autogen/api/version_api.py
+++ b/th_cli/api_lib_autogen/api/version_api.py
@@ -15,7 +15,7 @@
#
# flake8: noqa E501
from asyncio import get_event_loop
-from typing import Coroutine, IO, TYPE_CHECKING, Any
+from typing import IO, TYPE_CHECKING, Any, Coroutine
from th_cli.api_lib_autogen import models as m
diff --git a/th_cli/api_lib_autogen/api_client.py b/th_cli/api_lib_autogen/api_client.py
index 85fc18e..0fcc462 100644
--- a/th_cli/api_lib_autogen/api_client.py
+++ b/th_cli/api_lib_autogen/api_client.py
@@ -19,14 +19,14 @@
from httpx import AsyncClient, Request, Response
from pydantic import TypeAdapter
-from th_cli.api_lib_autogen.api.test_collections_api import AsyncTestCollectionsApi, SyncTestCollectionsApi
-from th_cli.api_lib_autogen.api.projects_api import AsyncProjectsApi, SyncProjectsApi
+from th_cli.api_lib_autogen.api.devices_api import AsyncDevicesApi, SyncDevicesApi
from th_cli.api_lib_autogen.api.operators_api import AsyncOperatorsApi, SyncOperatorsApi
-from th_cli.api_lib_autogen.api.test_run_executions_api import AsyncTestRunExecutionsApi, SyncTestRunExecutionsApi
+from th_cli.api_lib_autogen.api.projects_api import AsyncProjectsApi, SyncProjectsApi
+from th_cli.api_lib_autogen.api.test_collections_api import AsyncTestCollectionsApi, SyncTestCollectionsApi
from th_cli.api_lib_autogen.api.test_run_configs_api import AsyncTestRunConfigsApi, SyncTestRunConfigsApi
-from th_cli.api_lib_autogen.api.version_api import AsyncVersionApi, SyncVersionApi
+from th_cli.api_lib_autogen.api.test_run_executions_api import AsyncTestRunExecutionsApi, SyncTestRunExecutionsApi
from th_cli.api_lib_autogen.api.utils_api import AsyncUtilsApi, SyncUtilsApi
-from th_cli.api_lib_autogen.api.devices_api import AsyncDevicesApi, SyncDevicesApi
+from th_cli.api_lib_autogen.api.version_api import AsyncVersionApi, SyncVersionApi
from th_cli.api_lib_autogen.exceptions import ResponseHandlingException, UnexpectedResponse
ClientT = TypeVar("ClientT", bound="ApiClient")
@@ -80,14 +80,12 @@ def close(self) -> None:
@overload
async def request(
self, *, type_: Type[T], method: str, url: str, path_params: dict[str, Any] | None = None, **kwargs: Any
- ) -> T:
- ...
+ ) -> T: ...
@overload
async def request(
self, *, type_: None, method: str, url: str, path_params: dict[str, Any] | None = None, **kwargs: Any
- ) -> None:
- ...
+ ) -> None: ...
async def request(
self, *, type_: Any, method: str, url: str, path_params: dict[str, Any] | None = None, **kwargs: Any
@@ -99,12 +97,10 @@ async def request(
return await self.send(request, type_)
@overload
- def request_sync(self, *, type_: Type[T], **kwargs: Any) -> T:
- ...
+ def request_sync(self, *, type_: Type[T], **kwargs: Any) -> T: ...
@overload
- def request_sync(self, *, type_: None, **kwargs: Any) -> None:
- ...
+ def request_sync(self, *, type_: None, **kwargs: Any) -> None: ...
def request_sync(self, *, type_: Any, **kwargs: Any) -> Any:
"""
diff --git a/th_cli/api_lib_autogen/models.py b/th_cli/api_lib_autogen/models.py
index 1ecdb69..7960e25 100644
--- a/th_cli/api_lib_autogen/models.py
+++ b/th_cli/api_lib_autogen/models.py
@@ -434,9 +434,9 @@ class TestRunExecutionToExport(BaseModel):
started_at: Annotated[datetime | None, Field(title="Started At")] = None
completed_at: Annotated[datetime | None, Field(title="Completed At")] = None
archived_at: Annotated[datetime | None, Field(title="Archived At")] = None
- test_suite_executions: Annotated[
- list[TestSuiteExecutionToExport] | None, Field(title="Test Suite Executions")
- ] = None
+ test_suite_executions: Annotated[list[TestSuiteExecutionToExport] | None, Field(title="Test Suite Executions")] = (
+ None
+ )
created_at: Annotated[datetime, Field(title="Created At")]
log: Annotated[list[TestRunLogEntry], Field(title="Log")]
operator: OperatorToExport | None = None
diff --git a/th_cli/commands/available_tests.py b/th_cli/commands/available_tests.py
index 3100c2d..a057be3 100644
--- a/th_cli/commands/available_tests.py
+++ b/th_cli/commands/available_tests.py
@@ -24,9 +24,9 @@
from th_cli.api_lib_autogen.api_client import SyncApis
from th_cli.api_lib_autogen.exceptions import UnexpectedResponse
from th_cli.client import get_client
-from th_cli.colorize import colorize_cmd_help, colorize_dump, colorize_help
+from th_cli.colorize import colorize_cmd_help, colorize_help
from th_cli.exceptions import CLIError, handle_api_error
-from th_cli.utils import __json_string, __print_json
+from th_cli.utils import __json_string
# Constants
COLUMN_WIDTH = 23 # Fixed width for test ID columns with proper spacing
diff --git a/th_cli/commands/project.py b/th_cli/commands/project.py
index d16c6dd..e5c8448 100644
--- a/th_cli/commands/project.py
+++ b/th_cli/commands/project.py
@@ -31,13 +31,14 @@
colorize_help,
colorize_success,
colorize_warning,
- italic
+ italic,
)
from th_cli.exceptions import CLIError, handle_api_error, handle_file_error
from th_cli.utils import __print_json
TABLE_FORMAT = "{:<5} {:25} {:28}"
+
# Click command group for project management
@click.group(
short_help=colorize_help("Manage projects"),
@@ -283,11 +284,11 @@ def __print_project(project: dict) -> None:
def _update_project(
- sync_apis: SyncApis,
- id: int,
- name: str | None = None,
- config_path: str | None = None,
- ) -> None:
+ sync_apis: SyncApis,
+ id: int,
+ name: str | None = None,
+ config_path: str | None = None,
+) -> None:
"""Update an existing project"""
try:
if all(param is None for param in [name, config_path]):
diff --git a/th_cli/commands/run_tests.py b/th_cli/commands/run_tests.py
index c0c4db1..27d70d6 100644
--- a/th_cli/commands/run_tests.py
+++ b/th_cli/commands/run_tests.py
@@ -37,13 +37,7 @@
)
from th_cli.exceptions import CLIError, handle_api_error
from th_cli.test_run.websocket import TestRunSocket
-from th_cli.utils import (
- build_test_selection,
- convert_nested_to_dict,
- load_json_config,
- merge_configs,
- read_pics_config,
-)
+from th_cli.utils import build_test_selection, convert_nested_to_dict, load_json_config, merge_configs, read_pics_config
from th_cli.validation import validate_directory_path, validate_file_path, validate_test_ids
# Constants
@@ -74,8 +68,7 @@
"-c",
type=click.Path(file_okay=True, dir_okay=False),
help=colorize_help(
- "JSON config file location. If not provided, the project's default "
- "configuration will be used."
+ "JSON config file location. If not provided, the project's default configuration will be used."
),
)
@click.option(
@@ -165,10 +158,11 @@ async def run_tests(
# Merge extra test parameters if provided (temporary for this execution only)
if extra_test_params:
- click.echo(colorize_key_value(
- "Extra SDK Test Parameters (This Run Only)",
- json.dumps(extra_test_params, indent=JSON_INDENT)
- ))
+ click.echo(
+ colorize_key_value(
+ "Extra SDK Test Parameters (This Run Only)", json.dumps(extra_test_params, indent=JSON_INDENT)
+ )
+ )
if "test_parameters" not in test_run_config or test_run_config["test_parameters"] is None:
test_run_config["test_parameters"] = {}
test_run_config["test_parameters"].update(extra_test_params)
@@ -255,21 +249,18 @@ def _parse_extra_args(args: list[str]) -> dict[str, str]:
arg = args[i]
# Skip non-flag arguments or subsequent --
- if not arg.startswith('-') or arg == "--":
+ if not arg.startswith("-") or arg == "--":
i += 1
continue
# Extract parameter name (remove leading dashes)
- if arg.startswith('--'):
+ if arg.startswith("--"):
param_name = arg[2:]
else:
param_name = arg[1:]
# Check if next argument exists and is a value (not a flag)
- has_value = (
- i + 1 < len(args)
- and not args[i + 1].startswith('-')
- )
+ has_value = i + 1 < len(args) and not args[i + 1].startswith("-")
if has_value:
params[param_name] = args[i + 1]
diff --git a/th_cli/config.py b/th_cli/config.py
index 1b31331..ff5ade6 100644
--- a/th_cli/config.py
+++ b/th_cli/config.py
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2023 Project CHIP Authors
+# Copyright (c) 2023-2026 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -82,7 +82,9 @@ def get_config_search_paths() -> list[Path]:
class LogConfig(BaseModel):
output_log_path: str = "./run_logs"
- format: str = "{level: <8} | {time:YYYY-MM-DD HH:mm:ss.SSS} | {message}"
+ format: str = (
+ "{level: <8} | {time:YYYY-MM-DD HH:mm:ss.SSS} | {message}"
+ )
class Config(BaseModel):
diff --git a/th_cli/test_run/__init_.py b/th_cli/test_run/__init__.py
similarity index 100%
rename from th_cli/test_run/__init_.py
rename to th_cli/test_run/__init__.py
diff --git a/th_cli/test_run/camera/websocket_manager.py b/th_cli/test_run/camera/websocket_manager.py
index f8054a3..e52e8a6 100644
--- a/th_cli/test_run/camera/websocket_manager.py
+++ b/th_cli/test_run/camera/websocket_manager.py
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2025 Project CHIP Authors
+# Copyright (c) 2025-2026 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -92,7 +92,7 @@ async def _capture_video_data(self, stream_file, mp4_queue):
# Receive video data from WebSocket
data = await asyncio.wait_for(self.video_websocket.recv(), timeout=1.0)
logger.debug(
- f"Received data: {type(data)}, size: {len(data) if isinstance(data, (bytes, str)) else 'unknown'}"
+ f"Received data: {type(data)}, size: {len(data) if isinstance(data, (bytes, str)) else 'unknown'}" # noqa: E501
)
video_data = None
diff --git a/th_cli/utils.py b/th_cli/utils.py
index c54ba7a..8a346ae 100644
--- a/th_cli/utils.py
+++ b/th_cli/utils.py
@@ -162,10 +162,7 @@ def load_json_config(config_path: str) -> dict[str, Any]:
except FileNotFoundError as e:
handle_file_error(e, "config file")
except json.JSONDecodeError as e:
- raise CLIError(
- f"Invalid JSON in config file '{config_path}': {e.msg} "
- f"(line {e.lineno}, column {e.colno})"
- )
+ raise CLIError(f"Invalid JSON in config file '{config_path}': {e.msg} (line {e.lineno}, column {e.colno})")
except OSError as e:
raise CLIError(f"Failed to read config file '{config_path}': {e}")